[
  {
    "path": ".clang-format",
    "content": "AccessModifierOffset: 0\nAlignAfterOpenBracket: AlwaysBreak\nAlignConsecutiveAssignments: true\nAlignConsecutiveDeclarations: true\nAlignConsecutiveMacros: true\nAlignEscapedNewlines: true\nAlignOperands: true\nAlignTrailingComments: true\nAllowAllArgumentsOnNextLine: false\nAllowAllConstructorInitializersOnNextLine: false\nAllowAllParametersOfDeclarationOnNextLine: false\nAllowShortBlocksOnASingleLine: true\nAllowShortCaseLabelsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: false\nAllowShortIfStatementsOnASingleLine: true\nAllowShortLambdasOnASingleLine: true\nAllowShortLoopsOnASingleLine: true\nAlwaysBreakAfterReturnType: None\nAlwaysBreakBeforeMultilineStrings: false\nBinPackArguments: false\nBinPackParameters: false\nBreakBeforeBinaryOperators: None\nBreakBeforeBraces: Attach\nBreakBeforeTernaryOperators: true\nBreakConstructorInitializers: BeforeColon\nBreakStringLiterals: true\nColumnLimit: 100\nCompactNamespaces: false\nConstructorInitializerAllOnOneLineOrOnePerLine: true\nConstructorInitializerIndentWidth: 4\nContinuationIndentWidth: 4\nCpp11BracedListStyle: true\nDeriveLineEnding: true\nDerivePointerAlignment: false\nFixNamespaceComments: true\nIncludeBlocks: Regroup\nIndentCaseLabels: false\nIndentGotoLabels: false\nIndentWidth: 2\nKeepEmptyLinesAtTheStartOfBlocks: false\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nPointerAlignment: Left\nReflowComments: true\nSortIncludes: true\nSortUsingDeclarations: true\nSpaceAfterCStyleCast: false\nSpaceAfterTemplateKeyword: false\nSpaceBeforeAssignmentOperators: true\nSpaceBeforeCpp11BracedList: true\nSpaceBeforeCtorInitializerColon: true\nSpaceBeforeParens: true\nSpaceBeforeInheritanceColon: true\nSpaceBeforeRangeBasedForLoopColon: true\nSpaceBeforeSquareBrackets: false\nSpaceInEmptyBlock: false\nSpaceInEmptyParentheses: false\nSpacesBeforeTrailingComments: 2\nSpacesInAngles: false\nSpacesInCStyleCastParentheses: false\nSpacesInConditionalStatement: false\nSpacesInContainerLiterals: 
false\nSpacesInSquareBrackets: false\nTabWidth: 2\nUseCRLF: false\nUseTab: Never\n"
  },
  {
    "path": ".github/workflows/tests.yml",
    "content": "name: Run tests\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\nenv:\n  CC: /usr/bin/clang\n  CXX: /usr/bin/clang++\n  \njobs:\n  build:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: update_apt\n      run: sudo apt-get update\n    - name: install_deps\n      run: sudo apt install libx11-xcb-dev\n    - name: make_build_dir\n      run: mkdir -p build\n    - name: run_cmake\n      run: cd ./build && cmake .. -DNGF_BUILD_TESTS=yes\n    - name: make\n      run:  cd ./build && make vk-backend-tests\n    - name: test\n      run:  ./build/vk-backend-tests\n"
  },
  {
    "path": ".gitignore",
    "content": "build/*\ntests/build/*\ndocs/doxygen/html\ndocs/doxygen/xml\ndocs/doxygen/latex\ntests/ngf_tests\nsamples/binaries/*\nsamples/deps/niceshade/*\n/out/build/x64-Debug\n/.vs\nsamples-build-files/*\n.gitmodules\n.idea\ncmake-build-debug-visual-studio\ndocs/doxygen\n**/.DS_Store\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "#[[\nCopyright (c) 2026 nicegraf contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the “Software”), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n]]\n\ncmake_minimum_required(VERSION 3.24)\nproject(nicegraf)\n\n\nset(CMAKE_C_STANDARD 99)\nset(CMAKE_CXX_STANDARD 20)\n\ninclude(\"${CMAKE_CURRENT_LIST_DIR}/build-utils.cmake\")\n\n# These are the compiler flags that are used on all nicegraf targets.\nif(MSVC)\n    set(NICEMAKE_COMMON_COMPILE_OPTS \"/W4\")\nelse()\n    set(NICEMAKE_COMMON_COMPILE_OPTS \"-Wall\" \"-Wconversion\" \"-Wno-unknown-pragmas\" \"-Wno-error=comment\")\nendif()\n\nif(CMAKE_CXX_COMPILER_ID MATCHES \"Clang\")\n    list(APPEND NICEMAKE_COMMON_COMPILE_OPTS \"-Wno-unknown-warning-option\" \"-Wno-missing-designated-field-initializers\")\nendif()\n\nset(NICEGRAF_COMMON_DEPS nicegraf-internal)\n\n# A library with various utilities shared internally across different backends.\nnmk_static_library(NAME nicegraf-internal\n                   SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h\n                        
${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/macros.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/unique-ptr.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/value-or-error.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/cmdbuf-state.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/internal.cpp\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/util.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/arena.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/arena.cpp\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/default-arenas.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/default-arenas.cpp\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/chunked-list.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/hashtable.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/array.h)\n\n# nicegraf utility library.\nnmk_static_library(NAME nicegraf-util\n                   SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf-util.h\n                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/util.c\n                   DEPS nicegraf-internal)\n\n\nif (APPLE)\n  find_library(APPLE_METAL Metal)\n  find_library(APPLE_QUARTZ QuartzCore)\n  find_library(APPLE_COREGRAPHICS CoreGraphics)\n  find_library(APPLE_COCOA Cocoa)\n  find_library(APPLE_UIKIT UIKit)\nendif()\n\nif (APPLE AND NOT (NGF_USE_MVK STREQUAL \"yes\"))\n  # Nicegraf with native Metal backend.\n  set(APPLE_LIBS ${APPLE_METAL} ${APPLE_QUARTZ} ${APPLE_COREGRAPHICS})\n\n  if (APPLE_COCOA)\n    set(APPLE_LIBS ${APPLE_LIBS} ${APPLE_COCOA}) # macOS\n  else()\n    set(APPLE_LIBS ${APPLE_LIBS} ${UIKit}) # iOS\n  endif()\n\n  nmk_static_library(NAME nicegraf-mtl\n                     SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h\n                          
${CMAKE_CURRENT_LIST_DIR}/include/nicegraf-mtl-handles.h\n                          ${CMAKE_CURRENT_LIST_DIR}/source/ngf-mtl/impl.cpp\n                          ${CMAKE_CURRENT_LIST_DIR}/source/ngf-mtl/layer.mm\n                     DEPS ${NICEGRAF_COMMON_DEPS} ${APPLE_LIBS}\n                     PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/metal-cpp\n                     COPTS \"-fobjc-arc\")\nelse()\n  nmk_header_library(NAME nicegraf-vk-headers\n                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/vulkan-headers)\n  nmk_header_library(NAME nicegraf-renderdoc\n                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/renderdoc)\n\n  # Import VMA for handling vulkan memory allocation.\n  add_definitions(\"-DVMA_STATIC_VULKAN_FUNCTIONS=0\")\n  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/deps/vma)\n  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/deps/SPIRV-reflect)\n\n  # Some vulkan-specific dependencies.\n  set(NICEGRAF_VK_DEPS vma spvreflect nicegraf-vk-headers nicegraf-renderdoc)\n  if (NOT WIN32 AND NOT APPLE)\n    set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} xcb)\n  elseif (APPLE)\n    set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} ${APPLE_QUARTZ})\n  endif()\n\n  set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} ${NICEGRAF_COMMON_DEPS})\n\n  set(NICEGRAF_VK_SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h\n                       ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/impl.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/vk_10.c)\n  if (NGF_USE_MVK STREQUAL \"yes\")\n    set(NICEGRAF_VK_SRCS ${NICEGRAF_VK_SRCS} ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/ca-metal-layer.mm)\n  endif()\n\n  # Vulkan backend.\n  nmk_static_library(NAME nicegraf-vk\n                     SRCS ${NICEGRAF_VK_SRCS}\n                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/include\n                     DEPS ${NICEGRAF_VK_DEPS})\n\n  if (NGF_BUILD_TESTS STREQUAL \"yes\")\n    nmk_binary(NAME vk-backend-tests\n               SRCS ${NICEGRAF_VK_SRCS}\n               DEPS 
utest ${NICEGRAF_VK_DEPS}\n               PVT_DEFINES NGFVK_TEST_MODE)\n    set_target_properties(vk-backend-tests PROPERTIES COMPILE_WARNING_AS_ERROR NO)\n  endif()\nendif()\n\n# Build tests only if explicitly requested.\nif (NGF_BUILD_TESTS STREQUAL \"yes\")\n  nmk_header_library(NAME utest\n                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/utest)\n  nmk_binary(NAME common-tests\n             SRCS ${CMAKE_CURRENT_LIST_DIR}/tests/common-tests.cpp\n             DEPS utest nicegraf-internal \"$<IF:$<NOT:$<BOOL:${WIN32}>>,pthread,>\")\nendif()\n\n\n# Build samples only if explicitly requested.\nif (NGF_BUILD_SAMPLES STREQUAL \"yes\")\n    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/misc/common)\n    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/samples/deps/glfw)\n\n    # Ensure we have the required dependencies available.\n    if (TARGET glfw)\n        set_target_properties(glfw PROPERTIES FOLDER \"samples\")\n    else()\n        message(FATAL_ERROR \"Dependencies required for building samples not found. 
Make sure to run `git submodule update` from the repo root.\")\n    endif()\n    \n    # Determine the backend to build the samples with based on the platform, and\n    # any platform-specific source files.\n    set(NGF_PLATFORM_SOURCE \"\")\n    if(APPLE)\n      set(NGF_PLATFORM_SOURCE ${NGF_PLATFORM_SOURCE}\n                              ${CMAKE_CURRENT_LIST_DIR}/samples/common/platform/macos/glfw-cocoa-contentview.mm\n                              ${CMAKE_CURRENT_LIST_DIR}/samples/common/platform/macos/glfw-cocoa-contentview.h)\n    endif()\n    \n    # Set the folder to hold all samples binaries.\n    set(NGF_SAMPLES_OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/samples/binaries)\n\n    # Custom target for generated shaders.\n    file(GLOB shader_files ${CMAKE_CURRENT_LIST_DIR}/samples/shaders/*.hlsl)\n    include(${CMAKE_CURRENT_LIST_DIR}/misc/shaders.cmake)\n    ngf_shaders_target(NAME sample-shaders\n                       OUTPUT_DIR ${NGF_SAMPLES_OUTPUT_DIR}/shaders\n                       NICESHADE_PATH ${CMAKE_CURRENT_LIST_DIR}/samples/deps/niceshade/${NICESHADE_PLATFORM}\n                       SRCS ${shader_files})\n    set_target_properties(sample-shaders PROPERTIES FOLDER \"samples\")\n\n    set(NGF_IMGUI_SRCS ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_draw.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_tables.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_widgets.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_demo.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui.cpp\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui.h\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends/imgui_impl_glfw.h\n                       ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends/imgui_impl_glfw.cpp)\n\t       \n    
if(MSVC)\n        set(NGF_IMGUI_COPTS \"\")\n    else()\n        # Turn off reporting warnings as errors for ImGui on gcc/clang, because it has a lot of them.\n        set(NGF_IMGUI_COPTS \"-Wno-error\")\n    endif()\n\n    nmk_static_library(NAME ngf-imgui\n                SRCS ${NGF_IMGUI_SRCS}\n                DEPS glfw\n                PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui\n                PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui\n                             ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends\n\t            PVT_DEFINES \"GLFW_INCLUDE_NONE\"\n\t           \tCOPTS ${NGF_IMGUI_COPTS})\n    nmk_static_library(NAME ngf-samples-common\n                       SRCS ${CMAKE_CURRENT_LIST_DIR}/samples/common/main.cpp\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/diagnostic-callback.cpp\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/sample-interface.h\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/diagnostic-callback.h\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/imgui-backend.h\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/imgui-backend.cpp\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.h\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.cpp\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.h\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.cpp\n                            ${NGF_PLATFORM_SOURCE}\n                       DEPS ngf-imgui glfw nicegraf-misc-common\n                       PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath\n                                    ${CMAKE_CURRENT_LIST_DIR}/samples/common\n                       PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/common\n                                    
${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath)\n    set_target_properties(ngf-samples-common PROPERTIES FOLDER \"samples\")\n\n    function (ngf_sample)\n        cmake_parse_arguments(SAMPLE \"\" \"NAME\" \"\" ${ARGN})\n        file(GLOB_RECURSE SAMPLE_SRCS ${CMAKE_CURRENT_LIST_DIR}/samples/${SAMPLE_NAME}/*.cpp)\n\n        nmk_binary(NAME ${SAMPLE_NAME}\n                   SRCS ${SAMPLE_SRCS}\n                   DEPS nicegraf ngf-samples-common nicegraf-misc-common nicegraf-util ngf-imgui\n                   PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/${SAMPLE_NAME}\n\t\t           PVT_DEFINES \"GLFW_INCLUDE_NONE\"\n                   OUTPUT_DIR \"${NGF_SAMPLES_OUTPUT_DIR}\")\n        add_dependencies(${SAMPLE_NAME} sample-shaders)                   \n        set_target_properties(${SAMPLE_NAME} PROPERTIES FOLDER \"samples\")\n    endfunction()\n    file(MAKE_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/samples/binaries/shaders)\n    ngf_sample(NAME 01-fullscreen-triangle)\n    ngf_sample(NAME 02-render-to-texture)\n    ngf_sample(NAME 03-uniform-buffers)\n    ngf_sample(NAME 04-texture-sampling)\n    ngf_sample(NAME 05-cubemap)\n    ngf_sample(NAME 06-vertex-attribs)\n    ngf_sample(NAME 07-blinn-phong)    \n    ngf_sample(NAME 08-image-arrays)    \n    ngf_sample(NAME 09-volume-rendering)\n    ngf_sample(NAME 0a-compute-mandelbrot)\n    ngf_sample(NAME 0b-compute-vertices)\n    ngf_sample(NAME 0c-render-to-multisample-texture)\nendif()\n\n# Build image tests only if explicitly requested.\n# These tests run samples headlessly and compare rendered output against golden images.\n# Requires NGF_BUILD_SAMPLES=yes since image tests depend on sample shaders and utilities.\nif (NGF_BUILD_IMAGE_TESTS STREQUAL \"yes\")\n    if (NOT NGF_BUILD_SAMPLES STREQUAL \"yes\")\n        message(FATAL_ERROR \"NGF_BUILD_IMAGE_TESTS requires NGF_BUILD_SAMPLES=yes\")\n    endif()\n\n    # Sample utility sources needed by image tests (excluding main.cpp and factory.cpp)\n    
set(IMAGE_TEST_SAMPLE_UTILS\n        ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.cpp\n    )\n\n    # All sample sources (excluding factory.cpp which is only for interactive samples)\n    set(IMAGE_TEST_SAMPLE_SOURCES\n        ${CMAKE_CURRENT_LIST_DIR}/samples/01-fullscreen-triangle/fullscreen-triangle.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/02-render-to-texture/render-to-texture.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/03-uniform-buffers/uniform-buffers.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/04-texture-sampling/texture-sampling.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/05-cubemap/cubemap.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/06-vertex-attribs/vertex-attribs.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/07-blinn-phong/blinn-phong.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/08-image-arrays/image-arrays.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/09-volume-rendering/volume-rendering.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/0a-compute-mandelbrot/compute-mandelbrot.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/0b-compute-vertices/compute-vertices.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/samples/0c-render-to-multisample-texture/render-to-multisample-texture.cpp\n    )\n\n    # Image test sources\n    set(IMAGE_TEST_SOURCES\n        ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/image-test-main.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/headless-harness.cpp\n        ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/image-comparator.cpp\n        ${IMAGE_TEST_SAMPLE_UTILS}\n        ${IMAGE_TEST_SAMPLE_SOURCES}\n    )\n\n    # Ensure golden images directory exists\n    file(MAKE_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/tests/golden)\n\n    # Create unified image test executable\n    nmk_binary(NAME image-tests\n               SRCS ${IMAGE_TEST_SOURCES}\n               DEPS nicegraf nicegraf-util nicegraf-misc-common ngf-imgui\n               
PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/tests\n                            ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/common\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath\n                            ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui\n               OUTPUT_DIR \"${NGF_SAMPLES_OUTPUT_DIR}\")\n    add_dependencies(image-tests sample-shaders)\n    set_target_properties(image-tests PROPERTIES FOLDER \"tests\")\nendif()\n"
  },
  {
    "path": "README.md",
    "content": "nicegraf\n========\n\n![Run tests](https://github.com/nicebyte/nicegraf/workflows/Run%20tests/badge.svg)\n\n<p align=\"center\">\n<img src=\"https://github.com/nicebyte/nicegraf/blob/master/docs/logo.png?raw=true\" width=\"256\"/>\n</p>\n<p align=\"center\">\nAn abstraction layer for GPU APIs.\n</p>\n<p align=\"center\">\n<a href=\"https://discord.gg/NMbpC9btWA\">Discord</a> · <a href=\"http://wiki.gpfault.net/docs/nicegraf/index.html\">Reference Documentation</a> · <a href=\"https://github.com/nicebyte/nicegraf/tree/master/samples\">Sample Code</a>\n</p>\n\n# platform support matrix\n\n|   | 🟦 | 🐧 | 🍏 |\n|---|---|---|---|\n| 🌋 | 🟩 | 🟩 | 🟨 |\n| 🤘 | 🟥 | 🟥 | 🟩 |\n\n\n\n\n# credits\n\n## current maintainers\n\n* nicebyte · [@nice_byte](http://twitter.com/nice_byte)\n* Bagrat 'dBuger' Dabaghyan · [@dBagrat](http://twitter.com/dBagrat)\n* Andranik 'HedgeTheHog' Melikyan · [@andranik3949](http://twitter.com/andranik3949)\n\n## dependencies\n\n* The Vulkan backend uses SPIRV-Reflect, maintained by the Khronos Group, and the Vulkan Memory Allocator, maintained by AMD.\n* The sample code uses GLFW, maintained by Camilla Berglund, and ImGui, maintained by Omar Cornut.\n\n"
  },
  {
    "path": "build-samples.bat",
    "content": "@echo off\n\necho Downloading binary dependencies and data for samples...\npowershell -Command \"(New-Object Net.WebClient).DownloadFile('https://github.com/nicebyte/nicegraf/releases/download/v0.1.1/nicegraf-samples-data.zip', 'nicegraf-samples-data.zip')\" || (exit /b)\necho Unpacking binary dependencies and data for samples...\npowershell -Command \"Expand-Archive -Force nicegraf-samples-data.zip .\" || (exit /b)\necho Removing temporary files...\ndel nicegraf-samples-data.zip || (exit /b)\necho Downloading library dependencies for samples...\ngit submodule init || (exit /b)\ngit submodule update || (exit /b)\necho Setting up folder for build files...\nif not exist \".\\samples-build-files\" mkdir samples-build-files || (exit /b)\ncd samples-build-files || (exit /b)\necho Generating build files...\ncmake .. -DNGF_BUILD_SAMPLES=\"yes\" || (exit /b)\necho Finished successfully!\npause\n"
  },
  {
    "path": "build-samples.sh",
    "content": "#!/bin/bash\n\nset -e\n\necho \"Downloading binary dependencies for samples...\"\ncurl https://github.com/nicebyte/nicegraf/releases/download/v0.1.1/nicegraf-samples-data.zip -fL -o nicegraf-samples-data.zip\necho \"Unpacking binary dependencies and data for samples...\"\nunzip -u nicegraf-samples-data.zip\nchmod +x ./samples/deps/niceshade/macos/niceshade\nchmod +x ./samples/deps/niceshade/linux/niceshade\necho \"Removing temporary files...\"\nrm -rf nicegraf-samples-data.zip\necho \"Downloading library dependencies for samples...\"\ngit submodule init\ngit submodule update\necho \"Setting up folder for build files...\"\nmkdir -p samples-build-files\ncd samples-build-files\necho \"Generating build files...\"\nif  [ \"`uname -s`\" = \"Darwin\" ]; then\n  NGF_GENERATOR=\"-GXcode\" \nelse\n  NGF_GENERATOR=\nfi\n\ncmake .. -DNGF_BUILD_SAMPLES=\"yes\" -DNGF_BUILD_TESTS=\"yes\" ${NGF_GENERATOR} $@\ncd ..\necho \"Finished successfully!\"\n"
  },
  {
    "path": "build-utils.cmake",
    "content": "#[[\nCopyright (c) 2022 nicegraf contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the “Software”), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n]]\n\n# This function adds a new target and sets some configuration options for it.\n# Parameters:\n#  TYPE - type of the target. 
Must be one of: \n#    - `lib`, for a static library;\n#    - `hdr`, for a header-only library;\n#    - `exe`, for an executable binary.\n#  SRCS - a list of source files for the target.\n#  COPTS - a list of compiler options.\n#  PVT_INCLUDES - a list of paths to add to this target's include paths.\n#  PUB_INCLUDES - a list of paths to add to the include paths of all targets depending on this target.\n#  PVT_DEFINES  - a list of preprocessor definitions to add for this target.\n#  PUB_DEFINES  - a list of preprocessor definitions to add to all targets depending on this target.\n#  OUTPUT_DIR   - the path to the folder where the output for this target shall be stored.\nfunction (nmk_target)\n\tcmake_parse_arguments(TGT \"\" \"NAME;TYPE\" \"SRCS;DEPS;COPTS;PUB_INCLUDES;PVT_INCLUDES;PUB_DEFINES;PUB_DEPS;PVT_DEFINES;OUTPUT_DIR;VS_DEBUGGER_WORKING_DIR\" ${ARGN})\n  if (TGT_TYPE STREQUAL \"lib\")\n    add_library(${TGT_NAME} STATIC ${TGT_SRCS})\n  elseif(TGT_TYPE STREQUAL \"hdr\")    \n    add_library(${TGT_NAME} INTERFACE ${TGT_SRCS})\n  elseif(TGT_TYPE STREQUAL \"exe\")\n    add_executable(${TGT_NAME} ${TGT_SRCS})\n  else()\n    message(FATAL_ERROR \"invalid target type\")\n  endif()\n\n  # Add dependencies.\n  if ( TGT_DEPS )\n    target_link_libraries(${TGT_NAME} PRIVATE ${TGT_DEPS})\n  endif()\n  if ( TGT_PUB_DEPS )\n    target_link_libraries(${TGT_NAME} INTERFACE ${TGT_PUB_DEPS})\n  endif()\n\n  # Add include directories.\n  if ( TGT_PUB_INCLUDES )\n  target_include_directories(${TGT_NAME}\n                             INTERFACE ${TGT_PUB_INCLUDES})\n  endif()\n  if ( TGT_PVT_INCLUDES )\n  target_include_directories(${TGT_NAME}\n                             PRIVATE ${TGT_PVT_INCLUDES})\n  endif()\n  if ( NOT ( TGT_TYPE STREQUAL \"hdr\" ) )\n    target_include_directories(${TGT_NAME}\n                               PRIVATE ${CMAKE_CURRENT_LIST_DIR}/source ${CMAKE_CURRENT_LIST_DIR}/include)\n  endif()\n  target_include_directories(${TGT_NAME}\n                       
      INTERFACE ${CMAKE_CURRENT_LIST_DIR}/include)\n\n  # Add compile-time definitions.\n  if ( TGT_PUB_DEFINES )\n    target_compile_definitions(${TGT_NAME} INTERFACE ${TGT_PUB_DEFINES})\n  endif()\n  if ( TGT_PVT_DEFINES )\n    target_compile_definitions(${TGT_NAME} PRIVATE ${TGT_PVT_DEFINES})    \n  endif()\n   \n  # Add compiler options.\n  if ( NOT ( TGT_TYPE STREQUAL \"hdr\" ) )\n    if ( NICEMAKE_COMMON_COMPILE_OPTS )\n      target_compile_options(${TGT_NAME} PRIVATE ${NICEMAKE_COMMON_COMPILE_OPTS})\n    endif()\n    if ( TGT_COPTS )\n      target_compile_options(${TGT_NAME} PRIVATE ${TGT_COPTS})\n    endif()\n    set_target_properties(${TGT_NAME} PROPERTIES COMPILE_WARNING_AS_ERROR ON)\n  endif()\n\n  # Set output directory.\n  if( TGT_OUTPUT_DIR )\n    set_target_properties(${TGT_NAME} PROPERTIES\n      RUNTIME_OUTPUT_DIRECTORY \"${TGT_OUTPUT_DIR}\")\n    set_target_properties(${TGT_NAME} PROPERTIES\n      RUNTIME_OUTPUT_DIRECTORY_DEBUG \"${TGT_OUTPUT_DIR}\")\n    set_target_properties(${TGT_NAME} PROPERTIES\n      RUNTIME_OUTPUT_DIRECTORY_RELEASE \"${TGT_OUTPUT_DIR}\")  \n    set_target_properties(${TGT_NAME} PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY \"${TGT_OUTPUT_DIR}\")      \n  endif()\nendfunction()\n\n# Shortcut for adding a new library target.\nfunction (nmk_static_library)\n   nmk_target(TYPE lib ${ARGN})\nendfunction()\n\n# Shortcut for adding a new header-only library target.\nfunction (nmk_header_library)\n   nmk_target(TYPE hdr ${ARGN})\nendfunction()\n\n# Shortcut for adding a new executable target.\nfunction (nmk_binary)\n   nmk_target(TYPE exe ${ARGN})\nendfunction()\n"
  },
  {
    "path": "deps/SPIRV-reflect/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.14.0)\nproject(spvreflect)\n\nset(CMAKE_C_STANDARD 99)\n\nadd_library(spvreflect STATIC\n  ${CMAKE_CURRENT_LIST_DIR}/include/spirv/unified1/spirv.h\n  ${CMAKE_CURRENT_LIST_DIR}/spirv_reflect.h\n  ${CMAKE_CURRENT_LIST_DIR}/spirv_reflect.c)\n\ntarget_include_directories(spvreflect SYSTEM PUBLIC ${CMAKE_CURRENT_LIST_DIR})\n"
  },
  {
    "path": "deps/SPIRV-reflect/include/spirv/unified1/spirv.h",
    "content": "/*\n** Copyright (c) 2014-2020 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/\n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n/*\n** This header is automatically generated by the same tool that creates\n** the Binary Section of the SPIR-V specification.\n*/\n\n/*\n** Enumeration tokens for SPIR-V, in various styles:\n**   C, C++, C++11, JSON, Lua, Python, C#, D, Beef\n**\n** - C will have tokens with a \"Spv\" prefix, e.g.: SpvSourceLanguageGLSL\n** - C++ will have tokens in the \"spv\" name space, e.g.: spv::SourceLanguageGLSL\n** - C++11 will use enum classes in the spv namespace, e.g.:\n*spv::SourceLanguage::GLSL\n** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL\n** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']\n** - C# will use enum classes in the Specification class located in the \"Spv\"\n*namespace,\n**     e.g.: Spv.Specification.SourceLanguage.GLSL\n** - D will have tokens under the \"spv\" module, e.g: spv.SourceLanguage.GLSL\n** - Beef will use enum classes in the Specification class located in the \"Spv\"\n*namespace,\n**     e.g.: Spv.Specification.SourceLanguage.GLSL\n**\n** Some tokens act like mask values, which can be OR'd together,\n** while others are mutually exclusive.  
The mask-like ones have\n** \"Mask\" in their name, and a parallel enum that has the shift\n** amount (1 << x) for each corresponding enumerant.\n*/\n\n#ifndef spirv_H\n#define spirv_H\n\ntypedef unsigned int SpvId;\n\n#define SPV_VERSION 0x10600\n#define SPV_REVISION 1\n\nstatic const unsigned int SpvMagicNumber = 0x07230203;\nstatic const unsigned int SpvVersion = 0x00010600;\nstatic const unsigned int SpvRevision = 1;\nstatic const unsigned int SpvOpCodeMask = 0xffff;\nstatic const unsigned int SpvWordCountShift = 16;\n\ntypedef enum SpvSourceLanguage_ {\n  SpvSourceLanguageUnknown = 0,\n  SpvSourceLanguageESSL = 1,\n  SpvSourceLanguageGLSL = 2,\n  SpvSourceLanguageOpenCL_C = 3,\n  SpvSourceLanguageOpenCL_CPP = 4,\n  SpvSourceLanguageHLSL = 5,\n  SpvSourceLanguageCPP_for_OpenCL = 6,\n  SpvSourceLanguageSYCL = 7,\n  SpvSourceLanguageHERO_C = 8,\n  SpvSourceLanguageNZSL = 9,\n  SpvSourceLanguageMax = 0x7fffffff,\n} SpvSourceLanguage;\n\ntypedef enum SpvExecutionModel_ {\n  SpvExecutionModelVertex = 0,\n  SpvExecutionModelTessellationControl = 1,\n  SpvExecutionModelTessellationEvaluation = 2,\n  SpvExecutionModelGeometry = 3,\n  SpvExecutionModelFragment = 4,\n  SpvExecutionModelGLCompute = 5,\n  SpvExecutionModelKernel = 6,\n  SpvExecutionModelTaskNV = 5267,\n  SpvExecutionModelMeshNV = 5268,\n  SpvExecutionModelRayGenerationKHR = 5313,\n  SpvExecutionModelRayGenerationNV = 5313,\n  SpvExecutionModelIntersectionKHR = 5314,\n  SpvExecutionModelIntersectionNV = 5314,\n  SpvExecutionModelAnyHitKHR = 5315,\n  SpvExecutionModelAnyHitNV = 5315,\n  SpvExecutionModelClosestHitKHR = 5316,\n  SpvExecutionModelClosestHitNV = 5316,\n  SpvExecutionModelMissKHR = 5317,\n  SpvExecutionModelMissNV = 5317,\n  SpvExecutionModelCallableKHR = 5318,\n  SpvExecutionModelCallableNV = 5318,\n  SpvExecutionModelTaskEXT = 5364,\n  SpvExecutionModelMeshEXT = 5365,\n  SpvExecutionModelMax = 0x7fffffff,\n} SpvExecutionModel;\n\ntypedef enum SpvAddressingModel_ {\n  SpvAddressingModelLogical 
= 0,\n  SpvAddressingModelPhysical32 = 1,\n  SpvAddressingModelPhysical64 = 2,\n  SpvAddressingModelPhysicalStorageBuffer64 = 5348,\n  SpvAddressingModelPhysicalStorageBuffer64EXT = 5348,\n  SpvAddressingModelMax = 0x7fffffff,\n} SpvAddressingModel;\n\ntypedef enum SpvMemoryModel_ {\n  SpvMemoryModelSimple = 0,\n  SpvMemoryModelGLSL450 = 1,\n  SpvMemoryModelOpenCL = 2,\n  SpvMemoryModelVulkan = 3,\n  SpvMemoryModelVulkanKHR = 3,\n  SpvMemoryModelMax = 0x7fffffff,\n} SpvMemoryModel;\n\ntypedef enum SpvExecutionMode_ {\n  SpvExecutionModeInvocations = 0,\n  SpvExecutionModeSpacingEqual = 1,\n  SpvExecutionModeSpacingFractionalEven = 2,\n  SpvExecutionModeSpacingFractionalOdd = 3,\n  SpvExecutionModeVertexOrderCw = 4,\n  SpvExecutionModeVertexOrderCcw = 5,\n  SpvExecutionModePixelCenterInteger = 6,\n  SpvExecutionModeOriginUpperLeft = 7,\n  SpvExecutionModeOriginLowerLeft = 8,\n  SpvExecutionModeEarlyFragmentTests = 9,\n  SpvExecutionModePointMode = 10,\n  SpvExecutionModeXfb = 11,\n  SpvExecutionModeDepthReplacing = 12,\n  SpvExecutionModeDepthGreater = 14,\n  SpvExecutionModeDepthLess = 15,\n  SpvExecutionModeDepthUnchanged = 16,\n  SpvExecutionModeLocalSize = 17,\n  SpvExecutionModeLocalSizeHint = 18,\n  SpvExecutionModeInputPoints = 19,\n  SpvExecutionModeInputLines = 20,\n  SpvExecutionModeInputLinesAdjacency = 21,\n  SpvExecutionModeTriangles = 22,\n  SpvExecutionModeInputTrianglesAdjacency = 23,\n  SpvExecutionModeQuads = 24,\n  SpvExecutionModeIsolines = 25,\n  SpvExecutionModeOutputVertices = 26,\n  SpvExecutionModeOutputPoints = 27,\n  SpvExecutionModeOutputLineStrip = 28,\n  SpvExecutionModeOutputTriangleStrip = 29,\n  SpvExecutionModeVecTypeHint = 30,\n  SpvExecutionModeContractionOff = 31,\n  SpvExecutionModeInitializer = 33,\n  SpvExecutionModeFinalizer = 34,\n  SpvExecutionModeSubgroupSize = 35,\n  SpvExecutionModeSubgroupsPerWorkgroup = 36,\n  SpvExecutionModeSubgroupsPerWorkgroupId = 37,\n  SpvExecutionModeLocalSizeId = 38,\n  
SpvExecutionModeLocalSizeHintId = 39,\n  SpvExecutionModeNonCoherentColorAttachmentReadEXT = 4169,\n  SpvExecutionModeNonCoherentDepthAttachmentReadEXT = 4170,\n  SpvExecutionModeNonCoherentStencilAttachmentReadEXT = 4171,\n  SpvExecutionModeSubgroupUniformControlFlowKHR = 4421,\n  SpvExecutionModePostDepthCoverage = 4446,\n  SpvExecutionModeDenormPreserve = 4459,\n  SpvExecutionModeDenormFlushToZero = 4460,\n  SpvExecutionModeSignedZeroInfNanPreserve = 4461,\n  SpvExecutionModeRoundingModeRTE = 4462,\n  SpvExecutionModeRoundingModeRTZ = 4463,\n  SpvExecutionModeEarlyAndLateFragmentTestsAMD = 5017,\n  SpvExecutionModeStencilRefReplacingEXT = 5027,\n  SpvExecutionModeStencilRefUnchangedFrontAMD = 5079,\n  SpvExecutionModeStencilRefGreaterFrontAMD = 5080,\n  SpvExecutionModeStencilRefLessFrontAMD = 5081,\n  SpvExecutionModeStencilRefUnchangedBackAMD = 5082,\n  SpvExecutionModeStencilRefGreaterBackAMD = 5083,\n  SpvExecutionModeStencilRefLessBackAMD = 5084,\n  SpvExecutionModeOutputLinesEXT = 5269,\n  SpvExecutionModeOutputLinesNV = 5269,\n  SpvExecutionModeOutputPrimitivesEXT = 5270,\n  SpvExecutionModeOutputPrimitivesNV = 5270,\n  SpvExecutionModeDerivativeGroupQuadsNV = 5289,\n  SpvExecutionModeDerivativeGroupLinearNV = 5290,\n  SpvExecutionModeOutputTrianglesEXT = 5298,\n  SpvExecutionModeOutputTrianglesNV = 5298,\n  SpvExecutionModePixelInterlockOrderedEXT = 5366,\n  SpvExecutionModePixelInterlockUnorderedEXT = 5367,\n  SpvExecutionModeSampleInterlockOrderedEXT = 5368,\n  SpvExecutionModeSampleInterlockUnorderedEXT = 5369,\n  SpvExecutionModeShadingRateInterlockOrderedEXT = 5370,\n  SpvExecutionModeShadingRateInterlockUnorderedEXT = 5371,\n  SpvExecutionModeSharedLocalMemorySizeINTEL = 5618,\n  SpvExecutionModeRoundingModeRTPINTEL = 5620,\n  SpvExecutionModeRoundingModeRTNINTEL = 5621,\n  SpvExecutionModeFloatingPointModeALTINTEL = 5622,\n  SpvExecutionModeFloatingPointModeIEEEINTEL = 5623,\n  SpvExecutionModeMaxWorkgroupSizeINTEL = 5893,\n  
SpvExecutionModeMaxWorkDimINTEL = 5894,\n  SpvExecutionModeNoGlobalOffsetINTEL = 5895,\n  SpvExecutionModeNumSIMDWorkitemsINTEL = 5896,\n  SpvExecutionModeSchedulerTargetFmaxMhzINTEL = 5903,\n  SpvExecutionModeStreamingInterfaceINTEL = 6154,\n  SpvExecutionModeRegisterMapInterfaceINTEL = 6160,\n  SpvExecutionModeNamedBarrierCountINTEL = 6417,\n  SpvExecutionModeMax = 0x7fffffff,\n} SpvExecutionMode;\n\ntypedef enum SpvStorageClass_ {\n  SpvStorageClassUniformConstant = 0,\n  SpvStorageClassInput = 1,\n  SpvStorageClassUniform = 2,\n  SpvStorageClassOutput = 3,\n  SpvStorageClassWorkgroup = 4,\n  SpvStorageClassCrossWorkgroup = 5,\n  SpvStorageClassPrivate = 6,\n  SpvStorageClassFunction = 7,\n  SpvStorageClassGeneric = 8,\n  SpvStorageClassPushConstant = 9,\n  SpvStorageClassAtomicCounter = 10,\n  SpvStorageClassImage = 11,\n  SpvStorageClassStorageBuffer = 12,\n  SpvStorageClassTileImageEXT = 4172,\n  SpvStorageClassCallableDataKHR = 5328,\n  SpvStorageClassCallableDataNV = 5328,\n  SpvStorageClassIncomingCallableDataKHR = 5329,\n  SpvStorageClassIncomingCallableDataNV = 5329,\n  SpvStorageClassRayPayloadKHR = 5338,\n  SpvStorageClassRayPayloadNV = 5338,\n  SpvStorageClassHitAttributeKHR = 5339,\n  SpvStorageClassHitAttributeNV = 5339,\n  SpvStorageClassIncomingRayPayloadKHR = 5342,\n  SpvStorageClassIncomingRayPayloadNV = 5342,\n  SpvStorageClassShaderRecordBufferKHR = 5343,\n  SpvStorageClassShaderRecordBufferNV = 5343,\n  SpvStorageClassPhysicalStorageBuffer = 5349,\n  SpvStorageClassPhysicalStorageBufferEXT = 5349,\n  SpvStorageClassHitObjectAttributeNV = 5385,\n  SpvStorageClassTaskPayloadWorkgroupEXT = 5402,\n  SpvStorageClassCodeSectionINTEL = 5605,\n  SpvStorageClassDeviceOnlyINTEL = 5936,\n  SpvStorageClassHostOnlyINTEL = 5937,\n  SpvStorageClassMax = 0x7fffffff,\n} SpvStorageClass;\n\ntypedef enum SpvDim_ {\n  SpvDim1D = 0,\n  SpvDim2D = 1,\n  SpvDim3D = 2,\n  SpvDimCube = 3,\n  SpvDimRect = 4,\n  SpvDimBuffer = 5,\n  SpvDimSubpassData = 6,\n  
SpvDimTileImageDataEXT = 4173,\n  SpvDimMax = 0x7fffffff,\n} SpvDim;\n\ntypedef enum SpvSamplerAddressingMode_ {\n  SpvSamplerAddressingModeNone = 0,\n  SpvSamplerAddressingModeClampToEdge = 1,\n  SpvSamplerAddressingModeClamp = 2,\n  SpvSamplerAddressingModeRepeat = 3,\n  SpvSamplerAddressingModeRepeatMirrored = 4,\n  SpvSamplerAddressingModeMax = 0x7fffffff,\n} SpvSamplerAddressingMode;\n\ntypedef enum SpvSamplerFilterMode_ {\n  SpvSamplerFilterModeNearest = 0,\n  SpvSamplerFilterModeLinear = 1,\n  SpvSamplerFilterModeMax = 0x7fffffff,\n} SpvSamplerFilterMode;\n\ntypedef enum SpvImageFormat_ {\n  SpvImageFormatUnknown = 0,\n  SpvImageFormatRgba32f = 1,\n  SpvImageFormatRgba16f = 2,\n  SpvImageFormatR32f = 3,\n  SpvImageFormatRgba8 = 4,\n  SpvImageFormatRgba8Snorm = 5,\n  SpvImageFormatRg32f = 6,\n  SpvImageFormatRg16f = 7,\n  SpvImageFormatR11fG11fB10f = 8,\n  SpvImageFormatR16f = 9,\n  SpvImageFormatRgba16 = 10,\n  SpvImageFormatRgb10A2 = 11,\n  SpvImageFormatRg16 = 12,\n  SpvImageFormatRg8 = 13,\n  SpvImageFormatR16 = 14,\n  SpvImageFormatR8 = 15,\n  SpvImageFormatRgba16Snorm = 16,\n  SpvImageFormatRg16Snorm = 17,\n  SpvImageFormatRg8Snorm = 18,\n  SpvImageFormatR16Snorm = 19,\n  SpvImageFormatR8Snorm = 20,\n  SpvImageFormatRgba32i = 21,\n  SpvImageFormatRgba16i = 22,\n  SpvImageFormatRgba8i = 23,\n  SpvImageFormatR32i = 24,\n  SpvImageFormatRg32i = 25,\n  SpvImageFormatRg16i = 26,\n  SpvImageFormatRg8i = 27,\n  SpvImageFormatR16i = 28,\n  SpvImageFormatR8i = 29,\n  SpvImageFormatRgba32ui = 30,\n  SpvImageFormatRgba16ui = 31,\n  SpvImageFormatRgba8ui = 32,\n  SpvImageFormatR32ui = 33,\n  SpvImageFormatRgb10a2ui = 34,\n  SpvImageFormatRg32ui = 35,\n  SpvImageFormatRg16ui = 36,\n  SpvImageFormatRg8ui = 37,\n  SpvImageFormatR16ui = 38,\n  SpvImageFormatR8ui = 39,\n  SpvImageFormatR64ui = 40,\n  SpvImageFormatR64i = 41,\n  SpvImageFormatMax = 0x7fffffff,\n} SpvImageFormat;\n\ntypedef enum SpvImageChannelOrder_ {\n  SpvImageChannelOrderR = 0,\n  
SpvImageChannelOrderA = 1,\n  SpvImageChannelOrderRG = 2,\n  SpvImageChannelOrderRA = 3,\n  SpvImageChannelOrderRGB = 4,\n  SpvImageChannelOrderRGBA = 5,\n  SpvImageChannelOrderBGRA = 6,\n  SpvImageChannelOrderARGB = 7,\n  SpvImageChannelOrderIntensity = 8,\n  SpvImageChannelOrderLuminance = 9,\n  SpvImageChannelOrderRx = 10,\n  SpvImageChannelOrderRGx = 11,\n  SpvImageChannelOrderRGBx = 12,\n  SpvImageChannelOrderDepth = 13,\n  SpvImageChannelOrderDepthStencil = 14,\n  SpvImageChannelOrdersRGB = 15,\n  SpvImageChannelOrdersRGBx = 16,\n  SpvImageChannelOrdersRGBA = 17,\n  SpvImageChannelOrdersBGRA = 18,\n  SpvImageChannelOrderABGR = 19,\n  SpvImageChannelOrderMax = 0x7fffffff,\n} SpvImageChannelOrder;\n\ntypedef enum SpvImageChannelDataType_ {\n  SpvImageChannelDataTypeSnormInt8 = 0,\n  SpvImageChannelDataTypeSnormInt16 = 1,\n  SpvImageChannelDataTypeUnormInt8 = 2,\n  SpvImageChannelDataTypeUnormInt16 = 3,\n  SpvImageChannelDataTypeUnormShort565 = 4,\n  SpvImageChannelDataTypeUnormShort555 = 5,\n  SpvImageChannelDataTypeUnormInt101010 = 6,\n  SpvImageChannelDataTypeSignedInt8 = 7,\n  SpvImageChannelDataTypeSignedInt16 = 8,\n  SpvImageChannelDataTypeSignedInt32 = 9,\n  SpvImageChannelDataTypeUnsignedInt8 = 10,\n  SpvImageChannelDataTypeUnsignedInt16 = 11,\n  SpvImageChannelDataTypeUnsignedInt32 = 12,\n  SpvImageChannelDataTypeHalfFloat = 13,\n  SpvImageChannelDataTypeFloat = 14,\n  SpvImageChannelDataTypeUnormInt24 = 15,\n  SpvImageChannelDataTypeUnormInt101010_2 = 16,\n  SpvImageChannelDataTypeUnsignedIntRaw10EXT = 19,\n  SpvImageChannelDataTypeUnsignedIntRaw12EXT = 20,\n  SpvImageChannelDataTypeMax = 0x7fffffff,\n} SpvImageChannelDataType;\n\ntypedef enum SpvImageOperandsShift_ {\n  SpvImageOperandsBiasShift = 0,\n  SpvImageOperandsLodShift = 1,\n  SpvImageOperandsGradShift = 2,\n  SpvImageOperandsConstOffsetShift = 3,\n  SpvImageOperandsOffsetShift = 4,\n  SpvImageOperandsConstOffsetsShift = 5,\n  SpvImageOperandsSampleShift = 6,\n  SpvImageOperandsMinLodShift = 
7,\n  SpvImageOperandsMakeTexelAvailableShift = 8,\n  SpvImageOperandsMakeTexelAvailableKHRShift = 8,\n  SpvImageOperandsMakeTexelVisibleShift = 9,\n  SpvImageOperandsMakeTexelVisibleKHRShift = 9,\n  SpvImageOperandsNonPrivateTexelShift = 10,\n  SpvImageOperandsNonPrivateTexelKHRShift = 10,\n  SpvImageOperandsVolatileTexelShift = 11,\n  SpvImageOperandsVolatileTexelKHRShift = 11,\n  SpvImageOperandsSignExtendShift = 12,\n  SpvImageOperandsZeroExtendShift = 13,\n  SpvImageOperandsNontemporalShift = 14,\n  SpvImageOperandsOffsetsShift = 16,\n  SpvImageOperandsMax = 0x7fffffff,\n} SpvImageOperandsShift;\n\ntypedef enum SpvImageOperandsMask_ {\n  SpvImageOperandsMaskNone = 0,\n  SpvImageOperandsBiasMask = 0x00000001,\n  SpvImageOperandsLodMask = 0x00000002,\n  SpvImageOperandsGradMask = 0x00000004,\n  SpvImageOperandsConstOffsetMask = 0x00000008,\n  SpvImageOperandsOffsetMask = 0x00000010,\n  SpvImageOperandsConstOffsetsMask = 0x00000020,\n  SpvImageOperandsSampleMask = 0x00000040,\n  SpvImageOperandsMinLodMask = 0x00000080,\n  SpvImageOperandsMakeTexelAvailableMask = 0x00000100,\n  SpvImageOperandsMakeTexelAvailableKHRMask = 0x00000100,\n  SpvImageOperandsMakeTexelVisibleMask = 0x00000200,\n  SpvImageOperandsMakeTexelVisibleKHRMask = 0x00000200,\n  SpvImageOperandsNonPrivateTexelMask = 0x00000400,\n  SpvImageOperandsNonPrivateTexelKHRMask = 0x00000400,\n  SpvImageOperandsVolatileTexelMask = 0x00000800,\n  SpvImageOperandsVolatileTexelKHRMask = 0x00000800,\n  SpvImageOperandsSignExtendMask = 0x00001000,\n  SpvImageOperandsZeroExtendMask = 0x00002000,\n  SpvImageOperandsNontemporalMask = 0x00004000,\n  SpvImageOperandsOffsetsMask = 0x00010000,\n} SpvImageOperandsMask;\n\ntypedef enum SpvFPFastMathModeShift_ {\n  SpvFPFastMathModeNotNaNShift = 0,\n  SpvFPFastMathModeNotInfShift = 1,\n  SpvFPFastMathModeNSZShift = 2,\n  SpvFPFastMathModeAllowRecipShift = 3,\n  SpvFPFastMathModeFastShift = 4,\n  SpvFPFastMathModeAllowContractFastINTELShift = 16,\n  
SpvFPFastMathModeAllowReassocINTELShift = 17,\n  SpvFPFastMathModeMax = 0x7fffffff,\n} SpvFPFastMathModeShift;\n\ntypedef enum SpvFPFastMathModeMask_ {\n  SpvFPFastMathModeMaskNone = 0,\n  SpvFPFastMathModeNotNaNMask = 0x00000001,\n  SpvFPFastMathModeNotInfMask = 0x00000002,\n  SpvFPFastMathModeNSZMask = 0x00000004,\n  SpvFPFastMathModeAllowRecipMask = 0x00000008,\n  SpvFPFastMathModeFastMask = 0x00000010,\n  SpvFPFastMathModeAllowContractFastINTELMask = 0x00010000,\n  SpvFPFastMathModeAllowReassocINTELMask = 0x00020000,\n} SpvFPFastMathModeMask;\n\ntypedef enum SpvFPRoundingMode_ {\n  SpvFPRoundingModeRTE = 0,\n  SpvFPRoundingModeRTZ = 1,\n  SpvFPRoundingModeRTP = 2,\n  SpvFPRoundingModeRTN = 3,\n  SpvFPRoundingModeMax = 0x7fffffff,\n} SpvFPRoundingMode;\n\ntypedef enum SpvLinkageType_ {\n  SpvLinkageTypeExport = 0,\n  SpvLinkageTypeImport = 1,\n  SpvLinkageTypeLinkOnceODR = 2,\n  SpvLinkageTypeMax = 0x7fffffff,\n} SpvLinkageType;\n\ntypedef enum SpvAccessQualifier_ {\n  SpvAccessQualifierReadOnly = 0,\n  SpvAccessQualifierWriteOnly = 1,\n  SpvAccessQualifierReadWrite = 2,\n  SpvAccessQualifierMax = 0x7fffffff,\n} SpvAccessQualifier;\n\ntypedef enum SpvFunctionParameterAttribute_ {\n  SpvFunctionParameterAttributeZext = 0,\n  SpvFunctionParameterAttributeSext = 1,\n  SpvFunctionParameterAttributeByVal = 2,\n  SpvFunctionParameterAttributeSret = 3,\n  SpvFunctionParameterAttributeNoAlias = 4,\n  SpvFunctionParameterAttributeNoCapture = 5,\n  SpvFunctionParameterAttributeNoWrite = 6,\n  SpvFunctionParameterAttributeNoReadWrite = 7,\n  SpvFunctionParameterAttributeRuntimeAlignedINTEL = 5940,\n  SpvFunctionParameterAttributeMax = 0x7fffffff,\n} SpvFunctionParameterAttribute;\n\ntypedef enum SpvDecoration_ {\n  SpvDecorationRelaxedPrecision = 0,\n  SpvDecorationSpecId = 1,\n  SpvDecorationBlock = 2,\n  SpvDecorationBufferBlock = 3,\n  SpvDecorationRowMajor = 4,\n  SpvDecorationColMajor = 5,\n  SpvDecorationArrayStride = 6,\n  SpvDecorationMatrixStride = 7,\n  
SpvDecorationGLSLShared = 8,\n  SpvDecorationGLSLPacked = 9,\n  SpvDecorationCPacked = 10,\n  SpvDecorationBuiltIn = 11,\n  SpvDecorationNoPerspective = 13,\n  SpvDecorationFlat = 14,\n  SpvDecorationPatch = 15,\n  SpvDecorationCentroid = 16,\n  SpvDecorationSample = 17,\n  SpvDecorationInvariant = 18,\n  SpvDecorationRestrict = 19,\n  SpvDecorationAliased = 20,\n  SpvDecorationVolatile = 21,\n  SpvDecorationConstant = 22,\n  SpvDecorationCoherent = 23,\n  SpvDecorationNonWritable = 24,\n  SpvDecorationNonReadable = 25,\n  SpvDecorationUniform = 26,\n  SpvDecorationUniformId = 27,\n  SpvDecorationSaturatedConversion = 28,\n  SpvDecorationStream = 29,\n  SpvDecorationLocation = 30,\n  SpvDecorationComponent = 31,\n  SpvDecorationIndex = 32,\n  SpvDecorationBinding = 33,\n  SpvDecorationDescriptorSet = 34,\n  SpvDecorationOffset = 35,\n  SpvDecorationXfbBuffer = 36,\n  SpvDecorationXfbStride = 37,\n  SpvDecorationFuncParamAttr = 38,\n  SpvDecorationFPRoundingMode = 39,\n  SpvDecorationFPFastMathMode = 40,\n  SpvDecorationLinkageAttributes = 41,\n  SpvDecorationNoContraction = 42,\n  SpvDecorationInputAttachmentIndex = 43,\n  SpvDecorationAlignment = 44,\n  SpvDecorationMaxByteOffset = 45,\n  SpvDecorationAlignmentId = 46,\n  SpvDecorationMaxByteOffsetId = 47,\n  SpvDecorationNoSignedWrap = 4469,\n  SpvDecorationNoUnsignedWrap = 4470,\n  SpvDecorationWeightTextureQCOM = 4487,\n  SpvDecorationBlockMatchTextureQCOM = 4488,\n  SpvDecorationExplicitInterpAMD = 4999,\n  SpvDecorationOverrideCoverageNV = 5248,\n  SpvDecorationPassthroughNV = 5250,\n  SpvDecorationViewportRelativeNV = 5252,\n  SpvDecorationSecondaryViewportRelativeNV = 5256,\n  SpvDecorationPerPrimitiveEXT = 5271,\n  SpvDecorationPerPrimitiveNV = 5271,\n  SpvDecorationPerViewNV = 5272,\n  SpvDecorationPerTaskNV = 5273,\n  SpvDecorationPerVertexKHR = 5285,\n  SpvDecorationPerVertexNV = 5285,\n  SpvDecorationNonUniform = 5300,\n  SpvDecorationNonUniformEXT = 5300,\n  SpvDecorationRestrictPointer = 5355,\n  
SpvDecorationRestrictPointerEXT = 5355,\n  SpvDecorationAliasedPointer = 5356,\n  SpvDecorationAliasedPointerEXT = 5356,\n  SpvDecorationHitObjectShaderRecordBufferNV = 5386,\n  SpvDecorationBindlessSamplerNV = 5398,\n  SpvDecorationBindlessImageNV = 5399,\n  SpvDecorationBoundSamplerNV = 5400,\n  SpvDecorationBoundImageNV = 5401,\n  SpvDecorationSIMTCallINTEL = 5599,\n  SpvDecorationReferencedIndirectlyINTEL = 5602,\n  SpvDecorationClobberINTEL = 5607,\n  SpvDecorationSideEffectsINTEL = 5608,\n  SpvDecorationVectorComputeVariableINTEL = 5624,\n  SpvDecorationFuncParamIOKindINTEL = 5625,\n  SpvDecorationVectorComputeFunctionINTEL = 5626,\n  SpvDecorationStackCallINTEL = 5627,\n  SpvDecorationGlobalVariableOffsetINTEL = 5628,\n  SpvDecorationCounterBuffer = 5634,\n  SpvDecorationHlslCounterBufferGOOGLE = 5634,\n  SpvDecorationHlslSemanticGOOGLE = 5635,\n  SpvDecorationUserSemantic = 5635,\n  SpvDecorationUserTypeGOOGLE = 5636,\n  SpvDecorationFunctionRoundingModeINTEL = 5822,\n  SpvDecorationFunctionDenormModeINTEL = 5823,\n  SpvDecorationRegisterINTEL = 5825,\n  SpvDecorationMemoryINTEL = 5826,\n  SpvDecorationNumbanksINTEL = 5827,\n  SpvDecorationBankwidthINTEL = 5828,\n  SpvDecorationMaxPrivateCopiesINTEL = 5829,\n  SpvDecorationSinglepumpINTEL = 5830,\n  SpvDecorationDoublepumpINTEL = 5831,\n  SpvDecorationMaxReplicatesINTEL = 5832,\n  SpvDecorationSimpleDualPortINTEL = 5833,\n  SpvDecorationMergeINTEL = 5834,\n  SpvDecorationBankBitsINTEL = 5835,\n  SpvDecorationForcePow2DepthINTEL = 5836,\n  SpvDecorationBurstCoalesceINTEL = 5899,\n  SpvDecorationCacheSizeINTEL = 5900,\n  SpvDecorationDontStaticallyCoalesceINTEL = 5901,\n  SpvDecorationPrefetchINTEL = 5902,\n  SpvDecorationStallEnableINTEL = 5905,\n  SpvDecorationFuseLoopsInFunctionINTEL = 5907,\n  SpvDecorationMathOpDSPModeINTEL = 5909,\n  SpvDecorationAliasScopeINTEL = 5914,\n  SpvDecorationNoAliasINTEL = 5915,\n  SpvDecorationInitiationIntervalINTEL = 5917,\n  SpvDecorationMaxConcurrencyINTEL = 5918,\n  
SpvDecorationPipelineEnableINTEL = 5919,\n  SpvDecorationBufferLocationINTEL = 5921,\n  SpvDecorationIOPipeStorageINTEL = 5944,\n  SpvDecorationFunctionFloatingPointModeINTEL = 6080,\n  SpvDecorationSingleElementVectorINTEL = 6085,\n  SpvDecorationVectorComputeCallableFunctionINTEL = 6087,\n  SpvDecorationMediaBlockIOINTEL = 6140,\n  SpvDecorationLatencyControlLabelINTEL = 6172,\n  SpvDecorationLatencyControlConstraintINTEL = 6173,\n  SpvDecorationConduitKernelArgumentINTEL = 6175,\n  SpvDecorationRegisterMapKernelArgumentINTEL = 6176,\n  SpvDecorationMMHostInterfaceAddressWidthINTEL = 6177,\n  SpvDecorationMMHostInterfaceDataWidthINTEL = 6178,\n  SpvDecorationMMHostInterfaceLatencyINTEL = 6179,\n  SpvDecorationMMHostInterfaceReadWriteModeINTEL = 6180,\n  SpvDecorationMMHostInterfaceMaxBurstINTEL = 6181,\n  SpvDecorationMMHostInterfaceWaitRequestINTEL = 6182,\n  SpvDecorationStableKernelArgumentINTEL = 6183,\n  SpvDecorationMax = 0x7fffffff,\n} SpvDecoration;\n\ntypedef enum SpvBuiltIn_ {\n  SpvBuiltInPosition = 0,\n  SpvBuiltInPointSize = 1,\n  SpvBuiltInClipDistance = 3,\n  SpvBuiltInCullDistance = 4,\n  SpvBuiltInVertexId = 5,\n  SpvBuiltInInstanceId = 6,\n  SpvBuiltInPrimitiveId = 7,\n  SpvBuiltInInvocationId = 8,\n  SpvBuiltInLayer = 9,\n  SpvBuiltInViewportIndex = 10,\n  SpvBuiltInTessLevelOuter = 11,\n  SpvBuiltInTessLevelInner = 12,\n  SpvBuiltInTessCoord = 13,\n  SpvBuiltInPatchVertices = 14,\n  SpvBuiltInFragCoord = 15,\n  SpvBuiltInPointCoord = 16,\n  SpvBuiltInFrontFacing = 17,\n  SpvBuiltInSampleId = 18,\n  SpvBuiltInSamplePosition = 19,\n  SpvBuiltInSampleMask = 20,\n  SpvBuiltInFragDepth = 22,\n  SpvBuiltInHelperInvocation = 23,\n  SpvBuiltInNumWorkgroups = 24,\n  SpvBuiltInWorkgroupSize = 25,\n  SpvBuiltInWorkgroupId = 26,\n  SpvBuiltInLocalInvocationId = 27,\n  SpvBuiltInGlobalInvocationId = 28,\n  SpvBuiltInLocalInvocationIndex = 29,\n  SpvBuiltInWorkDim = 30,\n  SpvBuiltInGlobalSize = 31,\n  SpvBuiltInEnqueuedWorkgroupSize = 32,\n  
SpvBuiltInGlobalOffset = 33,\n  SpvBuiltInGlobalLinearId = 34,\n  SpvBuiltInSubgroupSize = 36,\n  SpvBuiltInSubgroupMaxSize = 37,\n  SpvBuiltInNumSubgroups = 38,\n  SpvBuiltInNumEnqueuedSubgroups = 39,\n  SpvBuiltInSubgroupId = 40,\n  SpvBuiltInSubgroupLocalInvocationId = 41,\n  SpvBuiltInVertexIndex = 42,\n  SpvBuiltInInstanceIndex = 43,\n  SpvBuiltInCoreIDARM = 4160,\n  SpvBuiltInCoreCountARM = 4161,\n  SpvBuiltInCoreMaxIDARM = 4162,\n  SpvBuiltInWarpIDARM = 4163,\n  SpvBuiltInWarpMaxIDARM = 4164,\n  SpvBuiltInSubgroupEqMask = 4416,\n  SpvBuiltInSubgroupEqMaskKHR = 4416,\n  SpvBuiltInSubgroupGeMask = 4417,\n  SpvBuiltInSubgroupGeMaskKHR = 4417,\n  SpvBuiltInSubgroupGtMask = 4418,\n  SpvBuiltInSubgroupGtMaskKHR = 4418,\n  SpvBuiltInSubgroupLeMask = 4419,\n  SpvBuiltInSubgroupLeMaskKHR = 4419,\n  SpvBuiltInSubgroupLtMask = 4420,\n  SpvBuiltInSubgroupLtMaskKHR = 4420,\n  SpvBuiltInBaseVertex = 4424,\n  SpvBuiltInBaseInstance = 4425,\n  SpvBuiltInDrawIndex = 4426,\n  SpvBuiltInPrimitiveShadingRateKHR = 4432,\n  SpvBuiltInDeviceIndex = 4438,\n  SpvBuiltInViewIndex = 4440,\n  SpvBuiltInShadingRateKHR = 4444,\n  SpvBuiltInBaryCoordNoPerspAMD = 4992,\n  SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993,\n  SpvBuiltInBaryCoordNoPerspSampleAMD = 4994,\n  SpvBuiltInBaryCoordSmoothAMD = 4995,\n  SpvBuiltInBaryCoordSmoothCentroidAMD = 4996,\n  SpvBuiltInBaryCoordSmoothSampleAMD = 4997,\n  SpvBuiltInBaryCoordPullModelAMD = 4998,\n  SpvBuiltInFragStencilRefEXT = 5014,\n  SpvBuiltInViewportMaskNV = 5253,\n  SpvBuiltInSecondaryPositionNV = 5257,\n  SpvBuiltInSecondaryViewportMaskNV = 5258,\n  SpvBuiltInPositionPerViewNV = 5261,\n  SpvBuiltInViewportMaskPerViewNV = 5262,\n  SpvBuiltInFullyCoveredEXT = 5264,\n  SpvBuiltInTaskCountNV = 5274,\n  SpvBuiltInPrimitiveCountNV = 5275,\n  SpvBuiltInPrimitiveIndicesNV = 5276,\n  SpvBuiltInClipDistancePerViewNV = 5277,\n  SpvBuiltInCullDistancePerViewNV = 5278,\n  SpvBuiltInLayerPerViewNV = 5279,\n  SpvBuiltInMeshViewCountNV = 5280,\n  
SpvBuiltInMeshViewIndicesNV = 5281,\n  SpvBuiltInBaryCoordKHR = 5286,\n  SpvBuiltInBaryCoordNV = 5286,\n  SpvBuiltInBaryCoordNoPerspKHR = 5287,\n  SpvBuiltInBaryCoordNoPerspNV = 5287,\n  SpvBuiltInFragSizeEXT = 5292,\n  SpvBuiltInFragmentSizeNV = 5292,\n  SpvBuiltInFragInvocationCountEXT = 5293,\n  SpvBuiltInInvocationsPerPixelNV = 5293,\n  SpvBuiltInPrimitivePointIndicesEXT = 5294,\n  SpvBuiltInPrimitiveLineIndicesEXT = 5295,\n  SpvBuiltInPrimitiveTriangleIndicesEXT = 5296,\n  SpvBuiltInCullPrimitiveEXT = 5299,\n  SpvBuiltInLaunchIdKHR = 5319,\n  SpvBuiltInLaunchIdNV = 5319,\n  SpvBuiltInLaunchSizeKHR = 5320,\n  SpvBuiltInLaunchSizeNV = 5320,\n  SpvBuiltInWorldRayOriginKHR = 5321,\n  SpvBuiltInWorldRayOriginNV = 5321,\n  SpvBuiltInWorldRayDirectionKHR = 5322,\n  SpvBuiltInWorldRayDirectionNV = 5322,\n  SpvBuiltInObjectRayOriginKHR = 5323,\n  SpvBuiltInObjectRayOriginNV = 5323,\n  SpvBuiltInObjectRayDirectionKHR = 5324,\n  SpvBuiltInObjectRayDirectionNV = 5324,\n  SpvBuiltInRayTminKHR = 5325,\n  SpvBuiltInRayTminNV = 5325,\n  SpvBuiltInRayTmaxKHR = 5326,\n  SpvBuiltInRayTmaxNV = 5326,\n  SpvBuiltInInstanceCustomIndexKHR = 5327,\n  SpvBuiltInInstanceCustomIndexNV = 5327,\n  SpvBuiltInObjectToWorldKHR = 5330,\n  SpvBuiltInObjectToWorldNV = 5330,\n  SpvBuiltInWorldToObjectKHR = 5331,\n  SpvBuiltInWorldToObjectNV = 5331,\n  SpvBuiltInHitTNV = 5332,\n  SpvBuiltInHitKindKHR = 5333,\n  SpvBuiltInHitKindNV = 5333,\n  SpvBuiltInCurrentRayTimeNV = 5334,\n  SpvBuiltInHitTriangleVertexPositionsKHR = 5335,\n  SpvBuiltInIncomingRayFlagsKHR = 5351,\n  SpvBuiltInIncomingRayFlagsNV = 5351,\n  SpvBuiltInRayGeometryIndexKHR = 5352,\n  SpvBuiltInWarpsPerSMNV = 5374,\n  SpvBuiltInSMCountNV = 5375,\n  SpvBuiltInWarpIDNV = 5376,\n  SpvBuiltInSMIDNV = 5377,\n  SpvBuiltInCullMaskKHR = 6021,\n  SpvBuiltInMax = 0x7fffffff,\n} SpvBuiltIn;\n\ntypedef enum SpvSelectionControlShift_ {\n  SpvSelectionControlFlattenShift = 0,\n  SpvSelectionControlDontFlattenShift = 1,\n  SpvSelectionControlMax = 
0x7fffffff,\n} SpvSelectionControlShift;\n\ntypedef enum SpvSelectionControlMask_ {\n  SpvSelectionControlMaskNone = 0,\n  SpvSelectionControlFlattenMask = 0x00000001,\n  SpvSelectionControlDontFlattenMask = 0x00000002,\n} SpvSelectionControlMask;\n\ntypedef enum SpvLoopControlShift_ {\n  SpvLoopControlUnrollShift = 0,\n  SpvLoopControlDontUnrollShift = 1,\n  SpvLoopControlDependencyInfiniteShift = 2,\n  SpvLoopControlDependencyLengthShift = 3,\n  SpvLoopControlMinIterationsShift = 4,\n  SpvLoopControlMaxIterationsShift = 5,\n  SpvLoopControlIterationMultipleShift = 6,\n  SpvLoopControlPeelCountShift = 7,\n  SpvLoopControlPartialCountShift = 8,\n  SpvLoopControlInitiationIntervalINTELShift = 16,\n  SpvLoopControlMaxConcurrencyINTELShift = 17,\n  SpvLoopControlDependencyArrayINTELShift = 18,\n  SpvLoopControlPipelineEnableINTELShift = 19,\n  SpvLoopControlLoopCoalesceINTELShift = 20,\n  SpvLoopControlMaxInterleavingINTELShift = 21,\n  SpvLoopControlSpeculatedIterationsINTELShift = 22,\n  SpvLoopControlNoFusionINTELShift = 23,\n  SpvLoopControlLoopCountINTELShift = 24,\n  SpvLoopControlMaxReinvocationDelayINTELShift = 25,\n  SpvLoopControlMax = 0x7fffffff,\n} SpvLoopControlShift;\n\ntypedef enum SpvLoopControlMask_ {\n  SpvLoopControlMaskNone = 0,\n  SpvLoopControlUnrollMask = 0x00000001,\n  SpvLoopControlDontUnrollMask = 0x00000002,\n  SpvLoopControlDependencyInfiniteMask = 0x00000004,\n  SpvLoopControlDependencyLengthMask = 0x00000008,\n  SpvLoopControlMinIterationsMask = 0x00000010,\n  SpvLoopControlMaxIterationsMask = 0x00000020,\n  SpvLoopControlIterationMultipleMask = 0x00000040,\n  SpvLoopControlPeelCountMask = 0x00000080,\n  SpvLoopControlPartialCountMask = 0x00000100,\n  SpvLoopControlInitiationIntervalINTELMask = 0x00010000,\n  SpvLoopControlMaxConcurrencyINTELMask = 0x00020000,\n  SpvLoopControlDependencyArrayINTELMask = 0x00040000,\n  SpvLoopControlPipelineEnableINTELMask = 0x00080000,\n  SpvLoopControlLoopCoalesceINTELMask = 0x00100000,\n  
SpvLoopControlMaxInterleavingINTELMask = 0x00200000,\n  SpvLoopControlSpeculatedIterationsINTELMask = 0x00400000,\n  SpvLoopControlNoFusionINTELMask = 0x00800000,\n  SpvLoopControlLoopCountINTELMask = 0x01000000,\n  SpvLoopControlMaxReinvocationDelayINTELMask = 0x02000000,\n} SpvLoopControlMask;\n\ntypedef enum SpvFunctionControlShift_ {\n  SpvFunctionControlInlineShift = 0,\n  SpvFunctionControlDontInlineShift = 1,\n  SpvFunctionControlPureShift = 2,\n  SpvFunctionControlConstShift = 3,\n  SpvFunctionControlOptNoneINTELShift = 16,\n  SpvFunctionControlMax = 0x7fffffff,\n} SpvFunctionControlShift;\n\ntypedef enum SpvFunctionControlMask_ {\n  SpvFunctionControlMaskNone = 0,\n  SpvFunctionControlInlineMask = 0x00000001,\n  SpvFunctionControlDontInlineMask = 0x00000002,\n  SpvFunctionControlPureMask = 0x00000004,\n  SpvFunctionControlConstMask = 0x00000008,\n  SpvFunctionControlOptNoneINTELMask = 0x00010000,\n} SpvFunctionControlMask;\n\ntypedef enum SpvMemorySemanticsShift_ {\n  SpvMemorySemanticsAcquireShift = 1,\n  SpvMemorySemanticsReleaseShift = 2,\n  SpvMemorySemanticsAcquireReleaseShift = 3,\n  SpvMemorySemanticsSequentiallyConsistentShift = 4,\n  SpvMemorySemanticsUniformMemoryShift = 6,\n  SpvMemorySemanticsSubgroupMemoryShift = 7,\n  SpvMemorySemanticsWorkgroupMemoryShift = 8,\n  SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,\n  SpvMemorySemanticsAtomicCounterMemoryShift = 10,\n  SpvMemorySemanticsImageMemoryShift = 11,\n  SpvMemorySemanticsOutputMemoryShift = 12,\n  SpvMemorySemanticsOutputMemoryKHRShift = 12,\n  SpvMemorySemanticsMakeAvailableShift = 13,\n  SpvMemorySemanticsMakeAvailableKHRShift = 13,\n  SpvMemorySemanticsMakeVisibleShift = 14,\n  SpvMemorySemanticsMakeVisibleKHRShift = 14,\n  SpvMemorySemanticsVolatileShift = 15,\n  SpvMemorySemanticsMax = 0x7fffffff,\n} SpvMemorySemanticsShift;\n\ntypedef enum SpvMemorySemanticsMask_ {\n  SpvMemorySemanticsMaskNone = 0,\n  SpvMemorySemanticsAcquireMask = 0x00000002,\n  SpvMemorySemanticsReleaseMask = 
0x00000004,\n  SpvMemorySemanticsAcquireReleaseMask = 0x00000008,\n  SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,\n  SpvMemorySemanticsUniformMemoryMask = 0x00000040,\n  SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,\n  SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,\n  SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,\n  SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,\n  SpvMemorySemanticsImageMemoryMask = 0x00000800,\n  SpvMemorySemanticsOutputMemoryMask = 0x00001000,\n  SpvMemorySemanticsOutputMemoryKHRMask = 0x00001000,\n  SpvMemorySemanticsMakeAvailableMask = 0x00002000,\n  SpvMemorySemanticsMakeAvailableKHRMask = 0x00002000,\n  SpvMemorySemanticsMakeVisibleMask = 0x00004000,\n  SpvMemorySemanticsMakeVisibleKHRMask = 0x00004000,\n  SpvMemorySemanticsVolatileMask = 0x00008000,\n} SpvMemorySemanticsMask;\n\ntypedef enum SpvMemoryAccessShift_ {\n  SpvMemoryAccessVolatileShift = 0,\n  SpvMemoryAccessAlignedShift = 1,\n  SpvMemoryAccessNontemporalShift = 2,\n  SpvMemoryAccessMakePointerAvailableShift = 3,\n  SpvMemoryAccessMakePointerAvailableKHRShift = 3,\n  SpvMemoryAccessMakePointerVisibleShift = 4,\n  SpvMemoryAccessMakePointerVisibleKHRShift = 4,\n  SpvMemoryAccessNonPrivatePointerShift = 5,\n  SpvMemoryAccessNonPrivatePointerKHRShift = 5,\n  SpvMemoryAccessAliasScopeINTELMaskShift = 16,\n  SpvMemoryAccessNoAliasINTELMaskShift = 17,\n  SpvMemoryAccessMax = 0x7fffffff,\n} SpvMemoryAccessShift;\n\ntypedef enum SpvMemoryAccessMask_ {\n  SpvMemoryAccessMaskNone = 0,\n  SpvMemoryAccessVolatileMask = 0x00000001,\n  SpvMemoryAccessAlignedMask = 0x00000002,\n  SpvMemoryAccessNontemporalMask = 0x00000004,\n  SpvMemoryAccessMakePointerAvailableMask = 0x00000008,\n  SpvMemoryAccessMakePointerAvailableKHRMask = 0x00000008,\n  SpvMemoryAccessMakePointerVisibleMask = 0x00000010,\n  SpvMemoryAccessMakePointerVisibleKHRMask = 0x00000010,\n  SpvMemoryAccessNonPrivatePointerMask = 0x00000020,\n  SpvMemoryAccessNonPrivatePointerKHRMask = 
0x00000020,\n  SpvMemoryAccessAliasScopeINTELMaskMask = 0x00010000,\n  SpvMemoryAccessNoAliasINTELMaskMask = 0x00020000,\n} SpvMemoryAccessMask;\n\ntypedef enum SpvScope_ {\n  SpvScopeCrossDevice = 0,\n  SpvScopeDevice = 1,\n  SpvScopeWorkgroup = 2,\n  SpvScopeSubgroup = 3,\n  SpvScopeInvocation = 4,\n  SpvScopeQueueFamily = 5,\n  SpvScopeQueueFamilyKHR = 5,\n  SpvScopeShaderCallKHR = 6,\n  SpvScopeMax = 0x7fffffff,\n} SpvScope;\n\ntypedef enum SpvGroupOperation_ {\n  SpvGroupOperationReduce = 0,\n  SpvGroupOperationInclusiveScan = 1,\n  SpvGroupOperationExclusiveScan = 2,\n  SpvGroupOperationClusteredReduce = 3,\n  SpvGroupOperationPartitionedReduceNV = 6,\n  SpvGroupOperationPartitionedInclusiveScanNV = 7,\n  SpvGroupOperationPartitionedExclusiveScanNV = 8,\n  SpvGroupOperationMax = 0x7fffffff,\n} SpvGroupOperation;\n\ntypedef enum SpvKernelEnqueueFlags_ {\n  SpvKernelEnqueueFlagsNoWait = 0,\n  SpvKernelEnqueueFlagsWaitKernel = 1,\n  SpvKernelEnqueueFlagsWaitWorkGroup = 2,\n  SpvKernelEnqueueFlagsMax = 0x7fffffff,\n} SpvKernelEnqueueFlags;\n\ntypedef enum SpvKernelProfilingInfoShift_ {\n  SpvKernelProfilingInfoCmdExecTimeShift = 0,\n  SpvKernelProfilingInfoMax = 0x7fffffff,\n} SpvKernelProfilingInfoShift;\n\ntypedef enum SpvKernelProfilingInfoMask_ {\n  SpvKernelProfilingInfoMaskNone = 0,\n  SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,\n} SpvKernelProfilingInfoMask;\n\ntypedef enum SpvCapability_ {\n  SpvCapabilityMatrix = 0,\n  SpvCapabilityShader = 1,\n  SpvCapabilityGeometry = 2,\n  SpvCapabilityTessellation = 3,\n  SpvCapabilityAddresses = 4,\n  SpvCapabilityLinkage = 5,\n  SpvCapabilityKernel = 6,\n  SpvCapabilityVector16 = 7,\n  SpvCapabilityFloat16Buffer = 8,\n  SpvCapabilityFloat16 = 9,\n  SpvCapabilityFloat64 = 10,\n  SpvCapabilityInt64 = 11,\n  SpvCapabilityInt64Atomics = 12,\n  SpvCapabilityImageBasic = 13,\n  SpvCapabilityImageReadWrite = 14,\n  SpvCapabilityImageMipmap = 15,\n  SpvCapabilityPipes = 17,\n  SpvCapabilityGroups = 18,\n  
SpvCapabilityDeviceEnqueue = 19,\n  SpvCapabilityLiteralSampler = 20,\n  SpvCapabilityAtomicStorage = 21,\n  SpvCapabilityInt16 = 22,\n  SpvCapabilityTessellationPointSize = 23,\n  SpvCapabilityGeometryPointSize = 24,\n  SpvCapabilityImageGatherExtended = 25,\n  SpvCapabilityStorageImageMultisample = 27,\n  SpvCapabilityUniformBufferArrayDynamicIndexing = 28,\n  SpvCapabilitySampledImageArrayDynamicIndexing = 29,\n  SpvCapabilityStorageBufferArrayDynamicIndexing = 30,\n  SpvCapabilityStorageImageArrayDynamicIndexing = 31,\n  SpvCapabilityClipDistance = 32,\n  SpvCapabilityCullDistance = 33,\n  SpvCapabilityImageCubeArray = 34,\n  SpvCapabilitySampleRateShading = 35,\n  SpvCapabilityImageRect = 36,\n  SpvCapabilitySampledRect = 37,\n  SpvCapabilityGenericPointer = 38,\n  SpvCapabilityInt8 = 39,\n  SpvCapabilityInputAttachment = 40,\n  SpvCapabilitySparseResidency = 41,\n  SpvCapabilityMinLod = 42,\n  SpvCapabilitySampled1D = 43,\n  SpvCapabilityImage1D = 44,\n  SpvCapabilitySampledCubeArray = 45,\n  SpvCapabilitySampledBuffer = 46,\n  SpvCapabilityImageBuffer = 47,\n  SpvCapabilityImageMSArray = 48,\n  SpvCapabilityStorageImageExtendedFormats = 49,\n  SpvCapabilityImageQuery = 50,\n  SpvCapabilityDerivativeControl = 51,\n  SpvCapabilityInterpolationFunction = 52,\n  SpvCapabilityTransformFeedback = 53,\n  SpvCapabilityGeometryStreams = 54,\n  SpvCapabilityStorageImageReadWithoutFormat = 55,\n  SpvCapabilityStorageImageWriteWithoutFormat = 56,\n  SpvCapabilityMultiViewport = 57,\n  SpvCapabilitySubgroupDispatch = 58,\n  SpvCapabilityNamedBarrier = 59,\n  SpvCapabilityPipeStorage = 60,\n  SpvCapabilityGroupNonUniform = 61,\n  SpvCapabilityGroupNonUniformVote = 62,\n  SpvCapabilityGroupNonUniformArithmetic = 63,\n  SpvCapabilityGroupNonUniformBallot = 64,\n  SpvCapabilityGroupNonUniformShuffle = 65,\n  SpvCapabilityGroupNonUniformShuffleRelative = 66,\n  SpvCapabilityGroupNonUniformClustered = 67,\n  SpvCapabilityGroupNonUniformQuad = 68,\n  SpvCapabilityShaderLayer = 
69,\n  SpvCapabilityShaderViewportIndex = 70,\n  SpvCapabilityUniformDecoration = 71,\n  SpvCapabilityCoreBuiltinsARM = 4165,\n  SpvCapabilityTileImageColorReadAccessEXT = 4166,\n  SpvCapabilityTileImageDepthReadAccessEXT = 4167,\n  SpvCapabilityTileImageStencilReadAccessEXT = 4168,\n  SpvCapabilityFragmentShadingRateKHR = 4422,\n  SpvCapabilitySubgroupBallotKHR = 4423,\n  SpvCapabilityDrawParameters = 4427,\n  SpvCapabilityWorkgroupMemoryExplicitLayoutKHR = 4428,\n  SpvCapabilityWorkgroupMemoryExplicitLayout8BitAccessKHR = 4429,\n  SpvCapabilityWorkgroupMemoryExplicitLayout16BitAccessKHR = 4430,\n  SpvCapabilitySubgroupVoteKHR = 4431,\n  SpvCapabilityStorageBuffer16BitAccess = 4433,\n  SpvCapabilityStorageUniformBufferBlock16 = 4433,\n  SpvCapabilityStorageUniform16 = 4434,\n  SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434,\n  SpvCapabilityStoragePushConstant16 = 4435,\n  SpvCapabilityStorageInputOutput16 = 4436,\n  SpvCapabilityDeviceGroup = 4437,\n  SpvCapabilityMultiView = 4439,\n  SpvCapabilityVariablePointersStorageBuffer = 4441,\n  SpvCapabilityVariablePointers = 4442,\n  SpvCapabilityAtomicStorageOps = 4445,\n  SpvCapabilitySampleMaskPostDepthCoverage = 4447,\n  SpvCapabilityStorageBuffer8BitAccess = 4448,\n  SpvCapabilityUniformAndStorageBuffer8BitAccess = 4449,\n  SpvCapabilityStoragePushConstant8 = 4450,\n  SpvCapabilityDenormPreserve = 4464,\n  SpvCapabilityDenormFlushToZero = 4465,\n  SpvCapabilitySignedZeroInfNanPreserve = 4466,\n  SpvCapabilityRoundingModeRTE = 4467,\n  SpvCapabilityRoundingModeRTZ = 4468,\n  SpvCapabilityRayQueryProvisionalKHR = 4471,\n  SpvCapabilityRayQueryKHR = 4472,\n  SpvCapabilityRayTraversalPrimitiveCullingKHR = 4478,\n  SpvCapabilityRayTracingKHR = 4479,\n  SpvCapabilityTextureSampleWeightedQCOM = 4484,\n  SpvCapabilityTextureBoxFilterQCOM = 4485,\n  SpvCapabilityTextureBlockMatchQCOM = 4486,\n  SpvCapabilityFloat16ImageAMD = 5008,\n  SpvCapabilityImageGatherBiasLodAMD = 5009,\n  SpvCapabilityFragmentMaskAMD = 
5010,\n  SpvCapabilityStencilExportEXT = 5013,\n  SpvCapabilityImageReadWriteLodAMD = 5015,\n  SpvCapabilityInt64ImageEXT = 5016,\n  SpvCapabilityShaderClockKHR = 5055,\n  SpvCapabilitySampleMaskOverrideCoverageNV = 5249,\n  SpvCapabilityGeometryShaderPassthroughNV = 5251,\n  SpvCapabilityShaderViewportIndexLayerEXT = 5254,\n  SpvCapabilityShaderViewportIndexLayerNV = 5254,\n  SpvCapabilityShaderViewportMaskNV = 5255,\n  SpvCapabilityShaderStereoViewNV = 5259,\n  SpvCapabilityPerViewAttributesNV = 5260,\n  SpvCapabilityFragmentFullyCoveredEXT = 5265,\n  SpvCapabilityMeshShadingNV = 5266,\n  SpvCapabilityImageFootprintNV = 5282,\n  SpvCapabilityMeshShadingEXT = 5283,\n  SpvCapabilityFragmentBarycentricKHR = 5284,\n  SpvCapabilityFragmentBarycentricNV = 5284,\n  SpvCapabilityComputeDerivativeGroupQuadsNV = 5288,\n  SpvCapabilityFragmentDensityEXT = 5291,\n  SpvCapabilityShadingRateNV = 5291,\n  SpvCapabilityGroupNonUniformPartitionedNV = 5297,\n  SpvCapabilityShaderNonUniform = 5301,\n  SpvCapabilityShaderNonUniformEXT = 5301,\n  SpvCapabilityRuntimeDescriptorArray = 5302,\n  SpvCapabilityRuntimeDescriptorArrayEXT = 5302,\n  SpvCapabilityInputAttachmentArrayDynamicIndexing = 5303,\n  SpvCapabilityInputAttachmentArrayDynamicIndexingEXT = 5303,\n  SpvCapabilityUniformTexelBufferArrayDynamicIndexing = 5304,\n  SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304,\n  SpvCapabilityStorageTexelBufferArrayDynamicIndexing = 5305,\n  SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305,\n  SpvCapabilityUniformBufferArrayNonUniformIndexing = 5306,\n  SpvCapabilityUniformBufferArrayNonUniformIndexingEXT = 5306,\n  SpvCapabilitySampledImageArrayNonUniformIndexing = 5307,\n  SpvCapabilitySampledImageArrayNonUniformIndexingEXT = 5307,\n  SpvCapabilityStorageBufferArrayNonUniformIndexing = 5308,\n  SpvCapabilityStorageBufferArrayNonUniformIndexingEXT = 5308,\n  SpvCapabilityStorageImageArrayNonUniformIndexing = 5309,\n  
SpvCapabilityStorageImageArrayNonUniformIndexingEXT = 5309,\n  SpvCapabilityInputAttachmentArrayNonUniformIndexing = 5310,\n  SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,\n  SpvCapabilityUniformTexelBufferArrayNonUniformIndexing = 5311,\n  SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,\n  SpvCapabilityStorageTexelBufferArrayNonUniformIndexing = 5312,\n  SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,\n  SpvCapabilityRayTracingPositionFetchKHR = 5336,\n  SpvCapabilityRayTracingNV = 5340,\n  SpvCapabilityRayTracingMotionBlurNV = 5341,\n  SpvCapabilityVulkanMemoryModel = 5345,\n  SpvCapabilityVulkanMemoryModelKHR = 5345,\n  SpvCapabilityVulkanMemoryModelDeviceScope = 5346,\n  SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346,\n  SpvCapabilityPhysicalStorageBufferAddresses = 5347,\n  SpvCapabilityPhysicalStorageBufferAddressesEXT = 5347,\n  SpvCapabilityComputeDerivativeGroupLinearNV = 5350,\n  SpvCapabilityRayTracingProvisionalKHR = 5353,\n  SpvCapabilityCooperativeMatrixNV = 5357,\n  SpvCapabilityFragmentShaderSampleInterlockEXT = 5363,\n  SpvCapabilityFragmentShaderShadingRateInterlockEXT = 5372,\n  SpvCapabilityShaderSMBuiltinsNV = 5373,\n  SpvCapabilityFragmentShaderPixelInterlockEXT = 5378,\n  SpvCapabilityDemoteToHelperInvocation = 5379,\n  SpvCapabilityDemoteToHelperInvocationEXT = 5379,\n  SpvCapabilityRayTracingOpacityMicromapEXT = 5381,\n  SpvCapabilityShaderInvocationReorderNV = 5383,\n  SpvCapabilityBindlessTextureNV = 5390,\n  SpvCapabilityRayQueryPositionFetchKHR = 5391,\n  SpvCapabilitySubgroupShuffleINTEL = 5568,\n  SpvCapabilitySubgroupBufferBlockIOINTEL = 5569,\n  SpvCapabilitySubgroupImageBlockIOINTEL = 5570,\n  SpvCapabilitySubgroupImageMediaBlockIOINTEL = 5579,\n  SpvCapabilityRoundToInfinityINTEL = 5582,\n  SpvCapabilityFloatingPointModeINTEL = 5583,\n  SpvCapabilityIntegerFunctions2INTEL = 5584,\n  SpvCapabilityFunctionPointersINTEL = 5603,\n  SpvCapabilityIndirectReferencesINTEL = 5604,\n 
 SpvCapabilityAsmINTEL = 5606,\n  SpvCapabilityAtomicFloat32MinMaxEXT = 5612,\n  SpvCapabilityAtomicFloat64MinMaxEXT = 5613,\n  SpvCapabilityAtomicFloat16MinMaxEXT = 5616,\n  SpvCapabilityVectorComputeINTEL = 5617,\n  SpvCapabilityVectorAnyINTEL = 5619,\n  SpvCapabilityExpectAssumeKHR = 5629,\n  SpvCapabilitySubgroupAvcMotionEstimationINTEL = 5696,\n  SpvCapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697,\n  SpvCapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698,\n  SpvCapabilityVariableLengthArrayINTEL = 5817,\n  SpvCapabilityFunctionFloatControlINTEL = 5821,\n  SpvCapabilityFPGAMemoryAttributesINTEL = 5824,\n  SpvCapabilityFPFastMathModeINTEL = 5837,\n  SpvCapabilityArbitraryPrecisionIntegersINTEL = 5844,\n  SpvCapabilityArbitraryPrecisionFloatingPointINTEL = 5845,\n  SpvCapabilityUnstructuredLoopControlsINTEL = 5886,\n  SpvCapabilityFPGALoopControlsINTEL = 5888,\n  SpvCapabilityKernelAttributesINTEL = 5892,\n  SpvCapabilityFPGAKernelAttributesINTEL = 5897,\n  SpvCapabilityFPGAMemoryAccessesINTEL = 5898,\n  SpvCapabilityFPGAClusterAttributesINTEL = 5904,\n  SpvCapabilityLoopFuseINTEL = 5906,\n  SpvCapabilityFPGADSPControlINTEL = 5908,\n  SpvCapabilityMemoryAccessAliasingINTEL = 5910,\n  SpvCapabilityFPGAInvocationPipeliningAttributesINTEL = 5916,\n  SpvCapabilityFPGABufferLocationINTEL = 5920,\n  SpvCapabilityArbitraryPrecisionFixedPointINTEL = 5922,\n  SpvCapabilityUSMStorageClassesINTEL = 5935,\n  SpvCapabilityRuntimeAlignedAttributeINTEL = 5939,\n  SpvCapabilityIOPipesINTEL = 5943,\n  SpvCapabilityBlockingPipesINTEL = 5945,\n  SpvCapabilityFPGARegINTEL = 5948,\n  SpvCapabilityDotProductInputAll = 6016,\n  SpvCapabilityDotProductInputAllKHR = 6016,\n  SpvCapabilityDotProductInput4x8Bit = 6017,\n  SpvCapabilityDotProductInput4x8BitKHR = 6017,\n  SpvCapabilityDotProductInput4x8BitPacked = 6018,\n  SpvCapabilityDotProductInput4x8BitPackedKHR = 6018,\n  SpvCapabilityDotProduct = 6019,\n  SpvCapabilityDotProductKHR = 6019,\n  SpvCapabilityRayCullMaskKHR = 
6020,\n  SpvCapabilityCooperativeMatrixKHR = 6022,\n  SpvCapabilityBitInstructions = 6025,\n  SpvCapabilityGroupNonUniformRotateKHR = 6026,\n  SpvCapabilityAtomicFloat32AddEXT = 6033,\n  SpvCapabilityAtomicFloat64AddEXT = 6034,\n  SpvCapabilityLongConstantCompositeINTEL = 6089,\n  SpvCapabilityOptNoneINTEL = 6094,\n  SpvCapabilityAtomicFloat16AddEXT = 6095,\n  SpvCapabilityDebugInfoModuleINTEL = 6114,\n  SpvCapabilityBFloat16ConversionINTEL = 6115,\n  SpvCapabilitySplitBarrierINTEL = 6141,\n  SpvCapabilityFPGAKernelAttributesv2INTEL = 6161,\n  SpvCapabilityFPGALatencyControlINTEL = 6171,\n  SpvCapabilityFPGAArgumentInterfacesINTEL = 6174,\n  SpvCapabilityGroupUniformArithmeticKHR = 6400,\n  SpvCapabilityMax = 0x7fffffff,\n} SpvCapability;\n\ntypedef enum SpvRayFlagsShift_ {\n  SpvRayFlagsOpaqueKHRShift = 0,\n  SpvRayFlagsNoOpaqueKHRShift = 1,\n  SpvRayFlagsTerminateOnFirstHitKHRShift = 2,\n  SpvRayFlagsSkipClosestHitShaderKHRShift = 3,\n  SpvRayFlagsCullBackFacingTrianglesKHRShift = 4,\n  SpvRayFlagsCullFrontFacingTrianglesKHRShift = 5,\n  SpvRayFlagsCullOpaqueKHRShift = 6,\n  SpvRayFlagsCullNoOpaqueKHRShift = 7,\n  SpvRayFlagsSkipTrianglesKHRShift = 8,\n  SpvRayFlagsSkipAABBsKHRShift = 9,\n  SpvRayFlagsForceOpacityMicromap2StateEXTShift = 10,\n  SpvRayFlagsMax = 0x7fffffff,\n} SpvRayFlagsShift;\n\ntypedef enum SpvRayFlagsMask_ {\n  SpvRayFlagsMaskNone = 0,\n  SpvRayFlagsOpaqueKHRMask = 0x00000001,\n  SpvRayFlagsNoOpaqueKHRMask = 0x00000002,\n  SpvRayFlagsTerminateOnFirstHitKHRMask = 0x00000004,\n  SpvRayFlagsSkipClosestHitShaderKHRMask = 0x00000008,\n  SpvRayFlagsCullBackFacingTrianglesKHRMask = 0x00000010,\n  SpvRayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020,\n  SpvRayFlagsCullOpaqueKHRMask = 0x00000040,\n  SpvRayFlagsCullNoOpaqueKHRMask = 0x00000080,\n  SpvRayFlagsSkipTrianglesKHRMask = 0x00000100,\n  SpvRayFlagsSkipAABBsKHRMask = 0x00000200,\n  SpvRayFlagsForceOpacityMicromap2StateEXTMask = 0x00000400,\n} SpvRayFlagsMask;\n\ntypedef enum 
SpvRayQueryIntersection_ {\n  SpvRayQueryIntersectionRayQueryCandidateIntersectionKHR = 0,\n  SpvRayQueryIntersectionRayQueryCommittedIntersectionKHR = 1,\n  SpvRayQueryIntersectionMax = 0x7fffffff,\n} SpvRayQueryIntersection;\n\ntypedef enum SpvRayQueryCommittedIntersectionType_ {\n  SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0,\n  SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR =\n      1,\n  SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR =\n      2,\n  SpvRayQueryCommittedIntersectionTypeMax = 0x7fffffff,\n} SpvRayQueryCommittedIntersectionType;\n\ntypedef enum SpvRayQueryCandidateIntersectionType_ {\n  SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR =\n      0,\n  SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1,\n  SpvRayQueryCandidateIntersectionTypeMax = 0x7fffffff,\n} SpvRayQueryCandidateIntersectionType;\n\ntypedef enum SpvFragmentShadingRateShift_ {\n  SpvFragmentShadingRateVertical2PixelsShift = 0,\n  SpvFragmentShadingRateVertical4PixelsShift = 1,\n  SpvFragmentShadingRateHorizontal2PixelsShift = 2,\n  SpvFragmentShadingRateHorizontal4PixelsShift = 3,\n  SpvFragmentShadingRateMax = 0x7fffffff,\n} SpvFragmentShadingRateShift;\n\ntypedef enum SpvFragmentShadingRateMask_ {\n  SpvFragmentShadingRateMaskNone = 0,\n  SpvFragmentShadingRateVertical2PixelsMask = 0x00000001,\n  SpvFragmentShadingRateVertical4PixelsMask = 0x00000002,\n  SpvFragmentShadingRateHorizontal2PixelsMask = 0x00000004,\n  SpvFragmentShadingRateHorizontal4PixelsMask = 0x00000008,\n} SpvFragmentShadingRateMask;\n\ntypedef enum SpvFPDenormMode_ {\n  SpvFPDenormModePreserve = 0,\n  SpvFPDenormModeFlushToZero = 1,\n  SpvFPDenormModeMax = 0x7fffffff,\n} SpvFPDenormMode;\n\ntypedef enum SpvFPOperationMode_ {\n  SpvFPOperationModeIEEE = 0,\n  SpvFPOperationModeALT = 1,\n  SpvFPOperationModeMax = 0x7fffffff,\n} SpvFPOperationMode;\n\ntypedef enum 
SpvQuantizationModes_ {\n  SpvQuantizationModesTRN = 0,\n  SpvQuantizationModesTRN_ZERO = 1,\n  SpvQuantizationModesRND = 2,\n  SpvQuantizationModesRND_ZERO = 3,\n  SpvQuantizationModesRND_INF = 4,\n  SpvQuantizationModesRND_MIN_INF = 5,\n  SpvQuantizationModesRND_CONV = 6,\n  SpvQuantizationModesRND_CONV_ODD = 7,\n  SpvQuantizationModesMax = 0x7fffffff,\n} SpvQuantizationModes;\n\ntypedef enum SpvOverflowModes_ {\n  SpvOverflowModesWRAP = 0,\n  SpvOverflowModesSAT = 1,\n  SpvOverflowModesSAT_ZERO = 2,\n  SpvOverflowModesSAT_SYM = 3,\n  SpvOverflowModesMax = 0x7fffffff,\n} SpvOverflowModes;\n\ntypedef enum SpvPackedVectorFormat_ {\n  SpvPackedVectorFormatPackedVectorFormat4x8Bit = 0,\n  SpvPackedVectorFormatPackedVectorFormat4x8BitKHR = 0,\n  SpvPackedVectorFormatMax = 0x7fffffff,\n} SpvPackedVectorFormat;\n\ntypedef enum SpvCooperativeMatrixOperandsShift_ {\n  SpvCooperativeMatrixOperandsMatrixASignedComponentsShift = 0,\n  SpvCooperativeMatrixOperandsMatrixBSignedComponentsShift = 1,\n  SpvCooperativeMatrixOperandsMatrixCSignedComponentsShift = 2,\n  SpvCooperativeMatrixOperandsMatrixResultSignedComponentsShift = 3,\n  SpvCooperativeMatrixOperandsSaturatingAccumulationShift = 4,\n  SpvCooperativeMatrixOperandsMax = 0x7fffffff,\n} SpvCooperativeMatrixOperandsShift;\n\ntypedef enum SpvCooperativeMatrixOperandsMask_ {\n  SpvCooperativeMatrixOperandsMaskNone = 0,\n  SpvCooperativeMatrixOperandsMatrixASignedComponentsMask = 0x00000001,\n  SpvCooperativeMatrixOperandsMatrixBSignedComponentsMask = 0x00000002,\n  SpvCooperativeMatrixOperandsMatrixCSignedComponentsMask = 0x00000004,\n  SpvCooperativeMatrixOperandsMatrixResultSignedComponentsMask = 0x00000008,\n  SpvCooperativeMatrixOperandsSaturatingAccumulationMask = 0x00000010,\n} SpvCooperativeMatrixOperandsMask;\n\ntypedef enum SpvCooperativeMatrixLayout_ {\n  SpvCooperativeMatrixLayoutRowMajorKHR = 0,\n  SpvCooperativeMatrixLayoutColumnMajorKHR = 1,\n  SpvCooperativeMatrixLayoutMax = 0x7fffffff,\n} 
SpvCooperativeMatrixLayout;\n\ntypedef enum SpvCooperativeMatrixUse_ {\n  SpvCooperativeMatrixUseMatrixAKHR = 0,\n  SpvCooperativeMatrixUseMatrixBKHR = 1,\n  SpvCooperativeMatrixUseMatrixAccumulatorKHR = 2,\n  SpvCooperativeMatrixUseMax = 0x7fffffff,\n} SpvCooperativeMatrixUse;\n\ntypedef enum SpvOp_ {\n  SpvOpNop = 0,\n  SpvOpUndef = 1,\n  SpvOpSourceContinued = 2,\n  SpvOpSource = 3,\n  SpvOpSourceExtension = 4,\n  SpvOpName = 5,\n  SpvOpMemberName = 6,\n  SpvOpString = 7,\n  SpvOpLine = 8,\n  SpvOpExtension = 10,\n  SpvOpExtInstImport = 11,\n  SpvOpExtInst = 12,\n  SpvOpMemoryModel = 14,\n  SpvOpEntryPoint = 15,\n  SpvOpExecutionMode = 16,\n  SpvOpCapability = 17,\n  SpvOpTypeVoid = 19,\n  SpvOpTypeBool = 20,\n  SpvOpTypeInt = 21,\n  SpvOpTypeFloat = 22,\n  SpvOpTypeVector = 23,\n  SpvOpTypeMatrix = 24,\n  SpvOpTypeImage = 25,\n  SpvOpTypeSampler = 26,\n  SpvOpTypeSampledImage = 27,\n  SpvOpTypeArray = 28,\n  SpvOpTypeRuntimeArray = 29,\n  SpvOpTypeStruct = 30,\n  SpvOpTypeOpaque = 31,\n  SpvOpTypePointer = 32,\n  SpvOpTypeFunction = 33,\n  SpvOpTypeEvent = 34,\n  SpvOpTypeDeviceEvent = 35,\n  SpvOpTypeReserveId = 36,\n  SpvOpTypeQueue = 37,\n  SpvOpTypePipe = 38,\n  SpvOpTypeForwardPointer = 39,\n  SpvOpConstantTrue = 41,\n  SpvOpConstantFalse = 42,\n  SpvOpConstant = 43,\n  SpvOpConstantComposite = 44,\n  SpvOpConstantSampler = 45,\n  SpvOpConstantNull = 46,\n  SpvOpSpecConstantTrue = 48,\n  SpvOpSpecConstantFalse = 49,\n  SpvOpSpecConstant = 50,\n  SpvOpSpecConstantComposite = 51,\n  SpvOpSpecConstantOp = 52,\n  SpvOpFunction = 54,\n  SpvOpFunctionParameter = 55,\n  SpvOpFunctionEnd = 56,\n  SpvOpFunctionCall = 57,\n  SpvOpVariable = 59,\n  SpvOpImageTexelPointer = 60,\n  SpvOpLoad = 61,\n  SpvOpStore = 62,\n  SpvOpCopyMemory = 63,\n  SpvOpCopyMemorySized = 64,\n  SpvOpAccessChain = 65,\n  SpvOpInBoundsAccessChain = 66,\n  SpvOpPtrAccessChain = 67,\n  SpvOpArrayLength = 68,\n  SpvOpGenericPtrMemSemantics = 69,\n  SpvOpInBoundsPtrAccessChain = 70,\n  
SpvOpDecorate = 71,\n  SpvOpMemberDecorate = 72,\n  SpvOpDecorationGroup = 73,\n  SpvOpGroupDecorate = 74,\n  SpvOpGroupMemberDecorate = 75,\n  SpvOpVectorExtractDynamic = 77,\n  SpvOpVectorInsertDynamic = 78,\n  SpvOpVectorShuffle = 79,\n  SpvOpCompositeConstruct = 80,\n  SpvOpCompositeExtract = 81,\n  SpvOpCompositeInsert = 82,\n  SpvOpCopyObject = 83,\n  SpvOpTranspose = 84,\n  SpvOpSampledImage = 86,\n  SpvOpImageSampleImplicitLod = 87,\n  SpvOpImageSampleExplicitLod = 88,\n  SpvOpImageSampleDrefImplicitLod = 89,\n  SpvOpImageSampleDrefExplicitLod = 90,\n  SpvOpImageSampleProjImplicitLod = 91,\n  SpvOpImageSampleProjExplicitLod = 92,\n  SpvOpImageSampleProjDrefImplicitLod = 93,\n  SpvOpImageSampleProjDrefExplicitLod = 94,\n  SpvOpImageFetch = 95,\n  SpvOpImageGather = 96,\n  SpvOpImageDrefGather = 97,\n  SpvOpImageRead = 98,\n  SpvOpImageWrite = 99,\n  SpvOpImage = 100,\n  SpvOpImageQueryFormat = 101,\n  SpvOpImageQueryOrder = 102,\n  SpvOpImageQuerySizeLod = 103,\n  SpvOpImageQuerySize = 104,\n  SpvOpImageQueryLod = 105,\n  SpvOpImageQueryLevels = 106,\n  SpvOpImageQuerySamples = 107,\n  SpvOpConvertFToU = 109,\n  SpvOpConvertFToS = 110,\n  SpvOpConvertSToF = 111,\n  SpvOpConvertUToF = 112,\n  SpvOpUConvert = 113,\n  SpvOpSConvert = 114,\n  SpvOpFConvert = 115,\n  SpvOpQuantizeToF16 = 116,\n  SpvOpConvertPtrToU = 117,\n  SpvOpSatConvertSToU = 118,\n  SpvOpSatConvertUToS = 119,\n  SpvOpConvertUToPtr = 120,\n  SpvOpPtrCastToGeneric = 121,\n  SpvOpGenericCastToPtr = 122,\n  SpvOpGenericCastToPtrExplicit = 123,\n  SpvOpBitcast = 124,\n  SpvOpSNegate = 126,\n  SpvOpFNegate = 127,\n  SpvOpIAdd = 128,\n  SpvOpFAdd = 129,\n  SpvOpISub = 130,\n  SpvOpFSub = 131,\n  SpvOpIMul = 132,\n  SpvOpFMul = 133,\n  SpvOpUDiv = 134,\n  SpvOpSDiv = 135,\n  SpvOpFDiv = 136,\n  SpvOpUMod = 137,\n  SpvOpSRem = 138,\n  SpvOpSMod = 139,\n  SpvOpFRem = 140,\n  SpvOpFMod = 141,\n  SpvOpVectorTimesScalar = 142,\n  SpvOpMatrixTimesScalar = 143,\n  SpvOpVectorTimesMatrix = 144,\n  
SpvOpMatrixTimesVector = 145,\n  SpvOpMatrixTimesMatrix = 146,\n  SpvOpOuterProduct = 147,\n  SpvOpDot = 148,\n  SpvOpIAddCarry = 149,\n  SpvOpISubBorrow = 150,\n  SpvOpUMulExtended = 151,\n  SpvOpSMulExtended = 152,\n  SpvOpAny = 154,\n  SpvOpAll = 155,\n  SpvOpIsNan = 156,\n  SpvOpIsInf = 157,\n  SpvOpIsFinite = 158,\n  SpvOpIsNormal = 159,\n  SpvOpSignBitSet = 160,\n  SpvOpLessOrGreater = 161,\n  SpvOpOrdered = 162,\n  SpvOpUnordered = 163,\n  SpvOpLogicalEqual = 164,\n  SpvOpLogicalNotEqual = 165,\n  SpvOpLogicalOr = 166,\n  SpvOpLogicalAnd = 167,\n  SpvOpLogicalNot = 168,\n  SpvOpSelect = 169,\n  SpvOpIEqual = 170,\n  SpvOpINotEqual = 171,\n  SpvOpUGreaterThan = 172,\n  SpvOpSGreaterThan = 173,\n  SpvOpUGreaterThanEqual = 174,\n  SpvOpSGreaterThanEqual = 175,\n  SpvOpULessThan = 176,\n  SpvOpSLessThan = 177,\n  SpvOpULessThanEqual = 178,\n  SpvOpSLessThanEqual = 179,\n  SpvOpFOrdEqual = 180,\n  SpvOpFUnordEqual = 181,\n  SpvOpFOrdNotEqual = 182,\n  SpvOpFUnordNotEqual = 183,\n  SpvOpFOrdLessThan = 184,\n  SpvOpFUnordLessThan = 185,\n  SpvOpFOrdGreaterThan = 186,\n  SpvOpFUnordGreaterThan = 187,\n  SpvOpFOrdLessThanEqual = 188,\n  SpvOpFUnordLessThanEqual = 189,\n  SpvOpFOrdGreaterThanEqual = 190,\n  SpvOpFUnordGreaterThanEqual = 191,\n  SpvOpShiftRightLogical = 194,\n  SpvOpShiftRightArithmetic = 195,\n  SpvOpShiftLeftLogical = 196,\n  SpvOpBitwiseOr = 197,\n  SpvOpBitwiseXor = 198,\n  SpvOpBitwiseAnd = 199,\n  SpvOpNot = 200,\n  SpvOpBitFieldInsert = 201,\n  SpvOpBitFieldSExtract = 202,\n  SpvOpBitFieldUExtract = 203,\n  SpvOpBitReverse = 204,\n  SpvOpBitCount = 205,\n  SpvOpDPdx = 207,\n  SpvOpDPdy = 208,\n  SpvOpFwidth = 209,\n  SpvOpDPdxFine = 210,\n  SpvOpDPdyFine = 211,\n  SpvOpFwidthFine = 212,\n  SpvOpDPdxCoarse = 213,\n  SpvOpDPdyCoarse = 214,\n  SpvOpFwidthCoarse = 215,\n  SpvOpEmitVertex = 218,\n  SpvOpEndPrimitive = 219,\n  SpvOpEmitStreamVertex = 220,\n  SpvOpEndStreamPrimitive = 221,\n  SpvOpControlBarrier = 224,\n  SpvOpMemoryBarrier = 225,\n  
SpvOpAtomicLoad = 227,\n  SpvOpAtomicStore = 228,\n  SpvOpAtomicExchange = 229,\n  SpvOpAtomicCompareExchange = 230,\n  SpvOpAtomicCompareExchangeWeak = 231,\n  SpvOpAtomicIIncrement = 232,\n  SpvOpAtomicIDecrement = 233,\n  SpvOpAtomicIAdd = 234,\n  SpvOpAtomicISub = 235,\n  SpvOpAtomicSMin = 236,\n  SpvOpAtomicUMin = 237,\n  SpvOpAtomicSMax = 238,\n  SpvOpAtomicUMax = 239,\n  SpvOpAtomicAnd = 240,\n  SpvOpAtomicOr = 241,\n  SpvOpAtomicXor = 242,\n  SpvOpPhi = 245,\n  SpvOpLoopMerge = 246,\n  SpvOpSelectionMerge = 247,\n  SpvOpLabel = 248,\n  SpvOpBranch = 249,\n  SpvOpBranchConditional = 250,\n  SpvOpSwitch = 251,\n  SpvOpKill = 252,\n  SpvOpReturn = 253,\n  SpvOpReturnValue = 254,\n  SpvOpUnreachable = 255,\n  SpvOpLifetimeStart = 256,\n  SpvOpLifetimeStop = 257,\n  SpvOpGroupAsyncCopy = 259,\n  SpvOpGroupWaitEvents = 260,\n  SpvOpGroupAll = 261,\n  SpvOpGroupAny = 262,\n  SpvOpGroupBroadcast = 263,\n  SpvOpGroupIAdd = 264,\n  SpvOpGroupFAdd = 265,\n  SpvOpGroupFMin = 266,\n  SpvOpGroupUMin = 267,\n  SpvOpGroupSMin = 268,\n  SpvOpGroupFMax = 269,\n  SpvOpGroupUMax = 270,\n  SpvOpGroupSMax = 271,\n  SpvOpReadPipe = 274,\n  SpvOpWritePipe = 275,\n  SpvOpReservedReadPipe = 276,\n  SpvOpReservedWritePipe = 277,\n  SpvOpReserveReadPipePackets = 278,\n  SpvOpReserveWritePipePackets = 279,\n  SpvOpCommitReadPipe = 280,\n  SpvOpCommitWritePipe = 281,\n  SpvOpIsValidReserveId = 282,\n  SpvOpGetNumPipePackets = 283,\n  SpvOpGetMaxPipePackets = 284,\n  SpvOpGroupReserveReadPipePackets = 285,\n  SpvOpGroupReserveWritePipePackets = 286,\n  SpvOpGroupCommitReadPipe = 287,\n  SpvOpGroupCommitWritePipe = 288,\n  SpvOpEnqueueMarker = 291,\n  SpvOpEnqueueKernel = 292,\n  SpvOpGetKernelNDrangeSubGroupCount = 293,\n  SpvOpGetKernelNDrangeMaxSubGroupSize = 294,\n  SpvOpGetKernelWorkGroupSize = 295,\n  SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,\n  SpvOpRetainEvent = 297,\n  SpvOpReleaseEvent = 298,\n  SpvOpCreateUserEvent = 299,\n  SpvOpIsValidEvent = 300,\n  
SpvOpSetUserEventStatus = 301,\n  SpvOpCaptureEventProfilingInfo = 302,\n  SpvOpGetDefaultQueue = 303,\n  SpvOpBuildNDRange = 304,\n  SpvOpImageSparseSampleImplicitLod = 305,\n  SpvOpImageSparseSampleExplicitLod = 306,\n  SpvOpImageSparseSampleDrefImplicitLod = 307,\n  SpvOpImageSparseSampleDrefExplicitLod = 308,\n  SpvOpImageSparseSampleProjImplicitLod = 309,\n  SpvOpImageSparseSampleProjExplicitLod = 310,\n  SpvOpImageSparseSampleProjDrefImplicitLod = 311,\n  SpvOpImageSparseSampleProjDrefExplicitLod = 312,\n  SpvOpImageSparseFetch = 313,\n  SpvOpImageSparseGather = 314,\n  SpvOpImageSparseDrefGather = 315,\n  SpvOpImageSparseTexelsResident = 316,\n  SpvOpNoLine = 317,\n  SpvOpAtomicFlagTestAndSet = 318,\n  SpvOpAtomicFlagClear = 319,\n  SpvOpImageSparseRead = 320,\n  SpvOpSizeOf = 321,\n  SpvOpTypePipeStorage = 322,\n  SpvOpConstantPipeStorage = 323,\n  SpvOpCreatePipeFromPipeStorage = 324,\n  SpvOpGetKernelLocalSizeForSubgroupCount = 325,\n  SpvOpGetKernelMaxNumSubgroups = 326,\n  SpvOpTypeNamedBarrier = 327,\n  SpvOpNamedBarrierInitialize = 328,\n  SpvOpMemoryNamedBarrier = 329,\n  SpvOpModuleProcessed = 330,\n  SpvOpExecutionModeId = 331,\n  SpvOpDecorateId = 332,\n  SpvOpGroupNonUniformElect = 333,\n  SpvOpGroupNonUniformAll = 334,\n  SpvOpGroupNonUniformAny = 335,\n  SpvOpGroupNonUniformAllEqual = 336,\n  SpvOpGroupNonUniformBroadcast = 337,\n  SpvOpGroupNonUniformBroadcastFirst = 338,\n  SpvOpGroupNonUniformBallot = 339,\n  SpvOpGroupNonUniformInverseBallot = 340,\n  SpvOpGroupNonUniformBallotBitExtract = 341,\n  SpvOpGroupNonUniformBallotBitCount = 342,\n  SpvOpGroupNonUniformBallotFindLSB = 343,\n  SpvOpGroupNonUniformBallotFindMSB = 344,\n  SpvOpGroupNonUniformShuffle = 345,\n  SpvOpGroupNonUniformShuffleXor = 346,\n  SpvOpGroupNonUniformShuffleUp = 347,\n  SpvOpGroupNonUniformShuffleDown = 348,\n  SpvOpGroupNonUniformIAdd = 349,\n  SpvOpGroupNonUniformFAdd = 350,\n  SpvOpGroupNonUniformIMul = 351,\n  SpvOpGroupNonUniformFMul = 352,\n  
SpvOpGroupNonUniformSMin = 353,\n  SpvOpGroupNonUniformUMin = 354,\n  SpvOpGroupNonUniformFMin = 355,\n  SpvOpGroupNonUniformSMax = 356,\n  SpvOpGroupNonUniformUMax = 357,\n  SpvOpGroupNonUniformFMax = 358,\n  SpvOpGroupNonUniformBitwiseAnd = 359,\n  SpvOpGroupNonUniformBitwiseOr = 360,\n  SpvOpGroupNonUniformBitwiseXor = 361,\n  SpvOpGroupNonUniformLogicalAnd = 362,\n  SpvOpGroupNonUniformLogicalOr = 363,\n  SpvOpGroupNonUniformLogicalXor = 364,\n  SpvOpGroupNonUniformQuadBroadcast = 365,\n  SpvOpGroupNonUniformQuadSwap = 366,\n  SpvOpCopyLogical = 400,\n  SpvOpPtrEqual = 401,\n  SpvOpPtrNotEqual = 402,\n  SpvOpPtrDiff = 403,\n  SpvOpColorAttachmentReadEXT = 4160,\n  SpvOpDepthAttachmentReadEXT = 4161,\n  SpvOpStencilAttachmentReadEXT = 4162,\n  SpvOpTerminateInvocation = 4416,\n  SpvOpSubgroupBallotKHR = 4421,\n  SpvOpSubgroupFirstInvocationKHR = 4422,\n  SpvOpSubgroupAllKHR = 4428,\n  SpvOpSubgroupAnyKHR = 4429,\n  SpvOpSubgroupAllEqualKHR = 4430,\n  SpvOpGroupNonUniformRotateKHR = 4431,\n  SpvOpSubgroupReadInvocationKHR = 4432,\n  SpvOpTraceRayKHR = 4445,\n  SpvOpExecuteCallableKHR = 4446,\n  SpvOpConvertUToAccelerationStructureKHR = 4447,\n  SpvOpIgnoreIntersectionKHR = 4448,\n  SpvOpTerminateRayKHR = 4449,\n  SpvOpSDot = 4450,\n  SpvOpSDotKHR = 4450,\n  SpvOpUDot = 4451,\n  SpvOpUDotKHR = 4451,\n  SpvOpSUDot = 4452,\n  SpvOpSUDotKHR = 4452,\n  SpvOpSDotAccSat = 4453,\n  SpvOpSDotAccSatKHR = 4453,\n  SpvOpUDotAccSat = 4454,\n  SpvOpUDotAccSatKHR = 4454,\n  SpvOpSUDotAccSat = 4455,\n  SpvOpSUDotAccSatKHR = 4455,\n  SpvOpTypeCooperativeMatrixKHR = 4456,\n  SpvOpCooperativeMatrixLoadKHR = 4457,\n  SpvOpCooperativeMatrixStoreKHR = 4458,\n  SpvOpCooperativeMatrixMulAddKHR = 4459,\n  SpvOpCooperativeMatrixLengthKHR = 4460,\n  SpvOpTypeRayQueryKHR = 4472,\n  SpvOpRayQueryInitializeKHR = 4473,\n  SpvOpRayQueryTerminateKHR = 4474,\n  SpvOpRayQueryGenerateIntersectionKHR = 4475,\n  SpvOpRayQueryConfirmIntersectionKHR = 4476,\n  SpvOpRayQueryProceedKHR = 4477,\n  
SpvOpRayQueryGetIntersectionTypeKHR = 4479,\n  SpvOpImageSampleWeightedQCOM = 4480,\n  SpvOpImageBoxFilterQCOM = 4481,\n  SpvOpImageBlockMatchSSDQCOM = 4482,\n  SpvOpImageBlockMatchSADQCOM = 4483,\n  SpvOpGroupIAddNonUniformAMD = 5000,\n  SpvOpGroupFAddNonUniformAMD = 5001,\n  SpvOpGroupFMinNonUniformAMD = 5002,\n  SpvOpGroupUMinNonUniformAMD = 5003,\n  SpvOpGroupSMinNonUniformAMD = 5004,\n  SpvOpGroupFMaxNonUniformAMD = 5005,\n  SpvOpGroupUMaxNonUniformAMD = 5006,\n  SpvOpGroupSMaxNonUniformAMD = 5007,\n  SpvOpFragmentMaskFetchAMD = 5011,\n  SpvOpFragmentFetchAMD = 5012,\n  SpvOpReadClockKHR = 5056,\n  SpvOpHitObjectRecordHitMotionNV = 5249,\n  SpvOpHitObjectRecordHitWithIndexMotionNV = 5250,\n  SpvOpHitObjectRecordMissMotionNV = 5251,\n  SpvOpHitObjectGetWorldToObjectNV = 5252,\n  SpvOpHitObjectGetObjectToWorldNV = 5253,\n  SpvOpHitObjectGetObjectRayDirectionNV = 5254,\n  SpvOpHitObjectGetObjectRayOriginNV = 5255,\n  SpvOpHitObjectTraceRayMotionNV = 5256,\n  SpvOpHitObjectGetShaderRecordBufferHandleNV = 5257,\n  SpvOpHitObjectGetShaderBindingTableRecordIndexNV = 5258,\n  SpvOpHitObjectRecordEmptyNV = 5259,\n  SpvOpHitObjectTraceRayNV = 5260,\n  SpvOpHitObjectRecordHitNV = 5261,\n  SpvOpHitObjectRecordHitWithIndexNV = 5262,\n  SpvOpHitObjectRecordMissNV = 5263,\n  SpvOpHitObjectExecuteShaderNV = 5264,\n  SpvOpHitObjectGetCurrentTimeNV = 5265,\n  SpvOpHitObjectGetAttributesNV = 5266,\n  SpvOpHitObjectGetHitKindNV = 5267,\n  SpvOpHitObjectGetPrimitiveIndexNV = 5268,\n  SpvOpHitObjectGetGeometryIndexNV = 5269,\n  SpvOpHitObjectGetInstanceIdNV = 5270,\n  SpvOpHitObjectGetInstanceCustomIndexNV = 5271,\n  SpvOpHitObjectGetWorldRayDirectionNV = 5272,\n  SpvOpHitObjectGetWorldRayOriginNV = 5273,\n  SpvOpHitObjectGetRayTMaxNV = 5274,\n  SpvOpHitObjectGetRayTMinNV = 5275,\n  SpvOpHitObjectIsEmptyNV = 5276,\n  SpvOpHitObjectIsHitNV = 5277,\n  SpvOpHitObjectIsMissNV = 5278,\n  SpvOpReorderThreadWithHitObjectNV = 5279,\n  SpvOpReorderThreadWithHintNV = 5280,\n  
SpvOpTypeHitObjectNV = 5281,\n  SpvOpImageSampleFootprintNV = 5283,\n  SpvOpEmitMeshTasksEXT = 5294,\n  SpvOpSetMeshOutputsEXT = 5295,\n  SpvOpGroupNonUniformPartitionNV = 5296,\n  SpvOpWritePackedPrimitiveIndices4x8NV = 5299,\n  SpvOpReportIntersectionKHR = 5334,\n  SpvOpReportIntersectionNV = 5334,\n  SpvOpIgnoreIntersectionNV = 5335,\n  SpvOpTerminateRayNV = 5336,\n  SpvOpTraceNV = 5337,\n  SpvOpTraceMotionNV = 5338,\n  SpvOpTraceRayMotionNV = 5339,\n  SpvOpRayQueryGetIntersectionTriangleVertexPositionsKHR = 5340,\n  SpvOpTypeAccelerationStructureKHR = 5341,\n  SpvOpTypeAccelerationStructureNV = 5341,\n  SpvOpExecuteCallableNV = 5344,\n  SpvOpTypeCooperativeMatrixNV = 5358,\n  SpvOpCooperativeMatrixLoadNV = 5359,\n  SpvOpCooperativeMatrixStoreNV = 5360,\n  SpvOpCooperativeMatrixMulAddNV = 5361,\n  SpvOpCooperativeMatrixLengthNV = 5362,\n  SpvOpBeginInvocationInterlockEXT = 5364,\n  SpvOpEndInvocationInterlockEXT = 5365,\n  SpvOpDemoteToHelperInvocation = 5380,\n  SpvOpDemoteToHelperInvocationEXT = 5380,\n  SpvOpIsHelperInvocationEXT = 5381,\n  SpvOpConvertUToImageNV = 5391,\n  SpvOpConvertUToSamplerNV = 5392,\n  SpvOpConvertImageToUNV = 5393,\n  SpvOpConvertSamplerToUNV = 5394,\n  SpvOpConvertUToSampledImageNV = 5395,\n  SpvOpConvertSampledImageToUNV = 5396,\n  SpvOpSamplerImageAddressingModeNV = 5397,\n  SpvOpSubgroupShuffleINTEL = 5571,\n  SpvOpSubgroupShuffleDownINTEL = 5572,\n  SpvOpSubgroupShuffleUpINTEL = 5573,\n  SpvOpSubgroupShuffleXorINTEL = 5574,\n  SpvOpSubgroupBlockReadINTEL = 5575,\n  SpvOpSubgroupBlockWriteINTEL = 5576,\n  SpvOpSubgroupImageBlockReadINTEL = 5577,\n  SpvOpSubgroupImageBlockWriteINTEL = 5578,\n  SpvOpSubgroupImageMediaBlockReadINTEL = 5580,\n  SpvOpSubgroupImageMediaBlockWriteINTEL = 5581,\n  SpvOpUCountLeadingZerosINTEL = 5585,\n  SpvOpUCountTrailingZerosINTEL = 5586,\n  SpvOpAbsISubINTEL = 5587,\n  SpvOpAbsUSubINTEL = 5588,\n  SpvOpIAddSatINTEL = 5589,\n  SpvOpUAddSatINTEL = 5590,\n  SpvOpIAverageINTEL = 5591,\n  SpvOpUAverageINTEL 
= 5592,\n  SpvOpIAverageRoundedINTEL = 5593,\n  SpvOpUAverageRoundedINTEL = 5594,\n  SpvOpISubSatINTEL = 5595,\n  SpvOpUSubSatINTEL = 5596,\n  SpvOpIMul32x16INTEL = 5597,\n  SpvOpUMul32x16INTEL = 5598,\n  SpvOpConstantFunctionPointerINTEL = 5600,\n  SpvOpFunctionPointerCallINTEL = 5601,\n  SpvOpAsmTargetINTEL = 5609,\n  SpvOpAsmINTEL = 5610,\n  SpvOpAsmCallINTEL = 5611,\n  SpvOpAtomicFMinEXT = 5614,\n  SpvOpAtomicFMaxEXT = 5615,\n  SpvOpAssumeTrueKHR = 5630,\n  SpvOpExpectKHR = 5631,\n  SpvOpDecorateString = 5632,\n  SpvOpDecorateStringGOOGLE = 5632,\n  SpvOpMemberDecorateString = 5633,\n  SpvOpMemberDecorateStringGOOGLE = 5633,\n  SpvOpVmeImageINTEL = 5699,\n  SpvOpTypeVmeImageINTEL = 5700,\n  SpvOpTypeAvcImePayloadINTEL = 5701,\n  SpvOpTypeAvcRefPayloadINTEL = 5702,\n  SpvOpTypeAvcSicPayloadINTEL = 5703,\n  SpvOpTypeAvcMcePayloadINTEL = 5704,\n  SpvOpTypeAvcMceResultINTEL = 5705,\n  SpvOpTypeAvcImeResultINTEL = 5706,\n  SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707,\n  SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708,\n  SpvOpTypeAvcImeSingleReferenceStreaminINTEL = 5709,\n  SpvOpTypeAvcImeDualReferenceStreaminINTEL = 5710,\n  SpvOpTypeAvcRefResultINTEL = 5711,\n  SpvOpTypeAvcSicResultINTEL = 5712,\n  SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713,\n  SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714,\n  SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715,\n  SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716,\n  SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717,\n  SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718,\n  SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719,\n  SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720,\n  SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721,\n  SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722,\n  SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723,\n  
SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724,\n  SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725,\n  SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726,\n  SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727,\n  SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728,\n  SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729,\n  SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730,\n  SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731,\n  SpvOpSubgroupAvcMceConvertToImePayloadINTEL = 5732,\n  SpvOpSubgroupAvcMceConvertToImeResultINTEL = 5733,\n  SpvOpSubgroupAvcMceConvertToRefPayloadINTEL = 5734,\n  SpvOpSubgroupAvcMceConvertToRefResultINTEL = 5735,\n  SpvOpSubgroupAvcMceConvertToSicPayloadINTEL = 5736,\n  SpvOpSubgroupAvcMceConvertToSicResultINTEL = 5737,\n  SpvOpSubgroupAvcMceGetMotionVectorsINTEL = 5738,\n  SpvOpSubgroupAvcMceGetInterDistortionsINTEL = 5739,\n  SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740,\n  SpvOpSubgroupAvcMceGetInterMajorShapeINTEL = 5741,\n  SpvOpSubgroupAvcMceGetInterMinorShapeINTEL = 5742,\n  SpvOpSubgroupAvcMceGetInterDirectionsINTEL = 5743,\n  SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744,\n  SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745,\n  SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746,\n  SpvOpSubgroupAvcImeInitializeINTEL = 5747,\n  SpvOpSubgroupAvcImeSetSingleReferenceINTEL = 5748,\n  SpvOpSubgroupAvcImeSetDualReferenceINTEL = 5749,\n  SpvOpSubgroupAvcImeRefWindowSizeINTEL = 5750,\n  SpvOpSubgroupAvcImeAdjustRefOffsetINTEL = 5751,\n  SpvOpSubgroupAvcImeConvertToMcePayloadINTEL = 5752,\n  SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753,\n  SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754,\n  SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755,\n  SpvOpSubgroupAvcImeSetWeightedSadINTEL = 5756,\n  SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757,\n 
 SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758,\n  SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759,\n  SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760,\n  SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761,\n  SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762,\n  SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763,\n  SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764,\n  SpvOpSubgroupAvcImeConvertToMceResultINTEL = 5765,\n  SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766,\n  SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767,\n  SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768,\n  SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769,\n  SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL =\n      5770,\n  SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL =\n      5771,\n  SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL =\n      5772,\n  SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL =\n      5773,\n  SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774,\n  SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL =\n      5775,\n  SpvOpSubgroupAvcImeGetBorderReachedINTEL = 5776,\n  SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777,\n  SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778,\n  SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779,\n  SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780,\n  SpvOpSubgroupAvcFmeInitializeINTEL = 5781,\n  SpvOpSubgroupAvcBmeInitializeINTEL = 5782,\n  SpvOpSubgroupAvcRefConvertToMcePayloadINTEL = 5783,\n  SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784,\n  SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785,\n  SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786,\n 
 SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787,\n  SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788,\n  SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789,\n  SpvOpSubgroupAvcRefConvertToMceResultINTEL = 5790,\n  SpvOpSubgroupAvcSicInitializeINTEL = 5791,\n  SpvOpSubgroupAvcSicConfigureSkcINTEL = 5792,\n  SpvOpSubgroupAvcSicConfigureIpeLumaINTEL = 5793,\n  SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794,\n  SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795,\n  SpvOpSubgroupAvcSicConvertToMcePayloadINTEL = 5796,\n  SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797,\n  SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798,\n  SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799,\n  SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800,\n  SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801,\n  SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802,\n  SpvOpSubgroupAvcSicEvaluateIpeINTEL = 5803,\n  SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804,\n  SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805,\n  SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806,\n  SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807,\n  SpvOpSubgroupAvcSicConvertToMceResultINTEL = 5808,\n  SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809,\n  SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810,\n  SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811,\n  SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812,\n  SpvOpSubgroupAvcSicGetIpeChromaModeINTEL = 5813,\n  SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,\n  SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,\n  SpvOpSubgroupAvcSicGetInterRawSadsINTEL = 5816,\n  SpvOpVariableLengthArrayINTEL = 5818,\n  SpvOpSaveMemoryINTEL = 5819,\n  SpvOpRestoreMemoryINTEL = 5820,\n  SpvOpArbitraryFloatSinCosPiINTEL = 5840,\n  SpvOpArbitraryFloatCastINTEL = 5841,\n  SpvOpArbitraryFloatCastFromIntINTEL = 5842,\n  
SpvOpArbitraryFloatCastToIntINTEL = 5843,\n  SpvOpArbitraryFloatAddINTEL = 5846,\n  SpvOpArbitraryFloatSubINTEL = 5847,\n  SpvOpArbitraryFloatMulINTEL = 5848,\n  SpvOpArbitraryFloatDivINTEL = 5849,\n  SpvOpArbitraryFloatGTINTEL = 5850,\n  SpvOpArbitraryFloatGEINTEL = 5851,\n  SpvOpArbitraryFloatLTINTEL = 5852,\n  SpvOpArbitraryFloatLEINTEL = 5853,\n  SpvOpArbitraryFloatEQINTEL = 5854,\n  SpvOpArbitraryFloatRecipINTEL = 5855,\n  SpvOpArbitraryFloatRSqrtINTEL = 5856,\n  SpvOpArbitraryFloatCbrtINTEL = 5857,\n  SpvOpArbitraryFloatHypotINTEL = 5858,\n  SpvOpArbitraryFloatSqrtINTEL = 5859,\n  SpvOpArbitraryFloatLogINTEL = 5860,\n  SpvOpArbitraryFloatLog2INTEL = 5861,\n  SpvOpArbitraryFloatLog10INTEL = 5862,\n  SpvOpArbitraryFloatLog1pINTEL = 5863,\n  SpvOpArbitraryFloatExpINTEL = 5864,\n  SpvOpArbitraryFloatExp2INTEL = 5865,\n  SpvOpArbitraryFloatExp10INTEL = 5866,\n  SpvOpArbitraryFloatExpm1INTEL = 5867,\n  SpvOpArbitraryFloatSinINTEL = 5868,\n  SpvOpArbitraryFloatCosINTEL = 5869,\n  SpvOpArbitraryFloatSinCosINTEL = 5870,\n  SpvOpArbitraryFloatSinPiINTEL = 5871,\n  SpvOpArbitraryFloatCosPiINTEL = 5872,\n  SpvOpArbitraryFloatASinINTEL = 5873,\n  SpvOpArbitraryFloatASinPiINTEL = 5874,\n  SpvOpArbitraryFloatACosINTEL = 5875,\n  SpvOpArbitraryFloatACosPiINTEL = 5876,\n  SpvOpArbitraryFloatATanINTEL = 5877,\n  SpvOpArbitraryFloatATanPiINTEL = 5878,\n  SpvOpArbitraryFloatATan2INTEL = 5879,\n  SpvOpArbitraryFloatPowINTEL = 5880,\n  SpvOpArbitraryFloatPowRINTEL = 5881,\n  SpvOpArbitraryFloatPowNINTEL = 5882,\n  SpvOpLoopControlINTEL = 5887,\n  SpvOpAliasDomainDeclINTEL = 5911,\n  SpvOpAliasScopeDeclINTEL = 5912,\n  SpvOpAliasScopeListDeclINTEL = 5913,\n  SpvOpFixedSqrtINTEL = 5923,\n  SpvOpFixedRecipINTEL = 5924,\n  SpvOpFixedRsqrtINTEL = 5925,\n  SpvOpFixedSinINTEL = 5926,\n  SpvOpFixedCosINTEL = 5927,\n  SpvOpFixedSinCosINTEL = 5928,\n  SpvOpFixedSinPiINTEL = 5929,\n  SpvOpFixedCosPiINTEL = 5930,\n  SpvOpFixedSinCosPiINTEL = 5931,\n  SpvOpFixedLogINTEL = 5932,\n  
SpvOpFixedExpINTEL = 5933,\n  SpvOpPtrCastToCrossWorkgroupINTEL = 5934,\n  SpvOpCrossWorkgroupCastToPtrINTEL = 5938,\n  SpvOpReadPipeBlockingINTEL = 5946,\n  SpvOpWritePipeBlockingINTEL = 5947,\n  SpvOpFPGARegINTEL = 5949,\n  SpvOpRayQueryGetRayTMinKHR = 6016,\n  SpvOpRayQueryGetRayFlagsKHR = 6017,\n  SpvOpRayQueryGetIntersectionTKHR = 6018,\n  SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019,\n  SpvOpRayQueryGetIntersectionInstanceIdKHR = 6020,\n  SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021,\n  SpvOpRayQueryGetIntersectionGeometryIndexKHR = 6022,\n  SpvOpRayQueryGetIntersectionPrimitiveIndexKHR = 6023,\n  SpvOpRayQueryGetIntersectionBarycentricsKHR = 6024,\n  SpvOpRayQueryGetIntersectionFrontFaceKHR = 6025,\n  SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026,\n  SpvOpRayQueryGetIntersectionObjectRayDirectionKHR = 6027,\n  SpvOpRayQueryGetIntersectionObjectRayOriginKHR = 6028,\n  SpvOpRayQueryGetWorldRayDirectionKHR = 6029,\n  SpvOpRayQueryGetWorldRayOriginKHR = 6030,\n  SpvOpRayQueryGetIntersectionObjectToWorldKHR = 6031,\n  SpvOpRayQueryGetIntersectionWorldToObjectKHR = 6032,\n  SpvOpAtomicFAddEXT = 6035,\n  SpvOpTypeBufferSurfaceINTEL = 6086,\n  SpvOpTypeStructContinuedINTEL = 6090,\n  SpvOpConstantCompositeContinuedINTEL = 6091,\n  SpvOpSpecConstantCompositeContinuedINTEL = 6092,\n  SpvOpConvertFToBF16INTEL = 6116,\n  SpvOpConvertBF16ToFINTEL = 6117,\n  SpvOpControlBarrierArriveINTEL = 6142,\n  SpvOpControlBarrierWaitINTEL = 6143,\n  SpvOpGroupIMulKHR = 6401,\n  SpvOpGroupFMulKHR = 6402,\n  SpvOpGroupBitwiseAndKHR = 6403,\n  SpvOpGroupBitwiseOrKHR = 6404,\n  SpvOpGroupBitwiseXorKHR = 6405,\n  SpvOpGroupLogicalAndKHR = 6406,\n  SpvOpGroupLogicalOrKHR = 6407,\n  SpvOpGroupLogicalXorKHR = 6408,\n  SpvOpMax = 0x7fffffff,\n} SpvOp;\n\n#ifdef SPV_ENABLE_UTILITY_CODE\n#ifndef __cplusplus\n#include <stdbool.h>\n#endif\ninline void SpvHasResultAndType(SpvOp opcode, bool* hasResult,\n                                bool* 
hasResultType) {\n  *hasResult = *hasResultType = false;\n  switch (opcode) {\n    default: /* unknown opcode */\n      break;\n    case SpvOpNop:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpUndef:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSourceContinued:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSource:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSourceExtension:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpName:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpMemberName:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpString:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpLine:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExtension:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExtInstImport:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpExtInst:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpMemoryModel:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEntryPoint:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExecutionMode:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCapability:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeVoid:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeBool:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeInt:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case 
SpvOpTypeFloat:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeVector:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeMatrix:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeImage:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeSampler:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeSampledImage:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeArray:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeRuntimeArray:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeStruct:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeOpaque:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypePointer:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeFunction:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeEvent:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeDeviceEvent:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeReserveId:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeQueue:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypePipe:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeForwardPointer:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpConstantTrue:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConstantFalse:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpConstant:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConstantComposite:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConstantSampler:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConstantNull:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSpecConstantTrue:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSpecConstantFalse:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSpecConstant:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSpecConstantComposite:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSpecConstantOp:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFunction:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFunctionParameter:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFunctionEnd:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpFunctionCall:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVariable:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageTexelPointer:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLoad:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpStore:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCopyMemory:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCopyMemorySized:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpAccessChain:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpInBoundsAccessChain:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrAccessChain:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArrayLength:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGenericPtrMemSemantics:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpInBoundsPtrAccessChain:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDecorate:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpMemberDecorate:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpDecorationGroup:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupDecorate:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupMemberDecorate:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpVectorExtractDynamic:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVectorInsertDynamic:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVectorShuffle:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCompositeConstruct:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCompositeExtract:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCompositeInsert:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCopyObject:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTranspose:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSampledImage:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleImplicitLod:\n      *hasResult = 
true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleDrefImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleDrefExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleProjImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleProjExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleProjDrefImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleProjDrefExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageFetch:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageGather:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageDrefGather:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageRead:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageWrite:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpImage:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQueryFormat:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQueryOrder:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQuerySizeLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQuerySize:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQueryLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageQueryLevels:\n      *hasResult = true;\n 
     *hasResultType = true;\n      break;\n    case SpvOpImageQuerySamples:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertFToU:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertFToS:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertSToF:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertUToF:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUConvert:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSConvert:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFConvert:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpQuantizeToF16:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertPtrToU:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSatConvertSToU:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSatConvertUToS:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertUToPtr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrCastToGeneric:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGenericCastToPtr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGenericCastToPtrExplicit:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitcast:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSNegate:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFNegate:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIAdd:\n      *hasResult = true;\n      *hasResultType = 
true;\n      break;\n    case SpvOpFAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpISub:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFSub:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIMul:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFMul:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUDiv:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSDiv:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFDiv:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUMod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSRem:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSMod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFRem:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFMod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVectorTimesScalar:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpMatrixTimesScalar:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVectorTimesMatrix:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpMatrixTimesVector:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpMatrixTimesMatrix:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpOuterProduct:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIAddCarry:\n      *hasResult = true;\n      *hasResultType = true;\n     
 break;\n    case SpvOpISubBorrow:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUMulExtended:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSMulExtended:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAny:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAll:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIsNan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIsInf:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIsFinite:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIsNormal:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSignBitSet:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLessOrGreater:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpOrdered:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUnordered:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLogicalEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLogicalNotEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLogicalOr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLogicalAnd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLogicalNot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSelect:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpINotEqual:\n      *hasResult = true;\n      *hasResultType = 
true;\n      break;\n    case SpvOpUGreaterThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSGreaterThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUGreaterThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSGreaterThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpULessThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSLessThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpULessThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSLessThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdNotEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordNotEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdLessThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordLessThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdGreaterThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordGreaterThan:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdLessThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordLessThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFOrdGreaterThanEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFUnordGreaterThanEqual:\n      *hasResult = 
true;\n      *hasResultType = true;\n      break;\n    case SpvOpShiftRightLogical:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpShiftRightArithmetic:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpShiftLeftLogical:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitwiseOr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitwiseXor:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitwiseAnd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpNot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitFieldInsert:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitFieldSExtract:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitFieldUExtract:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitReverse:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBitCount:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdx:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdy:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFwidth:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdxFine:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdyFine:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFwidthFine:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdxCoarse:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDPdyCoarse:\n      *hasResult = true;\n      *hasResultType = true;\n      
break;\n    case SpvOpFwidthCoarse:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpEmitVertex:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEndPrimitive:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEmitStreamVertex:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEndStreamPrimitive:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpControlBarrier:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpMemoryBarrier:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpAtomicLoad:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicStore:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpAtomicExchange:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicCompareExchange:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicCompareExchangeWeak:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicIIncrement:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicIDecrement:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicIAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicISub:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicSMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicUMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicSMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicUMax:\n      *hasResult = true;\n      
*hasResultType = true;\n      break;\n    case SpvOpAtomicAnd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicOr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicXor:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPhi:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLoopMerge:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSelectionMerge:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpLabel:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpBranch:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpBranchConditional:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSwitch:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpKill:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpReturn:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpReturnValue:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpUnreachable:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpLifetimeStart:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpLifetimeStop:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupAsyncCopy:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupWaitEvents:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupAll:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupAny:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpGroupBroadcast:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupIAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupUMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupSMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupUMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupSMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReadPipe:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpWritePipe:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReservedReadPipe:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReservedWritePipe:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReserveReadPipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReserveWritePipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCommitReadPipe:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCommitWritePipe:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpIsValidReserveId:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetNumPipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetMaxPipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    
case SpvOpGroupReserveReadPipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupReserveWritePipePackets:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupCommitReadPipe:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupCommitWritePipe:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEnqueueMarker:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpEnqueueKernel:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelNDrangeSubGroupCount:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelNDrangeMaxSubGroupSize:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelWorkGroupSize:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelPreferredWorkGroupSizeMultiple:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRetainEvent:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpReleaseEvent:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCreateUserEvent:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIsValidEvent:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSetUserEventStatus:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCaptureEventProfilingInfo:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGetDefaultQueue:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBuildNDRange:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleImplicitLod:\n      *hasResult = 
true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleDrefImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleDrefExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleProjImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleProjExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleProjDrefImplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseSampleProjDrefExplicitLod:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseFetch:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseGather:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseDrefGather:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSparseTexelsResident:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpNoLine:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpAtomicFlagTestAndSet:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicFlagClear:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpImageSparseRead:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSizeOf:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypePipeStorage:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpConstantPipeStorage:\n      *hasResult = true;\n      
*hasResultType = true;\n      break;\n    case SpvOpCreatePipeFromPipeStorage:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelLocalSizeForSubgroupCount:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGetKernelMaxNumSubgroups:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypeNamedBarrier:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpNamedBarrierInitialize:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpMemoryNamedBarrier:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpModuleProcessed:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExecutionModeId:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpDecorateId:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupNonUniformElect:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformAll:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformAny:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformAllEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBroadcast:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBroadcastFirst:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBallot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformInverseBallot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBallotBitExtract:\n      *hasResult = true;\n      *hasResultType = true;\n      
break;\n    case SpvOpGroupNonUniformBallotBitCount:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBallotFindLSB:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBallotFindMSB:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformShuffle:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformShuffleXor:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformShuffleUp:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformShuffleDown:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformIAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformFAdd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformIMul:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformFMul:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformSMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformUMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformFMin:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformSMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformUMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformFMax:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBitwiseAnd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpGroupNonUniformBitwiseOr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformBitwiseXor:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformLogicalAnd:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformLogicalOr:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformLogicalXor:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformQuadBroadcast:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformQuadSwap:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCopyLogical:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrNotEqual:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrDiff:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpColorAttachmentReadEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDepthAttachmentReadEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpStencilAttachmentReadEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTerminateInvocation:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSubgroupBallotKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupFirstInvocationKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAllKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAnyKHR:\n      *hasResult = true;\n      *hasResultType = 
true;\n      break;\n    case SpvOpSubgroupAllEqualKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupNonUniformRotateKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupReadInvocationKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTraceRayKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExecuteCallableKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpConvertUToAccelerationStructureKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIgnoreIntersectionKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTerminateRayKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSDot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUDot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSUDot:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSDotAccSat:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUDotAccSat:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSUDotAccSat:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypeCooperativeMatrixKHR:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpCooperativeMatrixLoadKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCooperativeMatrixStoreKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCooperativeMatrixMulAddKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCooperativeMatrixLengthKHR:\n      *hasResult = true;\n      *hasResultType = 
true;\n      break;\n    case SpvOpTypeRayQueryKHR:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryInitializeKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryTerminateKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryGenerateIntersectionKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryConfirmIntersectionKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryProceedKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionTypeKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageSampleWeightedQCOM:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageBoxFilterQCOM:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageBlockMatchSSDQCOM:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpImageBlockMatchSADQCOM:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupIAddNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFAddNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFMinNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupUMinNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupSMinNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFMaxNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupUMaxNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      
break;\n    case SpvOpGroupSMaxNonUniformAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFragmentMaskFetchAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFragmentFetchAMD:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReadClockKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectRecordHitMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectRecordHitWithIndexMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectRecordMissMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectGetWorldToObjectNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetObjectToWorldNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetObjectRayDirectionNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetObjectRayOriginNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectTraceRayMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectGetShaderRecordBufferHandleNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetShaderBindingTableRecordIndexNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectRecordEmptyNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectTraceRayNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectRecordHitNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectRecordHitWithIndexNV:\n      
*hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectRecordMissNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectExecuteShaderNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectGetCurrentTimeNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetAttributesNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpHitObjectGetHitKindNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetPrimitiveIndexNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetGeometryIndexNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetInstanceIdNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetInstanceCustomIndexNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetWorldRayDirectionNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetWorldRayOriginNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetRayTMaxNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectGetRayTMinNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectIsEmptyNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectIsHitNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpHitObjectIsMissNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReorderThreadWithHitObjectNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case 
SpvOpReorderThreadWithHintNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeHitObjectNV:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpImageSampleFootprintNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpEmitMeshTasksEXT:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSetMeshOutputsEXT:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupNonUniformPartitionNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpWritePackedPrimitiveIndices4x8NV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpReportIntersectionNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIgnoreIntersectionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTerminateRayNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTraceNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTraceMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTraceRayMotionNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpRayQueryGetIntersectionTriangleVertexPositionsKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypeAccelerationStructureNV:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpExecuteCallableNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeCooperativeMatrixNV:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpCooperativeMatrixLoadNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpCooperativeMatrixStoreNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpCooperativeMatrixMulAddNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCooperativeMatrixLengthNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpBeginInvocationInterlockEXT:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpEndInvocationInterlockEXT:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpDemoteToHelperInvocation:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpIsHelperInvocationEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertUToImageNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertUToSamplerNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertImageToUNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertSamplerToUNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertUToSampledImageNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertSampledImageToUNV:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSamplerImageAddressingModeNV:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSubgroupShuffleINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupShuffleDownINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupShuffleUpINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupShuffleXorINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpSubgroupBlockReadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupBlockWriteINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSubgroupImageBlockReadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupImageBlockWriteINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSubgroupImageMediaBlockReadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupImageMediaBlockWriteINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpUCountLeadingZerosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUCountTrailingZerosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAbsISubINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAbsUSubINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIAddSatINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUAddSatINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIAverageINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUAverageINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIAverageRoundedINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUAverageRoundedINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpISubSatINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpUSubSatINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpIMul32x16INTEL:\n      *hasResult = true;\n      *hasResultType = 
true;\n      break;\n    case SpvOpUMul32x16INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConstantFunctionPointerINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFunctionPointerCallINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAsmTargetINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAsmINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAsmCallINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicFMinEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicFMaxEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAssumeTrueKHR:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpExpectKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpDecorateString:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpMemberDecorateString:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpVmeImageINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypeVmeImageINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcImePayloadINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcRefPayloadINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcSicPayloadINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcMcePayloadINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcMceResultINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      
break;\n    case SpvOpTypeAvcImeResultINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcImeSingleReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcImeDualReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcRefResultINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpTypeAvcSicResultINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceConvertToImePayloadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceConvertToImeResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceConvertToRefPayloadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceConvertToRefResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n   
 case SpvOpSubgroupAvcMceConvertToSicPayloadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceConvertToSicResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetMotionVectorsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterDistortionsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterMajorShapeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterMinorShapeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterDirectionsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeInitializeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetSingleReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetDualReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeRefWindowSizeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeAdjustRefOffsetINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      
break;\n    case SpvOpSubgroupAvcImeConvertToMcePayloadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeSetWeightedSadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeConvertToMceResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType 
= true;\n      break;\n    case SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetBorderReachedINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL:\n      *hasResult = true;\n      
*hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcFmeInitializeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcBmeInitializeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefConvertToMcePayloadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcRefConvertToMceResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicInitializeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicConfigureSkcINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicConfigureIpeLumaINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicConvertToMcePayloadINTEL:\n      *hasResult 
= true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicEvaluateIpeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicConvertToMceResultINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      
break;\n    case SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetIpeChromaModeINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSubgroupAvcSicGetInterRawSadsINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpVariableLengthArrayINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpSaveMemoryINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRestoreMemoryINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpArbitraryFloatSinCosPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatCastINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatCastFromIntINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatCastToIntINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatAddINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatSubINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatMulINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatDivINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatGTINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpArbitraryFloatGEINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLTINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLEINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatEQINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatRecipINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatRSqrtINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatCbrtINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatHypotINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatSqrtINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLogINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLog2INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLog10INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatLog1pINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatExpINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatExp2INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatExp10INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatExpm1INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatSinINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpArbitraryFloatCosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatSinCosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatSinPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatCosPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatASinINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatASinPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatACosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatACosPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatATanINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatATanPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatATan2INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatPowINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatPowRINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpArbitraryFloatPowNINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpLoopControlINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpAliasDomainDeclINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpAliasScopeDeclINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case SpvOpAliasScopeListDeclINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case 
SpvOpFixedSqrtINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedRecipINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedRsqrtINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedSinINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedCosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedSinCosINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedSinPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedCosPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedSinCosPiINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedLogINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFixedExpINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpPtrCastToCrossWorkgroupINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpCrossWorkgroupCastToPtrINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpReadPipeBlockingINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpWritePipeBlockingINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpFPGARegINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetRayTMinKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetRayFlagsKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionTKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case 
SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionInstanceIdKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionGeometryIndexKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionPrimitiveIndexKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionBarycentricsKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionFrontFaceKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionObjectRayDirectionKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionObjectRayOriginKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetWorldRayDirectionKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetWorldRayOriginKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionObjectToWorldKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpRayQueryGetIntersectionWorldToObjectKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpAtomicFAddEXT:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpTypeBufferSurfaceINTEL:\n      *hasResult = true;\n      *hasResultType = false;\n      break;\n    case 
SpvOpTypeStructContinuedINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpConstantCompositeContinuedINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpSpecConstantCompositeContinuedINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpConvertFToBF16INTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpConvertBF16ToFINTEL:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpControlBarrierArriveINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpControlBarrierWaitINTEL:\n      *hasResult = false;\n      *hasResultType = false;\n      break;\n    case SpvOpGroupIMulKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupFMulKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupBitwiseAndKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupBitwiseOrKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupBitwiseXorKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupLogicalAndKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupLogicalOrKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n    case SpvOpGroupLogicalXorKHR:\n      *hasResult = true;\n      *hasResultType = true;\n      break;\n  }\n}\n#endif /* SPV_ENABLE_UTILITY_CODE */\n\n#endif\n"
  },
  {
    "path": "deps/SPIRV-reflect/spirv_reflect.c",
    "content": "/*\n Copyright 2017-2022 Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*/\n\n#include \"spirv_reflect.h\"\n\n#include <assert.h>\n#include <stdbool.h>\n#include <string.h>\n\n#if defined(WIN32)\n#define _CRTDBG_MAP_ALLOC\n#include <crtdbg.h>\n#include <stdlib.h>\n#else\n#include <stdlib.h>\n#endif\n\n#if defined(__clang__) || defined(__GNUC__) || defined(__APPLE_CC__)\n#define FALLTHROUGH __attribute__((fallthrough))\n#else\n#define FALLTHROUGH\n#endif\n\n#if defined(SPIRV_REFLECT_ENABLE_ASSERTS)\n#define SPV_REFLECT_ASSERT(COND) assert(COND);\n#else\n#define SPV_REFLECT_ASSERT(COND)\n#endif\n\n// clang-format off\nenum {\n  SPIRV_STARTING_WORD_INDEX       = 5,\n  SPIRV_WORD_SIZE                 = sizeof(uint32_t),\n  SPIRV_BYTE_WIDTH                = 8,\n  SPIRV_MINIMUM_FILE_SIZE         = SPIRV_STARTING_WORD_INDEX * SPIRV_WORD_SIZE,\n  SPIRV_DATA_ALIGNMENT            = 4 * SPIRV_WORD_SIZE, // 16\n  SPIRV_ACCESS_CHAIN_INDEX_OFFSET = 4,\n};\n\nenum {\n  INVALID_VALUE  = 0xFFFFFFFF,\n};\n\nenum {\n  MAX_NODE_NAME_LENGTH        = 1024,\n  // Number of unique PhysicalStorageBuffer structs tracked to detect recursion\n  MAX_RECURSIVE_PHYSICAL_POINTER_CHECK = 128,\n};\n\nenum {\n  IMAGE_SAMPLED = 1,\n  IMAGE_STORAGE = 2,\n};\n\ntypedef struct SpvReflectPrvArrayTraits {\n  uint32_t                        element_type_id;\n  uint32_t                        length_id;\n} SpvReflectPrvArrayTraits;\n\ntypedef struct SpvReflectPrvImageTraits {\n  uint32_t   
                     sampled_type_id;\n  SpvDim                          dim;\n  uint32_t                        depth;\n  uint32_t                        arrayed;\n  uint32_t                        ms;\n  uint32_t                        sampled;\n  SpvImageFormat                  image_format;\n} SpvReflectPrvImageTraits;\n\ntypedef struct SpvReflectPrvNumberDecoration {\n  uint32_t                        word_offset;\n  uint32_t                        value;\n} SpvReflectPrvNumberDecoration;\n\ntypedef struct SpvReflectPrvStringDecoration {\n  uint32_t                        word_offset;\n  const char*                     value;\n} SpvReflectPrvStringDecoration;\n\ntypedef struct SpvReflectPrvDecorations {\n  bool                            is_relaxed_precision;\n  bool                            is_block;\n  bool                            is_buffer_block;\n  bool                            is_row_major;\n  bool                            is_column_major;\n  bool                            is_built_in;\n  bool                            is_noperspective;\n  bool                            is_flat;\n  bool                            is_non_writable;\n  bool                            is_non_readable;\n  bool                            is_patch;\n  bool                            is_per_vertex;\n  bool                            is_per_task;\n  bool                            is_weight_texture;\n  bool                            is_block_match_texture;\n  SpvReflectUserType              user_type;\n  SpvReflectPrvNumberDecoration   set;\n  SpvReflectPrvNumberDecoration   binding;\n  SpvReflectPrvNumberDecoration   input_attachment_index;\n  SpvReflectPrvNumberDecoration   location;\n  SpvReflectPrvNumberDecoration   component;\n  SpvReflectPrvNumberDecoration   offset;\n  SpvReflectPrvNumberDecoration   uav_counter_buffer;\n  SpvReflectPrvStringDecoration   semantic;\n  uint32_t                        array_stride;\n  uint32_t                        
matrix_stride;\n  uint32_t                        spec_id;\n  SpvBuiltIn                      built_in;\n} SpvReflectPrvDecorations;\n\ntypedef struct SpvReflectPrvNode {\n  uint32_t                        result_id;\n  SpvOp                           op;\n  uint32_t                        result_type_id;\n  uint32_t                        type_id;\n  SpvCapability                   capability;\n  SpvStorageClass                 storage_class;\n  uint32_t                        word_offset;\n  uint32_t                        word_count;\n  bool                            is_type;\n\n  SpvReflectPrvArrayTraits        array_traits;\n  SpvReflectPrvImageTraits        image_traits;\n  uint32_t                        image_type_id;\n\n  const char*                     name;\n  SpvReflectPrvDecorations        decorations;\n  uint32_t                        member_count;\n  const char**                    member_names;\n  SpvReflectPrvDecorations*       member_decorations;\n} SpvReflectPrvNode;\n\ntypedef struct SpvReflectPrvString {\n  uint32_t                        result_id;\n  const char*                     string;\n} SpvReflectPrvString;\n\n// There are a limit set of instructions that can touch an OpVariable,\n// these are represented here with how it was accessed\n// Examples:\n//    OpImageRead  -> OpLoad -> OpVariable\n//    OpImageWrite -> OpLoad -> OpVariable\n//    OpStore      -> OpAccessChain -> OpAccessChain -> OpVariable\n//    OpAtomicIAdd -> OpAccessChain -> OpVariable\n//    OpAtomicLoad -> OpImageTexelPointer -> OpVariable\ntypedef struct SpvReflectPrvAccessedVariable {\n  SpvReflectPrvNode*     p_node;\n  uint32_t               result_id;\n  uint32_t               variable_ptr;\n} SpvReflectPrvAccessedVariable;\n\ntypedef struct SpvReflectPrvFunction {\n  uint32_t                        id;\n  uint32_t                        callee_count;\n  uint32_t*                       callees;\n  struct SpvReflectPrvFunction**  callee_ptrs;\n  uint32_t          
              accessed_variable_count;\n  SpvReflectPrvAccessedVariable*  accessed_variables;\n} SpvReflectPrvFunction;\n\ntypedef struct SpvReflectPrvAccessChain {\n  uint32_t                        result_id;\n  uint32_t                        result_type_id;\n  //\n  // Pointing to the base of a composite object.\n  // Generally the id of descriptor block variable\n  uint32_t                        base_id;\n  //\n  // From spec:\n  //   The first index in Indexes will select the\n  //   top-level member/element/component/element\n  //   of the base composite\n  uint32_t                        index_count;\n  uint32_t*                       indexes;\n  //\n  // Block variable ac is pointing to (for block references)\n  SpvReflectBlockVariable*        block_var;\n} SpvReflectPrvAccessChain;\n\n// To prevent infinite recursion, we never walk down a\n// PhysicalStorageBuffer struct twice, but incase a 2nd variable\n// needs to use that struct, save a copy\ntypedef struct SpvReflectPrvPhysicalPointerStruct {\n    uint32_t struct_id;\n    // first variable to see the PhysicalStorageBuffer struct\n    SpvReflectBlockVariable* p_var;\n} SpvReflectPrvPhysicalPointerStruct;\n\ntypedef struct SpvReflectPrvParser {\n  size_t                          spirv_word_count;\n  uint32_t*                       spirv_code;\n  uint32_t                        string_count;\n  SpvReflectPrvString*            strings;\n  SpvSourceLanguage               source_language;\n  uint32_t                        source_language_version;\n  uint32_t                        source_file_id;\n  const char*                     source_embedded;\n  size_t                          node_count;\n  SpvReflectPrvNode*              nodes;\n  uint32_t                        entry_point_count;\n  uint32_t                        capability_count;\n  uint32_t                        function_count;\n  SpvReflectPrvFunction*          functions;\n  uint32_t                        access_chain_count;\n  
SpvReflectPrvAccessChain*       access_chains;\n\n  uint32_t                        type_count;\n  uint32_t                        descriptor_count;\n  uint32_t                        push_constant_count;\n\n  SpvReflectTypeDescription*      physical_pointer_check[MAX_RECURSIVE_PHYSICAL_POINTER_CHECK];\n  uint32_t                        physical_pointer_count;\n\n  SpvReflectPrvPhysicalPointerStruct* physical_pointer_structs;\n  uint32_t                            physical_pointer_struct_count;\n} SpvReflectPrvParser;\n// clang-format on\n\nstatic uint32_t Max(uint32_t a, uint32_t b) { return a > b ? a : b; }\nstatic uint32_t Min(uint32_t a, uint32_t b) { return a < b ? a : b; }\n\nstatic uint32_t RoundUp(uint32_t value, uint32_t multiple) {\n  assert(multiple && ((multiple & (multiple - 1)) == 0));\n  return (value + multiple - 1) & ~(multiple - 1);\n}\n\n#define IsNull(ptr) (ptr == NULL)\n\n#define IsNotNull(ptr) (ptr != NULL)\n\n#define SafeFree(ptr) \\\n  {                   \\\n    free((void*)ptr); \\\n    ptr = NULL;       \\\n  }\n\nstatic int SortCompareUint32(const void* a, const void* b) {\n  const uint32_t* p_a = (const uint32_t*)a;\n  const uint32_t* p_b = (const uint32_t*)b;\n\n  return (int)*p_a - (int)*p_b;\n}\n\nstatic int SortCompareAccessedVariable(const void* a, const void* b) {\n  const SpvReflectPrvAccessedVariable* p_a = (const SpvReflectPrvAccessedVariable*)a;\n  const SpvReflectPrvAccessedVariable* p_b = (const SpvReflectPrvAccessedVariable*)b;\n\n  return (int)p_a->variable_ptr - (int)p_b->variable_ptr;\n}\n\n//\n// De-duplicates a sorted array and returns the new size.\n//\n// Note: The array doesn't actually need to be sorted, just\n// arranged into \"runs\" so that all the entries with one\n// value are adjacent.\n//\nstatic size_t DedupSortedUint32(uint32_t* arr, size_t size) {\n  if (size == 0) {\n    return 0;\n  }\n  size_t dedup_idx = 0;\n  for (size_t i = 0; i < size; ++i) {\n    if (arr[dedup_idx] != arr[i]) {\n      
++dedup_idx;\n      arr[dedup_idx] = arr[i];\n    }\n  }\n  return dedup_idx + 1;\n}\n\nstatic bool SearchSortedUint32(const uint32_t* arr, size_t size, uint32_t target) {\n  size_t lo = 0;\n  size_t hi = size;\n  while (lo < hi) {\n    size_t mid = (hi - lo) / 2 + lo;\n    if (arr[mid] == target) {\n      return true;\n    } else if (arr[mid] < target) {\n      lo = mid + 1;\n    } else {\n      hi = mid;\n    }\n  }\n  return false;\n}\n\nstatic SpvReflectResult IntersectSortedAccessedVariable(const SpvReflectPrvAccessedVariable* p_arr0, size_t arr0_size,\n                                                        const uint32_t* p_arr1, size_t arr1_size, uint32_t** pp_res,\n                                                        size_t* res_size) {\n  *pp_res = NULL;\n  *res_size = 0;\n  if (IsNull(p_arr0) || IsNull(p_arr1)) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  const SpvReflectPrvAccessedVariable* p_arr0_end = p_arr0 + arr0_size;\n  const uint32_t* p_arr1_end = p_arr1 + arr1_size;\n\n  const SpvReflectPrvAccessedVariable* p_idx0 = p_arr0;\n  const uint32_t* p_idx1 = p_arr1;\n  while (p_idx0 != p_arr0_end && p_idx1 != p_arr1_end) {\n    if (p_idx0->variable_ptr < *p_idx1) {\n      ++p_idx0;\n    } else if (p_idx0->variable_ptr > *p_idx1) {\n      ++p_idx1;\n    } else {\n      ++*res_size;\n      ++p_idx0;\n      ++p_idx1;\n    }\n  }\n\n  if (*res_size > 0) {\n    *pp_res = (uint32_t*)calloc(*res_size, sizeof(**pp_res));\n    if (IsNull(*pp_res)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n    uint32_t* p_idxr = *pp_res;\n    p_idx0 = p_arr0;\n    p_idx1 = p_arr1;\n    while (p_idx0 != p_arr0_end && p_idx1 != p_arr1_end) {\n      if (p_idx0->variable_ptr < *p_idx1) {\n        ++p_idx0;\n      } else if (p_idx0->variable_ptr > *p_idx1) {\n        ++p_idx1;\n      } else {\n        *(p_idxr++) = p_idx0->variable_ptr;\n        ++p_idx0;\n        ++p_idx1;\n      }\n    }\n  }\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic bool 
InRange(const SpvReflectPrvParser* p_parser, uint32_t index) {\n  bool in_range = false;\n  if (IsNotNull(p_parser)) {\n    in_range = (index < p_parser->spirv_word_count);\n  }\n  return in_range;\n}\n\nstatic SpvReflectResult ReadU32(SpvReflectPrvParser* p_parser, uint32_t word_offset, uint32_t* p_value) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n  assert(InRange(p_parser, word_offset));\n  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, word_offset)) {\n    *p_value = *(p_parser->spirv_code + word_offset);\n    result = SPV_REFLECT_RESULT_SUCCESS;\n  }\n  return result;\n}\n\n#define UNCHECKED_READU32(parser, word_offset, value) \\\n  { (void)ReadU32(parser, word_offset, (uint32_t*)&(value)); }\n\n#define CHECKED_READU32(parser, word_offset, value)                                              \\\n  {                                                                                              \\\n    SpvReflectResult checked_readu32_result = ReadU32(parser, word_offset, (uint32_t*)&(value)); \\\n    if (checked_readu32_result != SPV_REFLECT_RESULT_SUCCESS) {                                  \\\n      return checked_readu32_result;                                                             \\\n    }                                                                                            \\\n  }\n\n#define CHECKED_READU32_CAST(parser, word_offset, cast_to_type, value)                                                   \\\n  {                                                                                                                      \\\n    uint32_t checked_readu32_cast_u32 = UINT32_MAX;                                                                      \\\n    SpvReflectResult checked_readu32_cast_result = ReadU32(parser, word_offset, (uint32_t*)&(checked_readu32_cast_u32)); \\\n    if 
(checked_readu32_cast_result != SPV_REFLECT_RESULT_SUCCESS) {                                                     \\\n      return checked_readu32_cast_result;                                                                                \\\n    }                                                                                                                    \\\n    value = (cast_to_type)checked_readu32_cast_u32;                                                                      \\\n  }\n\n#define IF_READU32(result, parser, word_offset, value)          \\\n  if ((result) == SPV_REFLECT_RESULT_SUCCESS) {                 \\\n    result = ReadU32(parser, word_offset, (uint32_t*)&(value)); \\\n  }\n\n#define IF_READU32_CAST(result, parser, word_offset, cast_to_type, value) \\\n  if ((result) == SPV_REFLECT_RESULT_SUCCESS) {                           \\\n    uint32_t if_readu32_cast_u32 = UINT32_MAX;                            \\\n    result = ReadU32(parser, word_offset, &if_readu32_cast_u32);          \\\n    if ((result) == SPV_REFLECT_RESULT_SUCCESS) {                         \\\n      value = (cast_to_type)if_readu32_cast_u32;                          \\\n    }                                                                     \\\n  }\n\nstatic SpvReflectResult ReadStr(SpvReflectPrvParser* p_parser, uint32_t word_offset, uint32_t word_index, uint32_t word_count,\n                                uint32_t* p_buf_size, char* p_buf) {\n  uint32_t limit = (word_offset + word_count);\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n  assert(InRange(p_parser, limit));\n  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, limit)) {\n    const char* c_str = (const char*)(p_parser->spirv_code + word_offset + word_index);\n    uint32_t n = word_count * SPIRV_WORD_SIZE;\n    uint32_t length_with_terminator = 0;\n    for (uint32_t i = 0; i < 
n; ++i) {\n      char c = *(c_str + i);\n      if (c == 0) {\n        length_with_terminator = i + 1;\n        break;\n      }\n    }\n\n    if (length_with_terminator > 0) {\n      result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n      if (IsNotNull(p_buf_size) && IsNotNull(p_buf)) {\n        result = SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;\n        if (length_with_terminator <= *p_buf_size) {\n          memset(p_buf, 0, *p_buf_size);\n          memcpy(p_buf, c_str, length_with_terminator);\n          result = SPV_REFLECT_RESULT_SUCCESS;\n        }\n      } else {\n        if (IsNotNull(p_buf_size)) {\n          *p_buf_size = length_with_terminator;\n          result = SPV_REFLECT_RESULT_SUCCESS;\n        }\n      }\n    }\n  }\n  return result;\n}\n\nstatic SpvReflectDecorationFlags ApplyDecorations(const SpvReflectPrvDecorations* p_decoration_fields) {\n  SpvReflectDecorationFlags decorations = SPV_REFLECT_DECORATION_NONE;\n  if (p_decoration_fields->is_relaxed_precision) {\n    decorations |= SPV_REFLECT_DECORATION_RELAXED_PRECISION;\n  }\n  if (p_decoration_fields->is_block) {\n    decorations |= SPV_REFLECT_DECORATION_BLOCK;\n  }\n  if (p_decoration_fields->is_buffer_block) {\n    decorations |= SPV_REFLECT_DECORATION_BUFFER_BLOCK;\n  }\n  if (p_decoration_fields->is_row_major) {\n    decorations |= SPV_REFLECT_DECORATION_ROW_MAJOR;\n  }\n  if (p_decoration_fields->is_column_major) {\n    decorations |= SPV_REFLECT_DECORATION_COLUMN_MAJOR;\n  }\n  if (p_decoration_fields->is_built_in) {\n    decorations |= SPV_REFLECT_DECORATION_BUILT_IN;\n  }\n  if (p_decoration_fields->is_noperspective) {\n    decorations |= SPV_REFLECT_DECORATION_NOPERSPECTIVE;\n  }\n  if (p_decoration_fields->is_flat) {\n    decorations |= SPV_REFLECT_DECORATION_FLAT;\n  }\n  if (p_decoration_fields->is_non_writable) {\n    decorations |= SPV_REFLECT_DECORATION_NON_WRITABLE;\n  }\n  if (p_decoration_fields->is_non_readable) {\n    decorations |= SPV_REFLECT_DECORATION_NON_READABLE;\n  
  }
  if (p_decoration_fields->is_patch) {
    decorations |= SPV_REFLECT_DECORATION_PATCH;
  }
  if (p_decoration_fields->is_per_vertex) {
    decorations |= SPV_REFLECT_DECORATION_PER_VERTEX;
  }
  if (p_decoration_fields->is_per_task) {
    decorations |= SPV_REFLECT_DECORATION_PER_TASK;
  }
  if (p_decoration_fields->is_weight_texture) {
    decorations |= SPV_REFLECT_DECORATION_WEIGHT_TEXTURE;
  }
  if (p_decoration_fields->is_block_match_texture) {
    decorations |= SPV_REFLECT_DECORATION_BLOCK_MATCH_TEXTURE;
  }
  return decorations;
}

// Copies a type's numeric traits (scalar/vector/matrix layout) into the output.
static void ApplyNumericTraits(const SpvReflectTypeDescription* p_type, SpvReflectNumericTraits* p_numeric_traits) {
  memcpy(p_numeric_traits, &p_type->traits.numeric, sizeof(p_type->traits.numeric));
}

// Copies a type's array traits into the output.
static void ApplyArrayTraits(const SpvReflectTypeDescription* p_type, SpvReflectArrayTraits* p_array_traits) {
  memcpy(p_array_traits, &p_type->traits.array, sizeof(p_type->traits.array));
}

// True for any of the four OpSpecConstant* instructions tracked by the parser.
static bool IsSpecConstant(const SpvReflectPrvNode* p_node) {
  return (p_node->op == SpvOpSpecConstant || p_node->op == SpvOpSpecConstantOp || p_node->op == SpvOpSpecConstantTrue ||
          p_node->op == SpvOpSpecConstantFalse);
}

// Linear search of the parsed node array by SPIR-V result id; returns NULL
// when no parsed instruction produced that id.
static SpvReflectPrvNode* FindNode(SpvReflectPrvParser* p_parser, uint32_t result_id) {
  SpvReflectPrvNode* p_node = NULL;
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_elem = &(p_parser->nodes[i]);
    if (p_elem->result_id == result_id) {
      p_node = p_elem;
      break;
    }
  }
  return p_node;
}

// Linear search of the module's type descriptions by type id; NULL if absent.
static SpvReflectTypeDescription* FindType(SpvReflectShaderModule* p_module, uint32_t type_id) {
  SpvReflectTypeDescription* p_type = NULL;
  for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) {
    SpvReflectTypeDescription* p_elem = &(p_module->_internal->type_descriptions[i]);
    if (p_elem->id == type_id) {
      p_type = p_elem;
      break;
    }
  }
  return p_type;
}

// Linear search of the recorded OpAccessChain instructions by result id.
static
SpvReflectPrvAccessChain* FindAccessChain(SpvReflectPrvParser* p_parser, uint32_t id) {
  uint32_t ac_cnt = p_parser->access_chain_count;
  for (uint32_t i = 0; i < ac_cnt; i++) {
    if (p_parser->access_chains[i].result_id == id) {
      return &p_parser->access_chains[i];
    }
  }
  return 0;
}

// Walks an access chain back to the OpVariable that roots it, following
// OpLoad / OpFunctionParameter links through intermediate access chains.
// Returns 0 when the chain cannot be resolved to a variable.
static uint32_t FindBaseId(SpvReflectPrvParser* p_parser, SpvReflectPrvAccessChain* ac) {
  uint32_t base_id = ac->base_id;
  SpvReflectPrvNode* base_node = FindNode(p_parser, base_id);
  // TODO - This is just a band-aid to fix crashes.
  // Need to understand why here and hopefully remove
  // https://github.com/KhronosGroup/SPIRV-Reflect/pull/206
  if (IsNull(base_node)) {
    return 0;
  }
  while (base_node->op != SpvOpVariable) {
    switch (base_node->op) {
      case SpvOpLoad: {
        // Word 3 of OpLoad is the pointer operand being loaded.
        UNCHECKED_READU32(p_parser, base_node->word_offset + 3, base_id);
      } break;
      case SpvOpFunctionParameter: {
        // Word 2 of OpFunctionParameter is its result id.
        UNCHECKED_READU32(p_parser, base_node->word_offset + 2, base_id);
      } break;
      default: {
        assert(false);
      } break;
    }

    SpvReflectPrvAccessChain* base_ac = FindAccessChain(p_parser, base_id);
    if (base_ac == 0) {
      return 0;
    }
    base_id = base_ac->base_id;
    base_node = FindNode(p_parser, base_id);
    if (IsNull(base_node)) {
      return 0;
    }
  }
  return base_id;
}

// Resolves the block variable referenced through an access chain whose base is
// an OpLoad of another access chain's result. Asserts (rather than returns an
// error) on malformed chains.
static SpvReflectBlockVariable* GetRefBlkVar(SpvReflectPrvParser* p_parser, SpvReflectPrvAccessChain* ac) {
  uint32_t base_id = ac->base_id;
  SpvReflectPrvNode* base_node = FindNode(p_parser, base_id);
  assert(base_node->op == SpvOpLoad);
  UNCHECKED_READU32(p_parser, base_node->word_offset + 3, base_id);
  SpvReflectPrvAccessChain* base_ac = FindAccessChain(p_parser, base_id);
  assert(base_ac != 0);
  SpvReflectBlockVariable* base_var = base_ac->block_var;
  assert(base_var != 0);
  return base_var;
}

// True when 'type_id' names an OpTypePointer whose pointee type is itself an
// OpTypePointer (pointer-to-pointer double indirection).
bool IsPointerToPointer(SpvReflectPrvParser* p_parser, uint32_t type_id) {
SpvReflectPrvNode* ptr_node = FindNode(p_parser, type_id);
  if (IsNull(ptr_node) || (ptr_node->op != SpvOpTypePointer)) {
    return false;
  }
  uint32_t pte_id = 0;
  // Word 3 of OpTypePointer is the pointee type id.
  UNCHECKED_READU32(p_parser, ptr_node->word_offset + 3, pte_id);
  SpvReflectPrvNode* pte_node = FindNode(p_parser, pte_id);
  if (IsNull(pte_node)) {
    return false;
  }
  return pte_node->op == SpvOpTypePointer;
}

// Validates the raw SPIR-V blob (size, word alignment, magic number) and
// primes the parser with the word stream. Does not copy; p_code must outlive
// the parser.
static SpvReflectResult CreateParser(size_t size, void* p_code, SpvReflectPrvParser* p_parser) {
  if (p_code == NULL) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }

  if (size < SPIRV_MINIMUM_FILE_SIZE) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE;
  }
  // SPIR-V is a stream of 32-bit words; a ragged tail is malformed.
  if ((size % 4) != 0) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE;
  }

  p_parser->spirv_word_count = size / SPIRV_WORD_SIZE;
  p_parser->spirv_code = (uint32_t*)p_code;

  if (p_parser->spirv_code[0] != SpvMagicNumber) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER;
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Releases everything the parser allocated: per-node member arrays, function
// bookkeeping, access-chain index arrays, and the top-level arrays themselves.
static void DestroyParser(SpvReflectPrvParser* p_parser) {
  if (!IsNull(p_parser->nodes)) {
    // Free nodes
    for (size_t i = 0; i < p_parser->node_count; ++i) {
      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
      if (IsNotNull(p_node->member_names)) {
        SafeFree(p_node->member_names);
      }
      if (IsNotNull(p_node->member_decorations)) {
        SafeFree(p_node->member_decorations);
      }
    }

    // Free functions
    for (size_t i = 0; i < p_parser->function_count; ++i) {
      SafeFree(p_parser->functions[i].callees);
      SafeFree(p_parser->functions[i].callee_ptrs);
      SafeFree(p_parser->functions[i].accessed_variables);
    }

    // Free access chains
    for (uint32_t i = 0; i < p_parser->access_chain_count; ++i) {
      SafeFree(p_parser->access_chains[i].indexes);
    }

    SafeFree(p_parser->nodes);
    SafeFree(p_parser->strings);
SafeFree(p_parser->source_embedded);
    SafeFree(p_parser->functions);
    SafeFree(p_parser->access_chains);

    if (IsNotNull(p_parser->physical_pointer_structs)) {
      SafeFree(p_parser->physical_pointer_structs);
    }
    p_parser->node_count = 0;
  }
}

// First full pass over the instruction stream. Pass 1 counts instructions and
// access chains; after allocating the node/access-chain arrays, pass 2 records
// per-instruction data (result ids, type info, access-chain indices) that the
// later Parse* passes resolve.
static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser) {
  assert(IsNotNull(p_parser));
  assert(IsNotNull(p_parser->spirv_code));

  uint32_t* p_spirv = p_parser->spirv_code;
  uint32_t spirv_word_index = SPIRV_STARTING_WORD_INDEX;

  // Count nodes
  uint32_t node_count = 0;
  while (spirv_word_index < p_parser->spirv_word_count) {
    uint32_t word = p_spirv[spirv_word_index];
    // Instruction header word: low 16 bits opcode, high 16 bits word count.
    SpvOp op = (SpvOp)(word & 0xFFFF);
    uint32_t node_word_count = (word >> 16) & 0xFFFF;
    // A zero word count would loop forever; reject the module.
    if (node_word_count == 0) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION;
    }
    if (op == SpvOpAccessChain) {
      ++(p_parser->access_chain_count);
    }
    spirv_word_index += node_word_count;
    ++node_count;
  }

  if (node_count == 0) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;
  }

  // Allocate nodes
  p_parser->node_count = node_count;
  p_parser->nodes = (SpvReflectPrvNode*)calloc(p_parser->node_count, sizeof(*(p_parser->nodes)));
  if (IsNull(p_parser->nodes)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }
  // Mark all nodes with an invalid state
  for (uint32_t i = 0; i < node_count; ++i) {
    p_parser->nodes[i].op = (SpvOp)INVALID_VALUE;
    p_parser->nodes[i].storage_class = (SpvStorageClass)INVALID_VALUE;
    p_parser->nodes[i].decorations.set.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.binding.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.location.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.component.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.offset.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.uav_counter_buffer.value = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.spec_id = (uint32_t)INVALID_VALUE;
    p_parser->nodes[i].decorations.built_in = (SpvBuiltIn)INVALID_VALUE;
  }
  // Mark source file id node
  p_parser->source_file_id = (uint32_t)INVALID_VALUE;
  p_parser->source_embedded = NULL;

  // Function node
  uint32_t function_node = (uint32_t)INVALID_VALUE;

  // Allocate access chain
  if (p_parser->access_chain_count > 0) {
    p_parser->access_chains = (SpvReflectPrvAccessChain*)calloc(p_parser->access_chain_count, sizeof(*(p_parser->access_chains)));
    if (IsNull(p_parser->access_chains)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }

  // Parse nodes
  uint32_t node_index = 0;
  uint32_t access_chain_index = 0;
  spirv_word_index = SPIRV_STARTING_WORD_INDEX;
  while (spirv_word_index < p_parser->spirv_word_count) {
    uint32_t word = p_spirv[spirv_word_index];
    SpvOp op = (SpvOp)(word & 0xFFFF);
    uint32_t node_word_count = (word >> 16) & 0xFFFF;

    SpvReflectPrvNode* p_node = &(p_parser->nodes[node_index]);
    p_node->op = op;
    p_node->word_offset = spirv_word_index;
    p_node->word_count = node_word_count;

    switch (p_node->op) {
      default:
        break;

      case SpvOpString: {
        ++(p_parser->string_count);
      } break;

      case SpvOpSource: {
        CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvSourceLanguage, p_parser->source_language);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_parser->source_language_version);
        if (p_node->word_count >= 4) {
          CHECKED_READU32(p_parser, p_node->word_offset + 3, p_parser->source_file_id);
        }
        // Optional trailing operand: source text embedded in the module.
        if (p_node->word_count >= 5) {
          const char* p_source = (const char*)(p_parser->spirv_code + p_node->word_offset + 4);

          const size_t source_len = strlen(p_source);
          char* p_source_temp = (char*)calloc(source_len + 1, sizeof(char));

          if (IsNull(p_source_temp)) {
            return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
          }

#ifdef _WIN32
          strcpy_s(p_source_temp, source_len + 1, p_source);
#else
          strcpy(p_source_temp, p_source);
#endif

          // Replace (not append) any previously seen embedded source.
          SafeFree(p_parser->source_embedded);
          p_parser->source_embedded = p_source_temp;
        }
      } break;

      case SpvOpSourceContinued: {
        const char* p_source = (const char*)(p_parser->spirv_code + p_node->word_offset + 1);

        const size_t source_len = strlen(p_source);
        // NOTE(review): assumes a prior OpSource already populated
        // source_embedded; a malformed module whose first source instruction
        // is OpSourceContinued would pass NULL to strlen here — confirm.
        const size_t embedded_source_len = strlen(p_parser->source_embedded);
        char* p_continued_source = (char*)calloc(source_len + embedded_source_len + 1, sizeof(char));

        if (IsNull(p_continued_source)) {
          return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
        }

#ifdef _WIN32
        strcpy_s(p_continued_source, embedded_source_len + 1, p_parser->source_embedded);
        strcat_s(p_continued_source, embedded_source_len + source_len + 1, p_source);
#else
        strcpy(p_continued_source, p_parser->source_embedded);
        strcat(p_continued_source, p_source);
#endif

        SafeFree(p_parser->source_embedded);
        p_parser->source_embedded = p_continued_source;
      } break;

      case SpvOpEntryPoint: {
        ++(p_parser->entry_point_count);
      } break;

      case SpvOpCapability: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->capability);
        ++(p_parser->capability_count);
      } break;

      case SpvOpName:
      case SpvOpMemberName: {
        // OpMemberName carries an extra 'member index' operand before the string.
        uint32_t member_offset = (p_node->op == SpvOpMemberName) ? 1 : 0;
        uint32_t name_start = p_node->word_offset + member_offset + 2;
        p_node->name = (const char*)(p_parser->spirv_code + name_start);
      } break;

      case SpvOpTypeStruct: {
        // Word count minus header and result id = number of member type ids.
        p_node->member_count = p_node->word_count - 2;
        FALLTHROUGH;
      }  // Fall through

      // This is all the rest of OpType* that need to be tracked
      // Possible new extensions might expose new type, will need to be added
      // here
      case SpvOpTypeVoid:
      case SpvOpTypeBool:
      case SpvOpTypeInt:
      case SpvOpTypeFloat:
      case SpvOpTypeVector:
      case SpvOpTypeMatrix:
      case SpvOpTypeSampler:
      case SpvOpTypeOpaque:
      case SpvOpTypeFunction:
      case SpvOpTypeEvent:
      case SpvOpTypeDeviceEvent:
      case SpvOpTypeReserveId:
      case SpvOpTypeQueue:
      case SpvOpTypePipe:
      case SpvOpTypeAccelerationStructureKHR:
      case SpvOpTypeRayQueryKHR:
      case SpvOpTypeHitObjectNV:
      case SpvOpTypeCooperativeMatrixNV:
      case SpvOpTypeCooperativeMatrixKHR: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        p_node->is_type = true;
      } break;

      case SpvOpTypeImage: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_traits.sampled_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->image_traits.dim);
        CHECKED_READU32(p_parser, p_node->word_offset + 4, p_node->image_traits.depth);
        CHECKED_READU32(p_parser, p_node->word_offset + 5, p_node->image_traits.arrayed);
        CHECKED_READU32(p_parser, p_node->word_offset + 6, p_node->image_traits.ms);
        CHECKED_READU32(p_parser, p_node->word_offset + 7, p_node->image_traits.sampled);
        CHECKED_READU32(p_parser, p_node->word_offset + 8, p_node->image_traits.image_format);
        p_node->is_type = true;
      } break;

      case SpvOpTypeSampledImage: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_type_id);
        p_node->is_type = true;
      } break;

      case SpvOpTypeArray: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->array_traits.element_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->array_traits.length_id);
        p_node->is_type = true;
      } break;

      case SpvOpTypeRuntimeArray: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->array_traits.element_type_id);
        p_node->is_type = true;
      } break;

      case SpvOpTypePointer: {
        uint32_t result_id;
        CHECKED_READU32(p_parser, p_node->word_offset + 1, result_id);
        // Look for forward pointer. Clear result id if found
        SpvReflectPrvNode* p_fwd_node = FindNode(p_parser, result_id);
        if (p_fwd_node) {
          p_fwd_node->result_id = 0;
        }
        // Register pointer type
        p_node->result_id = result_id;
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class);
        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->type_id);
        p_node->is_type = true;
      } break;

      case SpvOpTypeForwardPointer: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class);
        p_node->is_type = true;
      } break;

      case SpvOpConstantTrue:
      case SpvOpConstantFalse:
      case SpvOpConstant:
      case SpvOpConstantComposite:
      case SpvOpConstantSampler:
      case SpvOpConstantNull: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);
      } break;

      case SpvOpSpecConstantTrue:
      case SpvOpSpecConstantFalse:
      case SpvOpSpecConstant:
      case SpvOpSpecConstantComposite:
      case SpvOpSpecConstantOp: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);
      } break;

      case SpvOpVariable: {
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->storage_class);
      } break;

      case SpvOpLoad: {
        // Only load enough so OpDecorate can reference the node, skip the remaining operands.
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);
      } break;

      case SpvOpAccessChain: {
        SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_access_chain->result_type_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_access_chain->result_id);
        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_access_chain->base_id);
        //
        // SPIRV_ACCESS_CHAIN_INDEX_OFFSET (4) is the number of words up until the first index:
        //   [Node, Result Type Id, Result Id, Base Id, <Indexes>]
        //
        p_access_chain->index_count = (node_word_count - SPIRV_ACCESS_CHAIN_INDEX_OFFSET);
        if (p_access_chain->index_count > 0) {
          p_access_chain->indexes = (uint32_t*)calloc(p_access_chain->index_count, sizeof(*(p_access_chain->indexes)));
          if (IsNull(p_access_chain->indexes)) {
            return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
          }
          // Parse any index values for access chain
          for (uint32_t index_index = 0; index_index < p_access_chain->index_count; ++index_index) {
            // Read index id
            uint32_t index_id = 0;
            CHECKED_READU32(p_parser, p_node->word_offset + SPIRV_ACCESS_CHAIN_INDEX_OFFSET + index_index, index_id);
            // Find OpConstant node that contains index value
            SpvReflectPrvNode* p_index_value_node = FindNode(p_parser, index_id);
            if ((p_index_value_node != NULL) &&
                (p_index_value_node->op == SpvOpConstant || p_index_value_node->op == SpvOpSpecConstant)) {
              // Read index value
              uint32_t index_value = UINT32_MAX;
              CHECKED_READU32(p_parser, p_index_value_node->word_offset + 3, index_value);
              assert(index_value != UINT32_MAX);
              // Write index value to array
              p_access_chain->indexes[index_index] = index_value;
           }\n          }\n        }\n        ++access_chain_index;\n      } break;\n\n      case SpvOpFunction: {\n        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);\n        // Count function definitions, not function declarations.  To determine\n        // the difference, set an in-function variable, and then if an OpLabel\n        // is reached before the end of the function increment the function\n        // count.\n        function_node = node_index;\n      } break;\n\n      case SpvOpLabel: {\n        if (function_node != (uint32_t)INVALID_VALUE) {\n          SpvReflectPrvNode* p_func_node = &(p_parser->nodes[function_node]);\n          CHECKED_READU32(p_parser, p_func_node->word_offset + 2, p_func_node->result_id);\n          ++(p_parser->function_count);\n        }\n        FALLTHROUGH;\n      }  // Fall through\n\n      case SpvOpFunctionEnd: {\n        function_node = (uint32_t)INVALID_VALUE;\n      } break;\n      case SpvOpFunctionParameter: {\n        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);\n      } break;\n      case SpvOpBitcast:\n      case SpvOpShiftRightLogical:\n      case SpvOpIAdd:\n      case SpvOpISub:\n      case SpvOpIMul:\n      case SpvOpUDiv:\n      case SpvOpSDiv: {\n        CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);\n      } break;\n    }\n\n    if (p_node->is_type) {\n      ++(p_parser->type_count);\n    }\n\n    spirv_word_index += node_word_count;\n    ++node_index;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseStrings(SpvReflectPrvParser* p_parser) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n  assert(IsNotNull(p_parser->nodes));\n\n  // Early out\n  if (p_parser->string_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {\n    // Allocate string storage\n    p_parser->strings 
= (SpvReflectPrvString*)calloc(p_parser->string_count, sizeof(*(p_parser->strings)));\n\n    uint32_t string_index = 0;\n    for (size_t i = 0; i < p_parser->node_count; ++i) {\n      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n      if (p_node->op != SpvOpString) {\n        continue;\n      }\n\n      // Paranoid check against string count\n      assert(string_index < p_parser->string_count);\n      if (string_index >= p_parser->string_count) {\n        return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n      }\n\n      // Result id\n      SpvReflectPrvString* p_string = &(p_parser->strings[string_index]);\n      CHECKED_READU32(p_parser, p_node->word_offset + 1, p_string->result_id);\n\n      // String\n      uint32_t string_start = p_node->word_offset + 2;\n      p_string->string = (const char*)(p_parser->spirv_code + string_start);\n\n      // Increment string index\n      ++string_index;\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseSource(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code)) {\n    // Source file\n    if (IsNotNull(p_parser->strings)) {\n      for (uint32_t i = 0; i < p_parser->string_count; ++i) {\n        SpvReflectPrvString* p_string = &(p_parser->strings[i]);\n        if (p_string->result_id == p_parser->source_file_id) {\n          p_module->source_file = p_string->string;\n          break;\n        }\n      }\n    }\n\n    // Source code\n    if (IsNotNull(p_parser->source_embedded)) {\n      const size_t source_len = strlen(p_parser->source_embedded);\n      char* p_source = (char*)calloc(source_len + 1, sizeof(char));\n\n      if (IsNull(p_source)) {\n        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n      }\n\n#ifdef _WIN32\n      strcpy_s(p_source, source_len + 1, p_parser->source_embedded);\n#else\n      strcpy(p_source, 
p_parser->source_embedded);\n#endif\n\n      p_module->source_source = p_source;\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseFunction(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_func_node, SpvReflectPrvFunction* p_func,\n                                      size_t first_label_index) {\n  p_func->id = p_func_node->result_id;\n\n  p_func->callee_count = 0;\n  p_func->accessed_variable_count = 0;\n\n  // First get count to know how much to allocate\n  for (size_t i = first_label_index; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if (p_node->op == SpvOpFunctionEnd) {\n      break;\n    }\n    switch (p_node->op) {\n      case SpvOpFunctionCall: {\n        ++(p_func->callee_count);\n      } break;\n      case SpvOpLoad:\n      case SpvOpAccessChain:\n      case SpvOpInBoundsAccessChain:\n      case SpvOpPtrAccessChain:\n      case SpvOpArrayLength:\n      case SpvOpGenericPtrMemSemantics:\n      case SpvOpInBoundsPtrAccessChain:\n      case SpvOpStore:\n      case SpvOpImageTexelPointer: {\n        ++(p_func->accessed_variable_count);\n      } break;\n      case SpvOpCopyMemory:\n      case SpvOpCopyMemorySized: {\n        p_func->accessed_variable_count += 2;\n      } break;\n      default:\n        break;\n    }\n  }\n\n  if (p_func->callee_count > 0) {\n    p_func->callees = (uint32_t*)calloc(p_func->callee_count, sizeof(*(p_func->callees)));\n    if (IsNull(p_func->callees)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n  }\n\n  if (p_func->accessed_variable_count > 0) {\n    p_func->accessed_variables =\n        (SpvReflectPrvAccessedVariable*)calloc(p_func->accessed_variable_count, sizeof(*(p_func->accessed_variables)));\n    if (IsNull(p_func->accessed_variables)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n  }\n\n  p_func->callee_count = 0;\n  p_func->accessed_variable_count = 0;\n  // Now have allocation, fill in values\n  
for (size_t i = first_label_index; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if (p_node->op == SpvOpFunctionEnd) {\n      break;\n    }\n    switch (p_node->op) {\n      case SpvOpFunctionCall: {\n        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_func->callees[p_func->callee_count]);\n        (++p_func->callee_count);\n      } break;\n      case SpvOpLoad:\n      case SpvOpAccessChain:\n      case SpvOpInBoundsAccessChain:\n      case SpvOpPtrAccessChain:\n      case SpvOpArrayLength:\n      case SpvOpGenericPtrMemSemantics:\n      case SpvOpInBoundsPtrAccessChain:\n      case SpvOpImageTexelPointer: {\n        const uint32_t result_index = p_node->word_offset + 2;\n        const uint32_t ptr_index = p_node->word_offset + 3;\n        SpvReflectPrvAccessedVariable* access_ptr = &p_func->accessed_variables[p_func->accessed_variable_count];\n\n        access_ptr->p_node = p_node;\n        // Need to track Result ID as not sure there has been any memory access through here yet\n        CHECKED_READU32(p_parser, result_index, access_ptr->result_id);\n        CHECKED_READU32(p_parser, ptr_index, access_ptr->variable_ptr);\n        (++p_func->accessed_variable_count);\n      } break;\n      case SpvOpStore: {\n        const uint32_t result_index = p_node->word_offset + 2;\n        CHECKED_READU32(p_parser, result_index, p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr);\n        p_func->accessed_variables[p_func->accessed_variable_count].p_node = p_node;\n        (++p_func->accessed_variable_count);\n      } break;\n      case SpvOpCopyMemory:\n      case SpvOpCopyMemorySized: {\n        // There is no result_id or node, being zero is same as being invalid\n        CHECKED_READU32(p_parser, p_node->word_offset + 1,\n                        p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr);\n        (++p_func->accessed_variable_count);\n        
CHECKED_READU32(p_parser, p_node->word_offset + 2,\n                        p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr);\n        (++p_func->accessed_variable_count);\n      } break;\n      default:\n        break;\n    }\n  }\n\n  if (p_func->callee_count > 0) {\n    qsort(p_func->callees, p_func->callee_count, sizeof(*(p_func->callees)), SortCompareUint32);\n  }\n  p_func->callee_count = (uint32_t)DedupSortedUint32(p_func->callees, p_func->callee_count);\n\n  if (p_func->accessed_variable_count > 0) {\n    qsort(p_func->accessed_variables, p_func->accessed_variable_count, sizeof(*(p_func->accessed_variables)),\n          SortCompareAccessedVariable);\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic int SortCompareFunctions(const void* a, const void* b) {\n  const SpvReflectPrvFunction* af = (const SpvReflectPrvFunction*)a;\n  const SpvReflectPrvFunction* bf = (const SpvReflectPrvFunction*)b;\n  return (int)af->id - (int)bf->id;\n}\n\nstatic SpvReflectResult ParseFunctions(SpvReflectPrvParser* p_parser) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n  assert(IsNotNull(p_parser->nodes));\n\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {\n    if (p_parser->function_count == 0) {\n      return SPV_REFLECT_RESULT_SUCCESS;\n    }\n\n    p_parser->functions = (SpvReflectPrvFunction*)calloc(p_parser->function_count, sizeof(*(p_parser->functions)));\n    if (IsNull(p_parser->functions)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n\n    size_t function_index = 0;\n    for (size_t i = 0; i < p_parser->node_count; ++i) {\n      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n      if (p_node->op != SpvOpFunction) {\n        continue;\n      }\n\n      // Skip over function declarations that aren't definitions\n      bool func_definition = false;\n      // Intentionally reuse i to avoid iterating over these nodes more than\n      // 
once\n      for (; i < p_parser->node_count; ++i) {\n        if (p_parser->nodes[i].op == SpvOpLabel) {\n          func_definition = true;\n          break;\n        }\n        if (p_parser->nodes[i].op == SpvOpFunctionEnd) {\n          break;\n        }\n      }\n      if (!func_definition) {\n        continue;\n      }\n\n      SpvReflectPrvFunction* p_function = &(p_parser->functions[function_index]);\n\n      SpvReflectResult result = ParseFunction(p_parser, p_node, p_function, i);\n      if (result != SPV_REFLECT_RESULT_SUCCESS) {\n        return result;\n      }\n\n      ++function_index;\n    }\n\n    qsort(p_parser->functions, p_parser->function_count, sizeof(*(p_parser->functions)), SortCompareFunctions);\n\n    // Once they're sorted, link the functions with pointers to improve graph\n    // traversal efficiency\n    for (size_t i = 0; i < p_parser->function_count; ++i) {\n      SpvReflectPrvFunction* p_func = &(p_parser->functions[i]);\n      if (p_func->callee_count == 0) {\n        continue;\n      }\n      p_func->callee_ptrs = (SpvReflectPrvFunction**)calloc(p_func->callee_count, sizeof(*(p_func->callee_ptrs)));\n      for (size_t j = 0, k = 0; j < p_func->callee_count; ++j) {\n        while (p_parser->functions[k].id != p_func->callees[j]) {\n          ++k;\n          if (k >= p_parser->function_count) {\n            // Invalid called function ID somewhere\n            return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          }\n        }\n        p_func->callee_ptrs[j] = &(p_parser->functions[k]);\n      }\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseMemberCounts(SpvReflectPrvParser* p_parser) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->spirv_code));\n  assert(IsNotNull(p_parser->nodes));\n\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {\n    for (size_t i = 0; i < p_parser->node_count; ++i) {\n      SpvReflectPrvNode* p_node = 
&(p_parser->nodes[i]);
      if ((p_node->op != SpvOpMemberName) && (p_node->op != SpvOpMemberDecorate)) {
        continue;
      }

      uint32_t target_id = 0;
      uint32_t member_index = (uint32_t)INVALID_VALUE;
      CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);
      CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index);
      SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);
      // Not all nodes get parsed, so FindNode returning NULL is expected.
      if (IsNull(p_target_node)) {
        continue;
      }

      if (member_index == INVALID_VALUE) {
        return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;
      }

      // Member count is derived from the highest member index seen.
      p_target_node->member_count = Max(p_target_node->member_count, member_index + 1);
    }

    for (uint32_t i = 0; i < p_parser->node_count; ++i) {
      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
      if (p_node->member_count == 0) {
        continue;
      }

      p_node->member_names = (const char**)calloc(p_node->member_count, sizeof(*(p_node->member_names)));
      if (IsNull(p_node->member_names)) {
        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
      }

      p_node->member_decorations = (SpvReflectPrvDecorations*)calloc(p_node->member_count, sizeof(*(p_node->member_decorations)));
      if (IsNull(p_node->member_decorations)) {
        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
      }
    }
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Attaches OpName/OpMemberName strings (captured by ParseNodes) to their
// target nodes.
static SpvReflectResult ParseNames(SpvReflectPrvParser* p_parser) {
  assert(IsNotNull(p_parser));
  assert(IsNotNull(p_parser->spirv_code));
  assert(IsNotNull(p_parser->nodes));

  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {
    for (size_t i = 0; i < p_parser->node_count; ++i) {
      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
      if ((p_node->op != SpvOpName) && (p_node->op != SpvOpMemberName)) {
        continue;
      }
    uint32_t target_id = 0;\n      CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);\n      SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);\n      // Not all nodes get parsed, so FindNode returning NULL is expected.\n      if (IsNull(p_target_node)) {\n        continue;\n      }\n\n      const char** pp_target_name = &(p_target_node->name);\n      if (p_node->op == SpvOpMemberName) {\n        uint32_t member_index = UINT32_MAX;\n        CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index);\n        pp_target_name = &(p_target_node->member_names[member_index]);\n      }\n\n      *pp_target_name = p_node->name;\n    }\n  }\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\n// Returns true if user_type matches pattern or if user_type begins with pattern and the next character is ':'\n// For example, UserTypeMatches(\"rwbuffer\", \"rwbuffer\") will be true, UserTypeMatches(\"rwbuffer\", \"rwbuffer:<S>\") will be true, and\n// UserTypeMatches(\"rwbuffer\", \"rwbufferfoo\") will be false.\nstatic bool UserTypeMatches(const char* user_type, const char* pattern) {\n  const size_t pattern_length = strlen(pattern);\n  if (strncmp(user_type, pattern, pattern_length) == 0) {\n    if (user_type[pattern_length] == ':' || user_type[pattern_length] == '\\0') {\n      return true;\n    }\n  }\n  return false;\n}\n\nstatic SpvReflectResult ParseDecorations(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  uint32_t spec_constant_count = 0;\n  for (uint32_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n\n    if ((p_node->op != SpvOpDecorate) && (p_node->op != SpvOpMemberDecorate) && (p_node->op != SpvOpDecorateId) &&\n        (p_node->op != SpvOpDecorateString) && (p_node->op != SpvOpMemberDecorateString)) {\n      continue;\n    }\n\n    // Need to adjust the read offset if this is a member decoration\n    uint32_t member_offset = 0;\n    if (p_node->op == SpvOpMemberDecorate) 
{\n      member_offset = 1;\n    }\n\n    // Get decoration\n    uint32_t decoration = (uint32_t)INVALID_VALUE;\n    CHECKED_READU32(p_parser, p_node->word_offset + member_offset + 2, decoration);\n\n    // Filter out the decoration that do not affect reflection, otherwise\n    // there will be random crashes because the nodes aren't found.\n    bool skip = false;\n    switch (decoration) {\n      default: {\n        skip = true;\n      } break;\n      case SpvDecorationRelaxedPrecision:\n      case SpvDecorationBlock:\n      case SpvDecorationBufferBlock:\n      case SpvDecorationColMajor:\n      case SpvDecorationRowMajor:\n      case SpvDecorationArrayStride:\n      case SpvDecorationMatrixStride:\n      case SpvDecorationBuiltIn:\n      case SpvDecorationNoPerspective:\n      case SpvDecorationFlat:\n      case SpvDecorationNonWritable:\n      case SpvDecorationNonReadable:\n      case SpvDecorationPatch:\n      case SpvDecorationPerVertexKHR:\n      case SpvDecorationPerTaskNV:\n      case SpvDecorationLocation:\n      case SpvDecorationComponent:\n      case SpvDecorationBinding:\n      case SpvDecorationDescriptorSet:\n      case SpvDecorationOffset:\n      case SpvDecorationInputAttachmentIndex:\n      case SpvDecorationSpecId:\n      case SpvDecorationWeightTextureQCOM:\n      case SpvDecorationBlockMatchTextureQCOM:\n      case SpvDecorationUserTypeGOOGLE:\n      case SpvDecorationHlslCounterBufferGOOGLE:\n      case SpvDecorationHlslSemanticGOOGLE: {\n        skip = false;\n      } break;\n    }\n    if (skip) {\n      continue;\n    }\n\n    // Find target node\n    uint32_t target_id = 0;\n    CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);\n    SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);\n    if (IsNull(p_target_node)) {\n      if ((p_node->op == (uint32_t)SpvOpDecorate) && (decoration == SpvDecorationRelaxedPrecision)) {\n        // Many OPs can be decorated that we don't care about. 
Ignore those.\n        // See https://github.com/KhronosGroup/SPIRV-Reflect/issues/134\n        continue;\n      }\n      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n    }\n    // Get decorations\n    SpvReflectPrvDecorations* p_target_decorations = &(p_target_node->decorations);\n    // Update pointer if this is a member decoration\n    if (p_node->op == SpvOpMemberDecorate) {\n      uint32_t member_index = (uint32_t)INVALID_VALUE;\n      CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index);\n      p_target_decorations = &(p_target_node->member_decorations[member_index]);\n    }\n\n    switch (decoration) {\n      default:\n        break;\n\n      case SpvDecorationRelaxedPrecision: {\n        p_target_decorations->is_relaxed_precision = true;\n      } break;\n\n      case SpvDecorationBlock: {\n        p_target_decorations->is_block = true;\n      } break;\n\n      case SpvDecorationBufferBlock: {\n        p_target_decorations->is_buffer_block = true;\n      } break;\n\n      case SpvDecorationColMajor: {\n        p_target_decorations->is_column_major = true;\n      } break;\n\n      case SpvDecorationRowMajor: {\n        p_target_decorations->is_row_major = true;\n      } break;\n\n      case SpvDecorationArrayStride: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->array_stride);\n      } break;\n\n      case SpvDecorationMatrixStride: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->matrix_stride);\n      } break;\n\n      case SpvDecorationBuiltIn: {\n        p_target_decorations->is_built_in = true;\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32_CAST(p_parser, word_offset, SpvBuiltIn, p_target_decorations->built_in);\n      } break;\n\n      case SpvDecorationNoPerspective: {\n        
p_target_decorations->is_noperspective = true;\n      } break;\n\n      case SpvDecorationFlat: {\n        p_target_decorations->is_flat = true;\n      } break;\n\n      case SpvDecorationNonWritable: {\n        p_target_decorations->is_non_writable = true;\n      } break;\n\n      case SpvDecorationNonReadable: {\n        p_target_decorations->is_non_readable = true;\n      } break;\n\n      case SpvDecorationPatch: {\n        p_target_decorations->is_patch = true;\n      } break;\n\n      case SpvDecorationPerVertexKHR: {\n        p_target_decorations->is_per_vertex = true;\n      } break;\n\n      case SpvDecorationPerTaskNV: {\n        p_target_decorations->is_per_task = true;\n      } break;\n\n      case SpvDecorationLocation: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->location.value);\n        p_target_decorations->location.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationComponent: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->component.value);\n        p_target_decorations->component.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationBinding: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->binding.value);\n        p_target_decorations->binding.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationDescriptorSet: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->set.value);\n        p_target_decorations->set.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationOffset: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, 
p_target_decorations->offset.value);\n        p_target_decorations->offset.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationInputAttachmentIndex: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->input_attachment_index.value);\n        p_target_decorations->input_attachment_index.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationSpecId: {\n        spec_constant_count++;\n      } break;\n\n      case SpvDecorationHlslCounterBufferGOOGLE: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        CHECKED_READU32(p_parser, word_offset, p_target_decorations->uav_counter_buffer.value);\n        p_target_decorations->uav_counter_buffer.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationHlslSemanticGOOGLE: {\n        uint32_t word_offset = p_node->word_offset + member_offset + 3;\n        p_target_decorations->semantic.value = (const char*)(p_parser->spirv_code + word_offset);\n        p_target_decorations->semantic.word_offset = word_offset;\n      } break;\n\n      case SpvDecorationWeightTextureQCOM: {\n        p_target_decorations->is_weight_texture = true;\n      } break;\n\n      case SpvDecorationBlockMatchTextureQCOM: {\n        p_target_decorations->is_block_match_texture = true;\n      } break;\n    }\n\n    if (p_node->op == SpvOpDecorateString && decoration == SpvDecorationUserTypeGOOGLE) {\n      uint32_t terminator = 0;\n      SpvReflectResult result = ReadStr(p_parser, p_node->word_offset + 3, 0, p_node->word_count, &terminator, NULL);\n      if (result != SPV_REFLECT_RESULT_SUCCESS) {\n        return result;\n      }\n      const char* name = (const char*)(p_parser->spirv_code + p_node->word_offset + 3);\n      if (UserTypeMatches(name, \"cbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CBUFFER;\n      } else if (UserTypeMatches(name, \"tbuffer\")) 
{\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TBUFFER;\n      } else if (UserTypeMatches(name, \"appendstructuredbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_APPEND_STRUCTURED_BUFFER;\n      } else if (UserTypeMatches(name, \"buffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_BUFFER;\n      } else if (UserTypeMatches(name, \"byteaddressbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER;\n      } else if (UserTypeMatches(name, \"constantbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CONSTANT_BUFFER;\n      } else if (UserTypeMatches(name, \"consumestructuredbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CONSUME_STRUCTURED_BUFFER;\n      } else if (UserTypeMatches(name, \"inputpatch\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_INPUT_PATCH;\n      } else if (UserTypeMatches(name, \"outputpatch\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_OUTPUT_PATCH;\n      } else if (UserTypeMatches(name, \"rasterizerorderedbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BUFFER;\n      } else if (UserTypeMatches(name, \"rasterizerorderedbyteaddressbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BYTE_ADDRESS_BUFFER;\n      } else if (UserTypeMatches(name, \"rasterizerorderedstructuredbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_STRUCTURED_BUFFER;\n      } else if (UserTypeMatches(name, \"rasterizerorderedtexture1d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D;\n      } else if (UserTypeMatches(name, \"rasterizerorderedtexture1darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D_ARRAY;\n      
} else if (UserTypeMatches(name, \"rasterizerorderedtexture2d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D;\n      } else if (UserTypeMatches(name, \"rasterizerorderedtexture2darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D_ARRAY;\n      } else if (UserTypeMatches(name, \"rasterizerorderedtexture3d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_3D;\n      } else if (UserTypeMatches(name, \"raytracingaccelerationstructure\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RAYTRACING_ACCELERATION_STRUCTURE;\n      } else if (UserTypeMatches(name, \"rwbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_BUFFER;\n      } else if (UserTypeMatches(name, \"rwbyteaddressbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER;\n      } else if (UserTypeMatches(name, \"rwstructuredbuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_STRUCTURED_BUFFER;\n      } else if (UserTypeMatches(name, \"rwtexture1d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D;\n      } else if (UserTypeMatches(name, \"rwtexture1darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D_ARRAY;\n      } else if (UserTypeMatches(name, \"rwtexture2d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D;\n      } else if (UserTypeMatches(name, \"rwtexture2darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D_ARRAY;\n      } else if (UserTypeMatches(name, \"rwtexture3d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_3D;\n      } else if (UserTypeMatches(name, \"structuredbuffer\")) {\n        p_target_decorations->user_type = 
SPV_REFLECT_USER_TYPE_STRUCTURED_BUFFER;\n      } else if (UserTypeMatches(name, \"subpassinput\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_SUBPASS_INPUT;\n      } else if (UserTypeMatches(name, \"subpassinputms\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_SUBPASS_INPUT_MS;\n      } else if (UserTypeMatches(name, \"texture1d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_1D;\n      } else if (UserTypeMatches(name, \"texture1darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_1D_ARRAY;\n      } else if (UserTypeMatches(name, \"texture2d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2D;\n      } else if (UserTypeMatches(name, \"texture2darray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2D_ARRAY;\n      } else if (UserTypeMatches(name, \"texture2dms\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2DMS;\n      } else if (UserTypeMatches(name, \"texture2dmsarray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2DMS_ARRAY;\n      } else if (UserTypeMatches(name, \"texture3d\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_3D;\n      } else if (UserTypeMatches(name, \"texturebuffer\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_BUFFER;\n      } else if (UserTypeMatches(name, \"texturecube\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_CUBE;\n      } else if (UserTypeMatches(name, \"texturecubearray\")) {\n        p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_CUBE_ARRAY;\n      }\n    }\n  }\n\n  if (spec_constant_count > 0) {\n    p_module->spec_constants = (SpvReflectSpecializationConstant*)calloc(spec_constant_count, sizeof(*p_module->spec_constants));\n    if (IsNull(p_module->spec_constants)) {\n      return 
SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n  }\n  for (uint32_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if (p_node->op == SpvOpDecorate) {\n      uint32_t decoration = (uint32_t)INVALID_VALUE;\n      CHECKED_READU32(p_parser, p_node->word_offset + 2, decoration);\n      if (decoration == SpvDecorationSpecId) {\n        const uint32_t count = p_module->spec_constant_count;\n        CHECKED_READU32(p_parser, p_node->word_offset + 1, p_module->spec_constants[count].spirv_id);\n        CHECKED_READU32(p_parser, p_node->word_offset + 3, p_module->spec_constants[count].constant_id);\n        // If being used for a OpSpecConstantComposite (ex. LocalSizeId), there won't be a name\n        SpvReflectPrvNode* target_node = FindNode(p_parser, p_module->spec_constants[count].spirv_id);\n        if (IsNotNull(target_node)) {\n          p_module->spec_constants[count].name = target_node->name;\n        }\n        p_module->spec_constant_count++;\n      }\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult EnumerateAllUniforms(SpvReflectShaderModule* p_module, size_t* p_uniform_count, uint32_t** pp_uniforms) {\n  *p_uniform_count = p_module->descriptor_binding_count;\n  if (*p_uniform_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n  *pp_uniforms = (uint32_t*)calloc(*p_uniform_count, sizeof(**pp_uniforms));\n\n  if (IsNull(*pp_uniforms)) {\n    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n  }\n\n  for (size_t i = 0; i < *p_uniform_count; ++i) {\n    (*pp_uniforms)[i] = p_module->descriptor_bindings[i].spirv_id;\n  }\n  qsort(*pp_uniforms, *p_uniform_count, sizeof(**pp_uniforms), SortCompareUint32);\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseType(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_node,\n                                  SpvReflectPrvDecorations* p_struct_member_decorations, SpvReflectShaderModule* p_module,\n                   
               SpvReflectTypeDescription* p_type) {\n  SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS;\n\n  if (p_node->member_count > 0) {\n    p_type->struct_type_description = FindType(p_module, p_node->result_id);\n    p_type->member_count = p_node->member_count;\n    p_type->members = (SpvReflectTypeDescription*)calloc(p_type->member_count, sizeof(*(p_type->members)));\n    if (IsNotNull(p_type->members)) {\n      // Mark all members types with an invalid state\n      for (size_t i = 0; i < p_type->members->member_count; ++i) {\n        SpvReflectTypeDescription* p_member_type = &(p_type->members[i]);\n        p_member_type->id = (uint32_t)INVALID_VALUE;\n        p_member_type->op = (SpvOp)INVALID_VALUE;\n        p_member_type->storage_class = (SpvStorageClass)INVALID_VALUE;\n      }\n    } else {\n      result = SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n  }\n\n  if (result == SPV_REFLECT_RESULT_SUCCESS) {\n    // Since the parse descends on type information, these will get overwritten\n    // if not guarded against assignment. 
Only assign if the id is invalid.\n    if (p_type->id == INVALID_VALUE) {\n      p_type->id = p_node->result_id;\n      p_type->op = p_node->op;\n      p_type->decoration_flags = 0;\n    }\n    // Top level types need to pick up decorations from all types below it.\n    // Issue and fix here: https://github.com/chaoticbob/SPIRV-Reflect/issues/64\n    p_type->decoration_flags = ApplyDecorations(&p_node->decorations);\n\n    switch (p_node->op) {\n      default:\n        break;\n      case SpvOpTypeVoid:\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VOID;\n        break;\n\n      case SpvOpTypeBool:\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_BOOL;\n        break;\n\n      case SpvOpTypeInt: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_INT;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width);\n        IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.scalar.signedness);\n      } break;\n\n      case SpvOpTypeFloat: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_FLOAT;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width);\n      } break;\n\n      case SpvOpTypeVector: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VECTOR;\n        uint32_t component_type_id = (uint32_t)INVALID_VALUE;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, component_type_id);\n        IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.vector.component_count);\n        // Parse component type\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, component_type_id);\n        if (IsNotNull(p_next_node)) {\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        } else {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          SPV_REFLECT_ASSERT(false);\n        }\n      } break;\n\n      case SpvOpTypeMatrix: {\n        
p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_MATRIX;\n        uint32_t column_type_id = (uint32_t)INVALID_VALUE;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, column_type_id);\n        IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.matrix.column_count);\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, column_type_id);\n        if (IsNotNull(p_next_node)) {\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        } else {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          SPV_REFLECT_ASSERT(false);\n        }\n        p_type->traits.numeric.matrix.row_count = p_type->traits.numeric.vector.component_count;\n        p_type->traits.numeric.matrix.stride = p_node->decorations.matrix_stride;\n        // NOTE: Matrix stride is decorated using OpMemberDecoreate - not OpDecoreate.\n        if (IsNotNull(p_struct_member_decorations)) {\n          p_type->traits.numeric.matrix.stride = p_struct_member_decorations->matrix_stride;\n        }\n      } break;\n\n      case SpvOpTypeImage: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE;\n        uint32_t sampled_type_id = (uint32_t)INVALID_VALUE;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, sampled_type_id);\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, sampled_type_id);\n        if (IsNotNull(p_next_node)) {\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        } else {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n        IF_READU32_CAST(result, p_parser, p_node->word_offset + 3, SpvDim, p_type->traits.image.dim);\n        IF_READU32(result, p_parser, p_node->word_offset + 4, p_type->traits.image.depth);\n        IF_READU32(result, p_parser, p_node->word_offset + 5, p_type->traits.image.arrayed);\n        IF_READU32(result, p_parser, p_node->word_offset + 6, 
p_type->traits.image.ms);\n        IF_READU32(result, p_parser, p_node->word_offset + 7, p_type->traits.image.sampled);\n        IF_READU32_CAST(result, p_parser, p_node->word_offset + 8, SpvImageFormat, p_type->traits.image.image_format);\n      } break;\n\n      case SpvOpTypeSampler: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER;\n      } break;\n\n      case SpvOpTypeSampledImage: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE;\n        uint32_t image_type_id = (uint32_t)INVALID_VALUE;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, image_type_id);\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, image_type_id);\n        if (IsNotNull(p_next_node)) {\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        } else {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          SPV_REFLECT_ASSERT(false);\n        }\n      } break;\n\n      case SpvOpTypeArray: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY;\n        if (result == SPV_REFLECT_RESULT_SUCCESS) {\n          uint32_t element_type_id = (uint32_t)INVALID_VALUE;\n          uint32_t length_id = (uint32_t)INVALID_VALUE;\n          IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id);\n          IF_READU32(result, p_parser, p_node->word_offset + 3, length_id);\n          // NOTE: Array stride is decorated using OpDecorate instead of\n          //       OpMemberDecorate, even if the array is apart of a struct.\n          p_type->traits.array.stride = p_node->decorations.array_stride;\n          // Get length for current dimension\n          SpvReflectPrvNode* p_length_node = FindNode(p_parser, length_id);\n          if (IsNotNull(p_length_node)) {\n            uint32_t dim_index = p_type->traits.array.dims_count;\n            uint32_t length = 0;\n            IF_READU32(result, p_parser, p_length_node->word_offset + 3, length);\n            if 
(result == SPV_REFLECT_RESULT_SUCCESS) {\n              p_type->traits.array.dims[dim_index] = length;\n              p_type->traits.array.dims_count += 1;\n              p_type->traits.array.spec_constant_op_ids[dim_index] =\n                  IsSpecConstant(p_length_node) ? p_length_node->decorations.spec_id : (uint32_t)INVALID_VALUE;\n            } else {\n              result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n              SPV_REFLECT_ASSERT(false);\n            }\n            // Parse next dimension or element type\n            SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);\n            if (IsNotNull(p_next_node)) {\n              result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n            }\n          } else {\n            result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n            SPV_REFLECT_ASSERT(false);\n          }\n        }\n      } break;\n\n      case SpvOpTypeRuntimeArray: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY;\n        uint32_t element_type_id = (uint32_t)INVALID_VALUE;\n        IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id);\n        p_type->traits.array.stride = p_node->decorations.array_stride;\n        uint32_t dim_index = p_type->traits.array.dims_count;\n        p_type->traits.array.dims[dim_index] = (uint32_t)SPV_REFLECT_ARRAY_DIM_RUNTIME;\n        p_type->traits.array.spec_constant_op_ids[dim_index] = (uint32_t)INVALID_VALUE;\n        p_type->traits.array.dims_count += 1;\n        // Parse next dimension or element type\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);\n        if (IsNotNull(p_next_node)) {\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        } else {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          SPV_REFLECT_ASSERT(false);\n        }\n      } break;\n\n      case SpvOpTypeStruct: {\n        
p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_STRUCT;\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK;\n        uint32_t word_index = 2;\n        uint32_t member_index = 0;\n        for (; word_index < p_node->word_count; ++word_index, ++member_index) {\n          uint32_t member_id = (uint32_t)INVALID_VALUE;\n          IF_READU32(result, p_parser, p_node->word_offset + word_index, member_id);\n          // Find member node\n          SpvReflectPrvNode* p_member_node = FindNode(p_parser, member_id);\n          if (IsNull(p_member_node)) {\n            result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n            SPV_REFLECT_ASSERT(false);\n            break;\n          }\n\n          // Member decorations\n          SpvReflectPrvDecorations* p_member_decorations = &p_node->member_decorations[member_index];\n\n          assert(member_index < p_type->member_count);\n          // Parse member type\n          SpvReflectTypeDescription* p_member_type = &(p_type->members[member_index]);\n          p_member_type->id = member_id;\n          p_member_type->op = p_member_node->op;\n          result = ParseType(p_parser, p_member_node, p_member_decorations, p_module, p_member_type);\n          if (result != SPV_REFLECT_RESULT_SUCCESS) {\n            break;\n          }\n          // This looks wrong\n          // p_member_type->type_name = p_member_node->name;\n          p_member_type->struct_member_name = p_node->member_names[member_index];\n        }\n      } break;\n\n      case SpvOpTypeOpaque:\n        break;\n\n      case SpvOpTypePointer: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_REF;\n        IF_READU32_CAST(result, p_parser, p_node->word_offset + 2, SpvStorageClass, p_type->storage_class);\n\n        bool found_recursion = false;\n        if (p_type->storage_class == SpvStorageClassPhysicalStorageBuffer) {\n          // Need to make sure we haven't started an infinite recursive loop\n          for (uint32_t i = 0; i < 
p_parser->physical_pointer_count; i++) {\n            if (p_type->id == p_parser->physical_pointer_check[i]->id) {\n              found_recursion = true;\n              memcpy(p_type, p_parser->physical_pointer_check[i], sizeof(SpvReflectTypeDescription));\n              p_type->copied = 1;\n              return SPV_REFLECT_RESULT_SUCCESS;\n            }\n          }\n          if (!found_recursion) {\n            p_parser->physical_pointer_struct_count++;\n            p_parser->physical_pointer_check[p_parser->physical_pointer_count] = p_type;\n            p_parser->physical_pointer_count++;\n            if (p_parser->physical_pointer_count >= MAX_RECURSIVE_PHYSICAL_POINTER_CHECK) {\n              return SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED;\n            }\n          }\n        }\n\n        // Parse type\n        SpvReflectPrvNode* p_next_node = FindNode(p_parser, p_node->type_id);\n        if (IsNull(p_next_node)) {\n          result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          SPV_REFLECT_ASSERT(false);\n        } else if (!found_recursion) {\n          if (p_next_node->op == SpvOpTypeStruct) {\n            p_type->struct_type_description = FindType(p_module, p_next_node->result_id);\n          }\n\n          result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);\n        }\n      } break;\n\n      case SpvOpTypeAccelerationStructureKHR: {\n        p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE;\n      } break;\n    }\n\n    if (result == SPV_REFLECT_RESULT_SUCCESS) {\n      // Names get assigned on the way down. 
Guard against names\n      // get overwritten on the way up.\n      if (IsNull(p_type->type_name)) {\n        p_type->type_name = p_node->name;\n      }\n    }\n  }\n\n  return result;\n}\n\nstatic SpvReflectResult ParseTypes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  if (p_parser->type_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  p_module->_internal->type_description_count = p_parser->type_count;\n  p_module->_internal->type_descriptions = (SpvReflectTypeDescription*)calloc(p_module->_internal->type_description_count,\n                                                                              sizeof(*(p_module->_internal->type_descriptions)));\n  if (IsNull(p_module->_internal->type_descriptions)) {\n    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n  }\n\n  // Mark all types with an invalid state\n  for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) {\n    SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[i]);\n    p_type->id = (uint32_t)INVALID_VALUE;\n    p_type->op = (SpvOp)INVALID_VALUE;\n    p_type->storage_class = (SpvStorageClass)INVALID_VALUE;\n  }\n\n  size_t type_index = 0;\n  for (size_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if (!p_node->is_type) {\n      continue;\n    }\n\n    SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[type_index]);\n    p_parser->physical_pointer_count = 0;\n    SpvReflectResult result = ParseType(p_parser, p_node, NULL, p_module, p_type);\n    if (result != SPV_REFLECT_RESULT_SUCCESS) {\n      return result;\n    }\n    ++type_index;\n  }\n\n  // allocate now and fill in when parsing struct variable later\n  if (p_parser->physical_pointer_struct_count > 0) {\n    p_parser->physical_pointer_structs = (SpvReflectPrvPhysicalPointerStruct*)calloc(p_parser->physical_pointer_struct_count,\n                                                         
                            sizeof(*(p_parser->physical_pointer_structs)));\n    if (IsNull(p_parser->physical_pointer_structs)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n  }\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseCapabilities(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  if (p_parser->capability_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  p_module->capability_count = p_parser->capability_count;\n  p_module->capabilities = (SpvReflectCapability*)calloc(p_module->capability_count, sizeof(*(p_module->capabilities)));\n  if (IsNull(p_module->capabilities)) {\n    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n  }\n\n  // Mark all types with an invalid state\n  for (size_t i = 0; i < p_module->capability_count; ++i) {\n    SpvReflectCapability* p_cap = &(p_module->capabilities[i]);\n    p_cap->value = SpvCapabilityMax;\n    p_cap->word_offset = (uint32_t)INVALID_VALUE;\n  }\n\n  size_t capability_index = 0;\n  for (size_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if (SpvOpCapability != p_node->op) {\n      continue;\n    }\n\n    SpvReflectCapability* p_cap = &(p_module->capabilities[capability_index]);\n    p_cap->value = p_node->capability;\n    p_cap->word_offset = p_node->word_offset + 1;\n    ++capability_index;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic int SortCompareDescriptorBinding(const void* a, const void* b) {\n  const SpvReflectDescriptorBinding* p_elem_a = (const SpvReflectDescriptorBinding*)a;\n  const SpvReflectDescriptorBinding* p_elem_b = (const SpvReflectDescriptorBinding*)b;\n  int value = (int)(p_elem_a->binding) - (int)(p_elem_b->binding);\n  if (value == 0) {\n    // use spirv-id as a tiebreaker to ensure a stable ordering, as they're guaranteed\n    // unique.\n    assert(p_elem_a->spirv_id != p_elem_b->spirv_id);\n    value = (int)(p_elem_a->spirv_id) - 
(int)(p_elem_b->spirv_id);\n  }\n  return value;\n}\n\nstatic SpvReflectResult ParseDescriptorBindings(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  p_module->descriptor_binding_count = 0;\n  for (size_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if ((p_node->op != SpvOpVariable) ||\n        ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != SpvStorageClassStorageBuffer) &&\n         (p_node->storage_class != SpvStorageClassUniformConstant))) {\n      continue;\n    }\n    if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) {\n      continue;\n    }\n\n    p_module->descriptor_binding_count += 1;\n  }\n\n  if (p_module->descriptor_binding_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  p_module->descriptor_bindings =\n      (SpvReflectDescriptorBinding*)calloc(p_module->descriptor_binding_count, sizeof(*(p_module->descriptor_bindings)));\n  if (IsNull(p_module->descriptor_bindings)) {\n    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n  }\n\n  // Mark all types with an invalid state\n  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {\n    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);\n    p_descriptor->binding = (uint32_t)INVALID_VALUE;\n    p_descriptor->input_attachment_index = (uint32_t)INVALID_VALUE;\n    p_descriptor->set = (uint32_t)INVALID_VALUE;\n    p_descriptor->descriptor_type = (SpvReflectDescriptorType)INVALID_VALUE;\n    p_descriptor->uav_counter_id = (uint32_t)INVALID_VALUE;\n  }\n\n  size_t descriptor_index = 0;\n  for (size_t i = 0; i < p_parser->node_count; ++i) {\n    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);\n    if ((p_node->op != SpvOpVariable) ||\n        ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != 
SpvStorageClassStorageBuffer) &&\n         (p_node->storage_class != SpvStorageClassUniformConstant))) {\n      continue;\n    }\n    if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) {\n      continue;\n    }\n\n    SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id);\n    if (IsNull(p_type)) {\n      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n    }\n    // If the type is a pointer, resolve it. We need to retain the storage class\n    // from the pointer so that we can use it to deduce deescriptor types.\n    SpvStorageClass pointer_storage_class = SpvStorageClassMax;\n    if (p_type->op == SpvOpTypePointer) {\n      pointer_storage_class = p_type->storage_class;\n      // Find the type's node\n      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);\n      if (IsNull(p_type_node)) {\n        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n      }\n      // Should be the resolved type\n      p_type = FindType(p_module, p_type_node->type_id);\n      if (IsNull(p_type)) {\n        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n      }\n    }\n\n    SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[descriptor_index];\n    p_descriptor->spirv_id = p_node->result_id;\n    p_descriptor->name = p_node->name;\n    p_descriptor->binding = p_node->decorations.binding.value;\n    p_descriptor->input_attachment_index = p_node->decorations.input_attachment_index.value;\n    p_descriptor->set = p_node->decorations.set.value;\n    p_descriptor->count = 1;\n    p_descriptor->uav_counter_id = p_node->decorations.uav_counter_buffer.value;\n    p_descriptor->type_description = p_type;\n    p_descriptor->decoration_flags = ApplyDecorations(&p_node->decorations);\n    p_descriptor->user_type = p_node->decorations.user_type;\n\n    // Flags like non-writable and non-readable are found as member decorations only.\n    // If 
all members have one of those decorations set, promote the decoration up\n    // to the whole descriptor.\n    const SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);\n    if (IsNotNull(p_type_node) && p_type_node->member_count) {\n      SpvReflectPrvDecorations common_flags = p_type_node->member_decorations[0];\n\n      for (uint32_t m = 1; m < p_type_node->member_count; ++m) {\n        common_flags.is_relaxed_precision &= p_type_node->member_decorations[m].is_relaxed_precision;\n        common_flags.is_block &= p_type_node->member_decorations[m].is_block;\n        common_flags.is_buffer_block &= p_type_node->member_decorations[m].is_buffer_block;\n        common_flags.is_row_major &= p_type_node->member_decorations[m].is_row_major;\n        common_flags.is_column_major &= p_type_node->member_decorations[m].is_column_major;\n        common_flags.is_built_in &= p_type_node->member_decorations[m].is_built_in;\n        common_flags.is_noperspective &= p_type_node->member_decorations[m].is_noperspective;\n        common_flags.is_flat &= p_type_node->member_decorations[m].is_flat;\n        common_flags.is_non_writable &= p_type_node->member_decorations[m].is_non_writable;\n        common_flags.is_non_readable &= p_type_node->member_decorations[m].is_non_readable;\n        common_flags.is_patch &= p_type_node->member_decorations[m].is_patch;\n        common_flags.is_per_vertex &= p_type_node->member_decorations[m].is_per_vertex;\n        common_flags.is_per_task &= p_type_node->member_decorations[m].is_per_task;\n        common_flags.is_weight_texture &= p_type_node->member_decorations[m].is_weight_texture;\n        common_flags.is_block_match_texture &= p_type_node->member_decorations[m].is_block_match_texture;\n      }\n\n      p_descriptor->decoration_flags |= ApplyDecorations(&common_flags);\n    }\n\n    // If this is in the StorageBuffer storage class, it's for sure a storage\n    // buffer descriptor. 
We need to handle this case earlier because in SPIR-V\n    // there are two ways to indicate a storage buffer:\n    // 1) Uniform storage class + BufferBlock decoration, or\n    // 2) StorageBuffer storage class + Buffer decoration.\n    // The 1) way is deprecated since SPIR-V v1.3. But the Buffer decoration is\n    // also used together with Uniform storage class to mean uniform buffer..\n    // We'll handle the pre-v1.3 cases in ParseDescriptorType().\n    if (pointer_storage_class == SpvStorageClassStorageBuffer) {\n      p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER;\n    }\n\n    // Copy image traits\n    if ((p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) == SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE) {\n      memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image));\n    }\n\n    // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096\n    {\n      const uint32_t resource_mask = SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE;\n      if ((p_type->type_flags & resource_mask) == resource_mask) {\n        memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image));\n      }\n    }\n\n    // Copy array traits\n    if (p_type->traits.array.dims_count > 0) {\n      p_descriptor->array.dims_count = p_type->traits.array.dims_count;\n      for (uint32_t dim_index = 0; dim_index < p_type->traits.array.dims_count; ++dim_index) {\n        uint32_t dim_value = p_type->traits.array.dims[dim_index];\n        p_descriptor->array.dims[dim_index] = dim_value;\n        p_descriptor->count *= dim_value;\n      }\n    }\n\n    // Count\n\n    p_descriptor->word_offset.binding = p_node->decorations.binding.word_offset;\n    p_descriptor->word_offset.set = p_node->decorations.set.word_offset;\n\n    ++descriptor_index;\n  }\n\n  if (p_module->descriptor_binding_count > 0) {\n    qsort(p_module->descriptor_bindings, 
p_module->descriptor_binding_count, sizeof(*(p_module->descriptor_bindings)),\n          SortCompareDescriptorBinding);\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseDescriptorType(SpvReflectShaderModule* p_module) {\n  if (p_module->descriptor_binding_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {\n    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);\n    SpvReflectTypeDescription* p_type = p_descriptor->type_description;\n\n    if ((int)p_descriptor->descriptor_type == (int)INVALID_VALUE) {\n      switch (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) {\n        default:\n          assert(false && \"unknown type flag\");\n          break;\n\n        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE: {\n          if (p_descriptor->image.dim == SpvDimBuffer) {\n            switch (p_descriptor->image.sampled) {\n              default:\n                assert(false && \"unknown texel buffer sampled value\");\n                break;\n              case IMAGE_SAMPLED:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;\n                break;\n              case IMAGE_STORAGE:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;\n                break;\n            }\n          } else if (p_descriptor->image.dim == SpvDimSubpassData) {\n            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;\n          } else {\n            switch (p_descriptor->image.sampled) {\n              default:\n                assert(false && \"unknown image sampled value\");\n                break;\n              case IMAGE_SAMPLED:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE;\n                break;\n          
    case IMAGE_STORAGE:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE;\n                break;\n            }\n          }\n        } break;\n\n        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER: {\n          p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER;\n        } break;\n\n        case (SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE): {\n          // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096\n          if (p_descriptor->image.dim == SpvDimBuffer) {\n            switch (p_descriptor->image.sampled) {\n              default:\n                assert(false && \"unknown texel buffer sampled value\");\n                break;\n              case IMAGE_SAMPLED:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;\n                break;\n              case IMAGE_STORAGE:\n                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;\n                break;\n            }\n          } else {\n            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;\n          }\n        } break;\n\n        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK: {\n          if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BLOCK) {\n            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER;\n          } else if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BUFFER_BLOCK) {\n            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER;\n          } else {\n            assert(false && \"unknown struct\");\n          }\n        } break;\n\n        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE: {\n          p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;\n        } break;\n      }\n    }\n\n    switch (p_descriptor->descriptor_type) 
{\n      case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SAMPLER;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:\n        p_descriptor->resource_type = (SpvReflectResourceType)(SPV_REFLECT_RESOURCE_FLAG_SAMPLER | SPV_REFLECT_RESOURCE_FLAG_SRV);\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:\n        break;\n      case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:\n        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;\n        break;\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseUAVCounterBindings(SpvReflectShaderModule* p_module) {\n  char name[MAX_NODE_NAME_LENGTH];\n  const char* k_count_tag = \"@count\";\n\n  for (uint32_t descriptor_index = 
0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {\n    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);\n\n    if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {\n      continue;\n    }\n\n    SpvReflectDescriptorBinding* p_counter_descriptor = NULL;\n    // Use UAV counter buffer id if present...\n    if (p_descriptor->uav_counter_id != UINT32_MAX) {\n      for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count;\n           ++counter_descriptor_index) {\n        SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]);\n        if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {\n          continue;\n        }\n        if (p_descriptor->uav_counter_id == p_test_counter_descriptor->spirv_id) {\n          p_counter_descriptor = p_test_counter_descriptor;\n          break;\n        }\n      }\n    }\n    // ...otherwise use old @count convention.\n    else {\n      const size_t descriptor_name_length = p_descriptor->name ? 
strlen(p_descriptor->name) : 0;\n\n      memset(name, 0, MAX_NODE_NAME_LENGTH);\n      memcpy(name, p_descriptor->name, descriptor_name_length);\n#if defined(_WIN32)\n      strcat_s(name, MAX_NODE_NAME_LENGTH, k_count_tag);\n#else\n      strcat(name, k_count_tag);\n#endif\n\n      for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count;\n           ++counter_descriptor_index) {\n        SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]);\n        if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {\n          continue;\n        }\n        if (p_test_counter_descriptor->name && strcmp(name, p_test_counter_descriptor->name) == 0) {\n          p_counter_descriptor = p_test_counter_descriptor;\n          break;\n        }\n      }\n    }\n\n    if (p_counter_descriptor != NULL) {\n      p_descriptor->uav_counter_binding = p_counter_descriptor;\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseDescriptorBlockVariable(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,\n                                                     SpvReflectTypeDescription* p_type, SpvReflectBlockVariable* p_var) {\n  bool has_non_writable = false;\n\n  if (IsNotNull(p_type->members) && (p_type->member_count > 0)) {\n    p_var->member_count = p_type->member_count;\n    p_var->members = (SpvReflectBlockVariable*)calloc(p_var->member_count, sizeof(*p_var->members));\n    if (IsNull(p_var->members)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n\n    SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);\n    if (IsNull(p_type_node)) {\n      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n    }\n    // Resolve to element type if current type is array or run time array\n    while (p_type_node->op == SpvOpTypeArray || p_type_node->op == 
SpvOpTypeRuntimeArray) {\n      if (p_type_node->op == SpvOpTypeArray) {\n        p_type_node = FindNode(p_parser, p_type_node->array_traits.element_type_id);\n      } else {\n        // Element type description\n        SpvReflectTypeDescription* p_type_temp = FindType(p_module, p_type_node->array_traits.element_type_id);\n        if (IsNull(p_type_temp)) {\n          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n        // Element type node\n        p_type_node = FindNode(p_parser, p_type_temp->id);\n      }\n      if (IsNull(p_type_node)) {\n        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n      }\n    }\n\n    // Parse members\n    for (uint32_t member_index = 0; member_index < p_type->member_count; ++member_index) {\n      SpvReflectTypeDescription* p_member_type = &p_type->members[member_index];\n      SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];\n      // If pointer type, treat like reference and resolve to pointee type\n      SpvReflectTypeDescription* p_member_ptr_type = 0;\n      bool found_recursion = false;\n\n      if ((p_member_type->storage_class == SpvStorageClassPhysicalStorageBuffer) &&\n          (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_REF)) {\n        // Remember the original type\n        p_member_ptr_type = p_member_type;\n\n        // strip array\n        if (p_member_type->op == SpvOpTypeArray) {\n          SpvReflectPrvNode* p_node = FindNode(p_parser, p_member_type->id);\n          if (p_node == NULL) {\n            return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          }\n          uint32_t element_type_id = p_node->array_traits.element_type_id;\n          p_member_type = FindType(p_module, element_type_id);\n          if (p_member_type == NULL) {\n            return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n          }\n        }\n\n        // Need to make sure we haven't started an infinite recursive loop\n        for (uint32_t 
i = 0; i < p_parser->physical_pointer_count; i++) {\n          if (p_member_type->id == p_parser->physical_pointer_check[i]->id) {\n            found_recursion = true;\n            break;  // still need to fill in p_member_type values\n          }\n        }\n        if (!found_recursion) {\n          uint32_t struct_id = FindType(p_module, p_member_type->id)->struct_type_description->id;\n          p_parser->physical_pointer_structs[p_parser->physical_pointer_struct_count].struct_id = struct_id;\n          p_parser->physical_pointer_structs[p_parser->physical_pointer_struct_count].p_var = p_member_var;\n          p_parser->physical_pointer_struct_count++;\n\n          p_parser->physical_pointer_check[p_parser->physical_pointer_count] = p_member_type;\n          p_parser->physical_pointer_count++;\n          if (p_parser->physical_pointer_count >= MAX_RECURSIVE_PHYSICAL_POINTER_CHECK) {\n            return SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED;\n          }\n        }\n\n        SpvReflectPrvNode* p_member_type_node = FindNode(p_parser, p_member_type->id);\n        if (IsNull(p_member_type_node)) {\n          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n        // Should be the pointee type\n        p_member_type = FindType(p_module, p_member_type_node->type_id);\n        if (IsNull(p_member_type)) {\n          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n      }\n      bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;\n      if (is_struct) {\n        if (!found_recursion) {\n          SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_member_type, p_member_var);\n          if (result != SPV_REFLECT_RESULT_SUCCESS) {\n            return result;\n          }\n        } else {\n          // if 2 member of structs are same PhysicalPointer type, copy the\n          // members values that aren't found skipping the 
recursion call\n          for (uint32_t i = 0; i < p_parser->physical_pointer_struct_count; i++) {\n            if (p_parser->physical_pointer_structs[i].struct_id == p_member_type->id) {\n              p_member_var->members = p_parser->physical_pointer_structs[i].p_var->members;\n              p_member_var->member_count = p_parser->physical_pointer_structs[i].p_var->member_count;\n              // Set here as it is the first time we need to walk down structs\n              p_member_var->flags |= SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY;\n            }\n          }\n        }\n      }\n\n      if (p_type_node->storage_class == SpvStorageClassPhysicalStorageBuffer && !p_type_node->member_names) {\n        // TODO 212 - If a buffer ref has an array of itself, all members are null\n        continue;\n      }\n\n      p_member_var->name = p_type_node->member_names[member_index];\n      p_member_var->offset = p_type_node->member_decorations[member_index].offset.value;\n      p_member_var->decoration_flags = ApplyDecorations(&p_type_node->member_decorations[member_index]);\n      p_member_var->flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED;\n      if (!has_non_writable && (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE)) {\n        has_non_writable = true;\n      }\n      ApplyNumericTraits(p_member_type, &p_member_var->numeric);\n      if (p_member_type->op == SpvOpTypeArray) {\n        ApplyArrayTraits(p_member_type, &p_member_var->array);\n      }\n\n      p_member_var->word_offset.offset = p_type_node->member_decorations[member_index].offset.word_offset;\n      p_member_var->type_description = p_member_ptr_type ? 
p_member_ptr_type : p_member_type;\n    }\n  }\n\n  p_var->name = p_type->type_name;\n  p_var->type_description = p_type;\n  if (has_non_writable) {\n    p_var->decoration_flags |= SPV_REFLECT_DECORATION_NON_WRITABLE;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic uint32_t GetPhysicalPointerStructSize(SpvReflectPrvParser* p_parser, uint32_t id) {\n  for (uint32_t i = 0; i < p_parser->physical_pointer_struct_count; i++) {\n    if (p_parser->physical_pointer_structs[i].struct_id == id) {\n      return p_parser->physical_pointer_structs[i].p_var->size;\n    }\n  }\n  return 0;\n}\n\nstatic SpvReflectResult ParseDescriptorBlockVariableSizes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,\n                                                          bool is_parent_root, bool is_parent_aos, bool is_parent_rta,\n                                                          SpvReflectBlockVariable* p_var) {\n  if (p_var->member_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  bool is_parent_ref = p_var->type_description->op == SpvOpTypePointer;\n\n  // Absolute offsets\n  for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) {\n    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];\n    if (is_parent_root) {\n      p_member_var->absolute_offset = p_member_var->offset;\n    } else {\n      p_member_var->absolute_offset =\n          is_parent_aos ? 0 : (is_parent_ref ? 
p_member_var->offset : p_member_var->offset + p_var->absolute_offset);\n    }\n  }\n\n  // Size\n  for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) {\n    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];\n    SpvReflectTypeDescription* p_member_type = p_member_var->type_description;\n\n    if (!p_member_type) {\n      // TODO 212 - If a buffer ref has an array of itself, all members are null\n      continue;\n    }\n    switch (p_member_type->op) {\n      case SpvOpTypeBool: {\n        p_member_var->size = SPIRV_WORD_SIZE;\n      } break;\n\n      case SpvOpTypeInt:\n      case SpvOpTypeFloat: {\n        p_member_var->size = p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH;\n      } break;\n\n      case SpvOpTypeVector: {\n        uint32_t size =\n            p_member_type->traits.numeric.vector.component_count * (p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH);\n        p_member_var->size = size;\n      } break;\n\n      case SpvOpTypeMatrix: {\n        if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_COLUMN_MAJOR) {\n          p_member_var->size = p_member_var->numeric.matrix.column_count * p_member_var->numeric.matrix.stride;\n        } else if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_ROW_MAJOR) {\n          p_member_var->size = p_member_var->numeric.matrix.row_count * p_member_var->numeric.matrix.stride;\n        }\n      } break;\n\n      case SpvOpTypeArray: {\n        // If array of structs, parse members first...\n        bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;\n        if (is_struct) {\n          if (p_member_var->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) {\n            p_member_var->size = GetPhysicalPointerStructSize(p_parser, p_member_type->id);\n          } else {\n            SpvReflectResult result =\n                
ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, is_parent_rta, p_member_var);\n            if (result != SPV_REFLECT_RESULT_SUCCESS) {\n              return result;\n            }\n          }\n        }\n        // ...then array\n        uint32_t element_count = (p_member_var->array.dims_count > 0 ? 1 : 0);\n        for (uint32_t i = 0; i < p_member_var->array.dims_count; ++i) {\n          element_count *= p_member_var->array.dims[i];\n        }\n        p_member_var->size = element_count * p_member_var->array.stride;\n      } break;\n\n      case SpvOpTypeRuntimeArray: {\n        bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;\n        if (is_struct) {\n          SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, true, p_member_var);\n          if (result != SPV_REFLECT_RESULT_SUCCESS) {\n            return result;\n          }\n        }\n      } break;\n\n      case SpvOpTypePointer: {\n        // Reference. 
Get to underlying struct type.\n        SpvReflectPrvNode* p_member_type_node = FindNode(p_parser, p_member_type->id);\n        if (IsNull(p_member_type_node)) {\n          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n        // Get the pointee type\n        p_member_type = FindType(p_module, p_member_type_node->type_id);\n        if (IsNull(p_member_type)) {\n          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;\n        }\n        assert(p_member_type->op == SpvOpTypeStruct);\n        FALLTHROUGH;\n      }\n\n      case SpvOpTypeStruct: {\n        if (p_member_var->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) {\n          p_member_var->size = GetPhysicalPointerStructSize(p_parser, p_member_type->id);\n        } else {\n          SpvReflectResult result =\n              ParseDescriptorBlockVariableSizes(p_parser, p_module, false, is_parent_aos, is_parent_rta, p_member_var);\n          if (result != SPV_REFLECT_RESULT_SUCCESS) {\n            return result;\n          }\n        }\n      } break;\n\n      default:\n        break;\n    }\n  }\n\n  // Parse padded size using offset difference for all member except for the last entry...\n  for (uint32_t member_index = 0; member_index < (p_var->member_count - 1); ++member_index) {\n    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];\n    SpvReflectBlockVariable* p_next_member_var = &p_var->members[member_index + 1];\n    p_member_var->padded_size = p_next_member_var->offset - p_member_var->offset;\n    if (p_member_var->size > p_member_var->padded_size) {\n      p_member_var->size = p_member_var->padded_size;\n    }\n    if (is_parent_rta) {\n      p_member_var->padded_size = p_member_var->size;\n    }\n  }\n  // ...last entry just gets rounded up to near multiple of SPIRV_DATA_ALIGNMENT, which is 16 and\n  // subtract the offset.\n  if (p_var->member_count > 0) {\n    SpvReflectBlockVariable* p_member_var = 
&p_var->members[p_var->member_count - 1];\n    p_member_var->padded_size = RoundUp(p_member_var->offset + p_member_var->size, SPIRV_DATA_ALIGNMENT) - p_member_var->offset;\n    if (p_member_var->size > p_member_var->padded_size) {\n      p_member_var->size = p_member_var->padded_size;\n    }\n    if (is_parent_rta) {\n      p_member_var->padded_size = p_member_var->size;\n    }\n  }\n\n  // If buffer ref, sizes are same as uint64_t\n  if (is_parent_ref) {\n    p_var->size = p_var->padded_size = 8;\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  // @TODO validate this with assertion\n  p_var->size = p_var->members[p_var->member_count - 1].offset + p_var->members[p_var->member_count - 1].padded_size;\n  p_var->padded_size = p_var->size;\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic void MarkSelfAndAllMemberVarsAsUsed(SpvReflectBlockVariable* p_var) {\n  // Clear the current variable's UNUSED flag\n  p_var->flags &= ~SPV_REFLECT_VARIABLE_FLAGS_UNUSED;\n\n  SpvOp op_type = p_var->type_description->op;\n  switch (op_type) {\n    default:\n      break;\n\n    case SpvOpTypeArray: {\n    } break;\n\n    case SpvOpTypeStruct: {\n      for (uint32_t i = 0; i < p_var->member_count; ++i) {\n        SpvReflectBlockVariable* p_member_var = &p_var->members[i];\n        MarkSelfAndAllMemberVarsAsUsed(p_member_var);\n      }\n    } break;\n  }\n}\n\nstatic SpvReflectResult ParseDescriptorBlockVariableUsage(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,\n                                                          SpvReflectPrvAccessChain* p_access_chain, uint32_t index_index,\n                                                          SpvOp override_op_type, SpvReflectBlockVariable* p_var) {\n  // Clear the current variable's UNUSED flag\n  p_var->flags &= ~SPV_REFLECT_VARIABLE_FLAGS_UNUSED;\n\n  // Parsing arrays requires overriding the op type for\n  // for the lowest dim's element type.\n  SpvReflectTypeDescription* p_type = p_var->type_description;\n  
SpvOp op_type = p_type->op;
  if (override_op_type != (SpvOp)INVALID_VALUE) {
    op_type = override_op_type;
  }

  switch (op_type) {
    default:
      break;

    case SpvOpTypeArray: {
      // Parse through array's type hierarchy to find the actual/non-array element type
      while ((p_type->op == SpvOpTypeArray) && (index_index < p_access_chain->index_count)) {
        // Find the array element type id
        SpvReflectPrvNode* p_node = FindNode(p_parser, p_type->id);
        if (p_node == NULL) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        uint32_t element_type_id = p_node->array_traits.element_type_id;
        // Get the array element type
        p_type = FindType(p_module, element_type_id);
        if (p_type == NULL) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        // Next access chain index
        index_index += 1;
      }

      // Only continue parsing if there's remaining indices in the access
      // chain. If the end of the access chain has been reached then all
      // remaining variables (including those in struct hierarchies)
      // are considered USED.
      //
      // See: https://github.com/KhronosGroup/SPIRV-Reflect/issues/78
      //
      if (index_index < p_access_chain->index_count) {
        // Parse current var again with a type override and advanced index index
        SpvReflectResult result =
            ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, index_index, p_type->op, p_var);
        if (result != SPV_REFLECT_RESULT_SUCCESS) {
          return result;
        }
      } else {
        // Clear UNUSED flag for remaining variables
        MarkSelfAndAllMemberVarsAsUsed(p_var);
      }
    } break;

    case SpvOpTypePointer: {
      // Reference. Get to underlying struct type.
      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Get the pointee type
      p_type = FindType(p_module, p_type_node->type_id);
      if (IsNull(p_type)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Non-struct pointees have no members to walk; stop here
      if (p_type->op != SpvOpTypeStruct) {
        break;
      }
      FALLTHROUGH;
    }

    case SpvOpTypeStruct: {
      assert(p_var->member_count > 0);
      if (p_var->member_count == 0) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA;
      }

      // The access chain can have zero indexes, if used for a runtime array
      if (p_access_chain->index_count == 0) {
        return SPV_REFLECT_RESULT_SUCCESS;
      }

      // Get member variable at the access's chain current index
      uint32_t index = p_access_chain->indexes[index_index];
      if (index >= p_var->member_count) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE;
      }
      SpvReflectBlockVariable* p_member_var = &p_var->members[index];

      bool is_pointer_to_pointer = IsPointerToPointer(p_parser, p_access_chain->result_type_id);
      if (is_pointer_to_pointer) {
        // Remember block var for this access chain for downstream dereference
        p_access_chain->block_var = p_member_var;
      }

      // Next access chain index
      index_index += 1;

      // Only continue parsing if there's remaining indices in the access
      // chain. 
If the end of the access chain has been reached then all
      // remaining variables (including those in struct hierarchies)
      // are considered USED.
      //
      // See: https://github.com/KhronosGroup/SPIRV-Reflect/issues/78
      //
      if (index_index < p_access_chain->index_count) {
        SpvReflectResult result =
            ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, index_index, (SpvOp)INVALID_VALUE, p_member_var);
        if (result != SPV_REFLECT_RESULT_SUCCESS) {
          return result;
        }
      } else if (!is_pointer_to_pointer) {
        // Clear UNUSED flag for remaining variables
        MarkSelfAndAllMemberVarsAsUsed(p_member_var);
      }
    } break;
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Builds the block-variable tree for every uniform/storage buffer binding,
// then walks all access chains to mark which members are statically used,
// and finally computes member sizes/padded sizes.
static SpvReflectResult ParseDescriptorBlocks(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {
  if (p_module->descriptor_binding_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_parser->physical_pointer_struct_count = 0;

  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);
    SpvReflectTypeDescription* p_type = p_descriptor->type_description;
    // Only uniform and storage buffers carry a block to parse
    if ((p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER) &&
        (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER)) {
      continue;
    }

    // Mark UNUSED
    p_descriptor->block.flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED;
    p_parser->physical_pointer_count = 0;
    // Parse descriptor block
    SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, &p_descriptor->block);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; 
++access_chain_index) {
      SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
      // Skip any access chains that aren't touching this descriptor block
      if (p_descriptor->spirv_id != p_access_chain->base_id) {
        continue;
      }
      result = ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, 0, (SpvOp)INVALID_VALUE, &p_descriptor->block);
      if (result != SPV_REFLECT_RESULT_SUCCESS) {
        return result;
      }
    }

    p_descriptor->block.name = p_descriptor->name;

    bool is_parent_rta = (p_descriptor->descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, is_parent_rta, &p_descriptor->block);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    if (is_parent_rta) {
      // Storage buffers with runtime arrays have no fixed size
      p_descriptor->block.size = 0;
      p_descriptor->block.padded_size = 0;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Maps a numeric/vector/struct type description to a SpvReflectFormat.
// Returns SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR when no mapping applies
// (e.g. unsupported bit width or component count).
static SpvReflectResult ParseFormat(const SpvReflectTypeDescription* p_type, SpvReflectFormat* p_format) {
  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR;
  bool signedness = (p_type->traits.numeric.scalar.signedness != 0);
  uint32_t bit_width = p_type->traits.numeric.scalar.width;
  if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_VECTOR) {
    uint32_t component_count = p_type->traits.numeric.vector.component_count;
    if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) {
      // Float vectors
      switch (bit_width) {
        case 16: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R16G16_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R16G16B16_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R16G16B16A16_SFLOAT;
              break;
          }
        } break;

        case 32: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R32G32_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R32G32B32_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT;
              break;
          }
        } break;

        case 64: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R64G64_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R64G64B64_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R64G64B64A64_SFLOAT;
              break;
          }
        }
      }
      result = SPV_REFLECT_RESULT_SUCCESS;
    } else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) {
      // Int/bool vectors; signedness selects SINT vs UINT
      switch (bit_width) {
        case 16: {
          switch (component_count) {
            case 2:
              *p_format = signedness ? SPV_REFLECT_FORMAT_R16G16_SINT : SPV_REFLECT_FORMAT_R16G16_UINT;
              break;
            case 3:
              *p_format = signedness ? SPV_REFLECT_FORMAT_R16G16B16_SINT : SPV_REFLECT_FORMAT_R16G16B16_UINT;
              break;
            case 4:
              *p_format = signedness ? SPV_REFLECT_FORMAT_R16G16B16A16_SINT : SPV_REFLECT_FORMAT_R16G16B16A16_UINT;
              break;
          }
        } break;

        case 32: {
          switch (component_count) {
            case 2:
              *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32_SINT : SPV_REFLECT_FORMAT_R32G32_UINT;
              break;
            case 3:
              *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32_SINT : SPV_REFLECT_FORMAT_R32G32B32_UINT;
              break;
            case 4:
              *p_format = signedness ? 
SPV_REFLECT_FORMAT_R32G32B32A32_SINT : SPV_REFLECT_FORMAT_R32G32B32A32_UINT;\n              break;\n          }\n        } break;\n\n        case 64: {\n          switch (component_count) {\n            case 2:\n              *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64_SINT : SPV_REFLECT_FORMAT_R64G64_UINT;\n              break;\n            case 3:\n              *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64B64_SINT : SPV_REFLECT_FORMAT_R64G64B64_UINT;\n              break;\n            case 4:\n              *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64B64A64_SINT : SPV_REFLECT_FORMAT_R64G64B64A64_UINT;\n              break;\n          }\n        }\n      }\n      result = SPV_REFLECT_RESULT_SUCCESS;\n    }\n  } else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) {\n    switch (bit_width) {\n      case 16:\n        *p_format = SPV_REFLECT_FORMAT_R16_SFLOAT;\n        break;\n      case 32:\n        *p_format = SPV_REFLECT_FORMAT_R32_SFLOAT;\n        break;\n      case 64:\n        *p_format = SPV_REFLECT_FORMAT_R64_SFLOAT;\n        break;\n    }\n    result = SPV_REFLECT_RESULT_SUCCESS;\n  } else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) {\n    switch (bit_width) {\n      case 16:\n        *p_format = signedness ? SPV_REFLECT_FORMAT_R16_SINT : SPV_REFLECT_FORMAT_R16_UINT;\n        break;\n        break;\n      case 32:\n        *p_format = signedness ? SPV_REFLECT_FORMAT_R32_SINT : SPV_REFLECT_FORMAT_R32_UINT;\n        break;\n        break;\n      case 64:\n        *p_format = signedness ? 
SPV_REFLECT_FORMAT_R64_SINT : SPV_REFLECT_FORMAT_R64_UINT;
        break;
    }
    result = SPV_REFLECT_RESULT_SUCCESS;
  } else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) {
    // Structs have no single format
    *p_format = SPV_REFLECT_FORMAT_UNDEFINED;
    result = SPV_REFLECT_RESULT_SUCCESS;
  }
  return result;
}

// Recursively fills p_var from the type description and decorations:
// allocates/parses struct members, then applies name, decorations,
// numeric/array traits, and (for Input/Output variables) the format.
// p_var_node_decorations is NULL when recursing into struct members.
static SpvReflectResult ParseInterfaceVariable(SpvReflectPrvParser* p_parser,
                                               const SpvReflectPrvDecorations* p_var_node_decorations,
                                               const SpvReflectPrvDecorations* p_type_node_decorations,
                                               SpvReflectShaderModule* p_module, SpvReflectTypeDescription* p_type,
                                               SpvReflectInterfaceVariable* p_var, bool* p_has_built_in) {
  SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
  if (IsNull(p_type_node)) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
  }

  if (p_type->member_count > 0) {
    p_var->member_count = p_type->member_count;
    p_var->members = (SpvReflectInterfaceVariable*)calloc(p_var->member_count, sizeof(*p_var->members));
    if (IsNull(p_var->members)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }

    for (uint32_t member_index = 0; member_index < p_type_node->member_count; ++member_index) {
      SpvReflectPrvDecorations* p_member_decorations = &p_type_node->member_decorations[member_index];
      SpvReflectTypeDescription* p_member_type = &p_type->members[member_index];
      SpvReflectInterfaceVariable* p_member_var = &p_var->members[member_index];

      // Storage class is the same throughout the whole struct
      p_member_var->storage_class = p_var->storage_class;

      SpvReflectResult result =
          ParseInterfaceVariable(p_parser, NULL, p_member_decorations, p_module, p_member_type, p_member_var, p_has_built_in);
      if (result != SPV_REFLECT_RESULT_SUCCESS) {
        
SPV_REFLECT_ASSERT(false);
        return result;
      }
    }
  }

  p_var->name = p_type_node->name;
  p_var->decoration_flags = ApplyDecorations(p_type_node_decorations);
  if (p_var_node_decorations != NULL) {
    p_var->decoration_flags |= ApplyDecorations(p_var_node_decorations);
  } else {
    // Apply member decoration values to struct members
    p_var->location = p_type_node_decorations->location.value;
    p_var->component = p_type_node_decorations->component.value;
  }

  p_var->built_in = p_type_node_decorations->built_in;
  ApplyNumericTraits(p_type, &p_var->numeric);
  if (p_type->op == SpvOpTypeArray) {
    ApplyArrayTraits(p_type, &p_var->array);
  }

  p_var->type_description = p_type;

  *p_has_built_in |= p_type_node_decorations->is_built_in;

  // Only parse format for interface variables that are input or output
  if ((p_var->storage_class == SpvStorageClassInput) || (p_var->storage_class == SpvStorageClassOutput)) {
    SpvReflectResult result = ParseFormat(p_var->type_description, &p_var->format);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      SPV_REFLECT_ASSERT(false);
      return result;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Populates an entry point's interface variable list from its OpEntryPoint
// interface ids: counts input/output variables, allocates the arrays, then
// parses each variable (resolving pointer types to their pointee first).
static SpvReflectResult ParseInterfaceVariables(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,
                                                SpvReflectEntryPoint* p_entry, uint32_t interface_variable_count,
                                                uint32_t* p_interface_variable_ids) {
  if (interface_variable_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_entry->interface_variable_count = interface_variable_count;
  p_entry->input_variable_count = 0;
  p_entry->output_variable_count = 0;
  // First pass: count inputs and outputs so the arrays can be sized
  for (size_t i = 0; i < interface_variable_count; ++i) {
    uint32_t var_result_id = *(p_interface_variable_ids + i);
    SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id);
    if (IsNull(p_node)) {
      
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }

    if (p_node->storage_class == SpvStorageClassInput) {
      p_entry->input_variable_count += 1;
    } else if (p_node->storage_class == SpvStorageClassOutput) {
      p_entry->output_variable_count += 1;
    }
  }

  if (p_entry->input_variable_count > 0) {
    p_entry->input_variables =
        (SpvReflectInterfaceVariable**)calloc(p_entry->input_variable_count, sizeof(*(p_entry->input_variables)));
    if (IsNull(p_entry->input_variables)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }

  if (p_entry->output_variable_count > 0) {
    p_entry->output_variables =
        (SpvReflectInterfaceVariable**)calloc(p_entry->output_variable_count, sizeof(*(p_entry->output_variables)));
    if (IsNull(p_entry->output_variables)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }

  if (p_entry->interface_variable_count > 0) {
    p_entry->interface_variables =
        (SpvReflectInterfaceVariable*)calloc(p_entry->interface_variable_count, sizeof(*(p_entry->interface_variables)));
    if (IsNull(p_entry->interface_variables)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }

  // Second pass: parse each variable and file it into the input/output lists
  size_t input_index = 0;
  size_t output_index = 0;
  for (size_t i = 0; i < interface_variable_count; ++i) {
    uint32_t var_result_id = *(p_interface_variable_ids + i);
    SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id);
    if (IsNull(p_node)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }

    SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id);
    if (IsNull(p_node) || IsNull(p_type)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }
    // If the type is a pointer, resolve it
    if (p_type->op == SpvOpTypePointer) {
      // Find the type's node
      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Should be the resolved type
      p_type = FindType(p_module, p_type_node->type_id);
      if (IsNull(p_type)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
    }

    SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
    if (IsNull(p_type_node)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }

    SpvReflectInterfaceVariable* p_var = &(p_entry->interface_variables[i]);
    p_var->storage_class = p_node->storage_class;

    bool has_built_in = p_node->decorations.is_built_in;
    SpvReflectResult result =
        ParseInterfaceVariable(p_parser, &p_node->decorations, &p_type_node->decorations, p_module, p_type, p_var, &has_built_in);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      SPV_REFLECT_ASSERT(false);
      return result;
    }

    // Input and output variables
    // (p_var->storage_class was copied from p_node->storage_class above, so the
    // mixed p_var/p_node checks below are equivalent.)
    if (p_var->storage_class == SpvStorageClassInput) {
      p_entry->input_variables[input_index] = p_var;
      ++input_index;
    } else if (p_node->storage_class == SpvStorageClassOutput) {
      p_entry->output_variables[output_index] = p_var;
      ++output_index;
    }

    // SPIR-V result id
    p_var->spirv_id = p_node->result_id;
    // Name
    p_var->name = p_node->name;
    // Semantic
    p_var->semantic = p_node->decorations.semantic.value;

    // Decorate with built-in if any member is built-in
    if (has_built_in) {
      p_var->decoration_flags |= SPV_REFLECT_DECORATION_BUILT_IN;
    }

    // Location is decorated on OpVariable node, not the type node.
    p_var->location = p_node->decorations.location.value;
    p_var->component = p_node->decorations.component.value;
    p_var->word_offset.location = p_node->decorations.location.word_offset;

    // Built in
    if (p_node->decorations.is_built_in) {
      p_var->built_in = p_node->decorations.built_in;
    }
 
 }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Collects the SPIR-V ids of all push constant blocks in the module into a
// newly allocated, sorted array. Caller owns *p_push_constants.
static SpvReflectResult EnumerateAllPushConstants(SpvReflectShaderModule* p_module, size_t* p_push_constant_count,
                                                  uint32_t** p_push_constants) {
  *p_push_constant_count = p_module->push_constant_block_count;
  if (*p_push_constant_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }
  *p_push_constants = (uint32_t*)calloc(*p_push_constant_count, sizeof(**p_push_constants));

  if (IsNull(*p_push_constants)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }

  for (size_t i = 0; i < *p_push_constant_count; ++i) {
    (*p_push_constants)[i] = p_module->push_constant_blocks[i].spirv_id;
  }
  qsort(*p_push_constants, *p_push_constant_count, sizeof(**p_push_constants), SortCompareUint32);
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Walks the static call graph rooted at p_func, counting every reached
// function (and recording its id when p_func_ids is non-NULL). The depth
// guard rejects cycles, which Vulkan forbids.
static SpvReflectResult TraverseCallGraph(SpvReflectPrvParser* p_parser, SpvReflectPrvFunction* p_func, size_t* p_func_count,
                                          uint32_t* p_func_ids, uint32_t depth) {
  if (depth > p_parser->function_count) {
    // Vulkan does not permit recursion (Vulkan spec Appendix A):
    //   "Recursion: The static function-call graph for an entry point must not
    //    contain cycles."
    return SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION;
  }
  if (IsNotNull(p_func_ids)) {
    p_func_ids[(*p_func_count)++] = p_func->id;
  } else {
    // Counting pass only
    ++*p_func_count;
  }
  for (size_t i = 0; i < p_func->callee_count; ++i) {
    SpvReflectResult result = TraverseCallGraph(p_parser, p_func->callee_ptrs[i], p_func_count, p_func_ids, depth + 1);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Returns the value of the OpConstant with the given result id, or
// (uint32_t)INVALID_VALUE when the id is not an OpConstant node.
static uint32_t GetUint32Constant(SpvReflectPrvParser* p_parser, uint32_t id) {
  uint32_t result = (uint32_t)INVALID_VALUE;
  SpvReflectPrvNode* p_node = FindNode(p_parser, id);
  if (p_node && p_node->op == 
SpvOpConstant) {
    UNCHECKED_READU32(p_parser, p_node->word_offset + 3, result);
  }
  return result;
}

// True when p_node is an OpAccessChain (word_count == 6, i.e. a 2D access)
// into a (RW)ByteAddressBuffer binding — the pattern whose constant offset
// ParseByteAddressBuffer tries to recover.
static bool HasByteAddressBufferOffset(SpvReflectPrvNode* p_node, SpvReflectDescriptorBinding* p_binding) {
  return IsNotNull(p_node) && IsNotNull(p_binding) && p_node->op == SpvOpAccessChain && p_node->word_count == 6 &&
         (p_binding->user_type == SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER ||
          p_binding->user_type == SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER);
}

// Attempts to reconstruct the constant byte offset accessed through a
// ByteAddressBuffer access chain and appends it to the binding's offset list.
// Non-matching or non-constant patterns return SPV_REFLECT_RESULT_SUCCESS
// without recording anything ("not found" is not an error).
static SpvReflectResult ParseByteAddressBuffer(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_node,
                                               SpvReflectDescriptorBinding* p_binding) {
  const SpvReflectResult not_found = SPV_REFLECT_RESULT_SUCCESS;
  if (!HasByteAddressBufferOffset(p_node, p_binding)) {
    return not_found;
  }

  uint32_t offset = 0;  // starting offset

  uint32_t base_id = 0;
  // expect first index of 2D access is zero
  UNCHECKED_READU32(p_parser, p_node->word_offset + 4, base_id);
  if (GetUint32Constant(p_parser, base_id) != 0) {
    return not_found;
  }
  UNCHECKED_READU32(p_parser, p_node->word_offset + 5, base_id);
  SpvReflectPrvNode* p_next_node = FindNode(p_parser, base_id);
  if (IsNull(p_next_node)) {
    return not_found;
  } else if (p_next_node->op == SpvOpConstant) {
    // The access chain might just be a constant straight to the offset
    offset = GetUint32Constant(p_parser, base_id);
    p_binding->byte_address_buffer_offsets[p_binding->byte_address_buffer_offset_count] = offset;
    p_binding->byte_address_buffer_offset_count++;
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  // there are usually 2 (sometimes 3) instructions that make up the arithmetic logic to calculate the offset
  SpvReflectPrvNode* arithmetic_node_stack[8];
  uint32_t arithmetic_count = 0;

  while (IsNotNull(p_next_node)) {
    if (p_next_node->op == SpvOpLoad || p_next_node->op == SpvOpBitcast || p_next_node->op == 
SpvOpConstant) {
      break;  // arithmetic starts here
    }
    arithmetic_node_stack[arithmetic_count++] = p_next_node;
    if (arithmetic_count >= 8) {
      return not_found;
    }

    UNCHECKED_READU32(p_parser, p_next_node->word_offset + 3, base_id);
    p_next_node = FindNode(p_parser, base_id);
  }

  // Replay the collected arithmetic in source order (stack was built backwards)
  const uint32_t count = arithmetic_count;
  for (uint32_t i = 0; i < count; i++) {
    p_next_node = arithmetic_node_stack[--arithmetic_count];
    // All arithmetic ops takes 2 operands, assumption is the 2nd operand has the constant
    UNCHECKED_READU32(p_parser, p_next_node->word_offset + 4, base_id);
    uint32_t value = GetUint32Constant(p_parser, base_id);
    if (value == INVALID_VALUE) {
      return not_found;
    }

    switch (p_next_node->op) {
      case SpvOpShiftRightLogical:
        offset >>= value;
        break;
      case SpvOpIAdd:
        offset += value;
        break;
      case SpvOpISub:
        offset -= value;
        break;
      case SpvOpIMul:
        offset *= value;
        break;
      case SpvOpUDiv:
        offset /= value;
        break;
      case SpvOpSDiv:
        // OpConstant might be signed, but value should never be negative
        assert((int32_t)value > 0);
        offset /= value;
        break;
      default:
        return not_found;
    }
  }

  p_binding->byte_address_buffer_offsets[p_binding->byte_address_buffer_offset_count] = offset;
  p_binding->byte_address_buffer_offset_count++;
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Determines which uniforms and push constants an entry point statically
// uses by flattening its call graph, gathering every accessed variable, and
// intersecting that set with the module's resource id lists. Also marks
// accessed descriptor bindings and records ByteAddressBuffer offsets.
static SpvReflectResult ParseStaticallyUsedResources(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,
                                                     SpvReflectEntryPoint* p_entry, size_t uniform_count, uint32_t* uniforms,
                                                     size_t push_constant_count, uint32_t* push_constants) {
  // Find function with the right id
  SpvReflectPrvFunction* p_func = NULL;
  for (size_t i = 0; i < p_parser->function_count; ++i) {
    if (p_parser->functions[i].id == p_entry->id) {
      p_func = &(p_parser->functions[i]);
      break;
    }
  }
  if (p_func == NULL) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
  }

  // First traversal: count reached functions to size the id array
  size_t called_function_count = 0;
  SpvReflectResult result = TraverseCallGraph(p_parser, p_func, &called_function_count, NULL, 0);
  if (result != SPV_REFLECT_RESULT_SUCCESS) {
    return result;
  }

  uint32_t* p_called_functions = NULL;
  if (called_function_count > 0) {
    p_called_functions = (uint32_t*)calloc(called_function_count, sizeof(*p_called_functions));
    if (IsNull(p_called_functions)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }

  // Second traversal: record the ids
  called_function_count = 0;
  result = TraverseCallGraph(p_parser, p_func, &called_function_count, p_called_functions, 0);
  if (result != SPV_REFLECT_RESULT_SUCCESS) {
    SafeFree(p_called_functions);
    return result;
  }

  if (called_function_count > 0) {
    qsort(p_called_functions, called_function_count, sizeof(*p_called_functions), SortCompareUint32);
  }
  called_function_count = DedupSortedUint32(p_called_functions, called_function_count);

  // Count accessed variables across all reached functions
  uint32_t used_acessed_count = 0;
  for (size_t i = 0, j = 0; i < called_function_count; ++i) {
    // No need to bounds check j because a missing ID issue would have been
    // found during TraverseCallGraph
    while (p_parser->functions[j].id != p_called_functions[i]) {
      ++j;
    }
    used_acessed_count += p_parser->functions[j].accessed_variable_count;
  }
  SpvReflectPrvAccessedVariable* p_used_accesses = NULL;
  if (used_acessed_count > 0) {
    p_used_accesses = (SpvReflectPrvAccessedVariable*)calloc(used_acessed_count, sizeof(SpvReflectPrvAccessedVariable));
    if (IsNull(p_used_accesses)) {
      SafeFree(p_called_functions);
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
  }
  used_acessed_count = 0;
  for 
(size_t i = 0, j = 0; i < called_function_count; ++i) {
    while (p_parser->functions[j].id != p_called_functions[i]) {
      ++j;
    }

    // Concatenate each reached function's accessed-variable records
    memcpy(&p_used_accesses[used_acessed_count], p_parser->functions[j].accessed_variables,
           p_parser->functions[j].accessed_variable_count * sizeof(SpvReflectPrvAccessedVariable));
    used_acessed_count += p_parser->functions[j].accessed_variable_count;
  }
  SafeFree(p_called_functions);

  if (used_acessed_count > 0) {
    qsort(p_used_accesses, used_acessed_count, sizeof(*p_used_accesses), SortCompareAccessedVariable);
  }

  // Do set intersection to find the used uniform and push constants
  size_t used_uniform_count = 0;
  result = IntersectSortedAccessedVariable(p_used_accesses, used_acessed_count, uniforms, uniform_count, &p_entry->used_uniforms,
                                           &used_uniform_count);
  if (result != SPV_REFLECT_RESULT_SUCCESS) {
    SafeFree(p_used_accesses);
    return result;
  }

  size_t used_push_constant_count = 0;
  result = IntersectSortedAccessedVariable(p_used_accesses, used_acessed_count, push_constants, push_constant_count,
                                           &p_entry->used_push_constants, &used_push_constant_count);
  if (result != SPV_REFLECT_RESULT_SUCCESS) {
    SafeFree(p_used_accesses);
    return result;
  }

  // Mark bindings this entry point touches and count ByteAddressBuffer accesses
  for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) {
    SpvReflectDescriptorBinding* p_binding = &p_module->descriptor_bindings[i];
    uint32_t byte_address_buffer_offset_count = 0;

    for (uint32_t j = 0; j < used_acessed_count; j++) {
      if (p_used_accesses[j].variable_ptr == p_binding->spirv_id) {
        p_binding->accessed = 1;

        if (HasByteAddressBufferOffset(p_used_accesses[j].p_node, p_binding)) {
          byte_address_buffer_offset_count++;
        }
      }
    }

    // only if SPIR-V has ByteAddressBuffer user type
    if (byte_address_buffer_offset_count > 0) 
{\n      bool multi_entrypoint = p_binding->byte_address_buffer_offset_count > 0;\n      if (multi_entrypoint) {\n        // If there is a 2nd entrypoint, we can have multiple entry points, in this case we want to just combine the accessed\n        // offsets and then de-duplicate it\n        uint32_t* prev_byte_address_buffer_offsets = p_binding->byte_address_buffer_offsets;\n        p_binding->byte_address_buffer_offsets =\n            (uint32_t*)calloc(byte_address_buffer_offset_count + p_binding->byte_address_buffer_offset_count, sizeof(uint32_t));\n        memcpy(p_binding->byte_address_buffer_offsets, prev_byte_address_buffer_offsets,\n               sizeof(uint32_t) * p_binding->byte_address_buffer_offset_count);\n        SafeFree(prev_byte_address_buffer_offsets);\n      } else {\n        // possible not all allocated offset slots are used, but this will be a max per binding\n        p_binding->byte_address_buffer_offsets = (uint32_t*)calloc(byte_address_buffer_offset_count, sizeof(uint32_t));\n      }\n\n      if (IsNull(p_binding->byte_address_buffer_offsets)) {\n        SafeFree(p_used_accesses);\n        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n      }\n\n      for (uint32_t j = 0; j < used_acessed_count; j++) {\n        if (p_used_accesses[j].variable_ptr == p_binding->spirv_id) {\n          result = ParseByteAddressBuffer(p_parser, p_used_accesses[j].p_node, p_binding);\n          if (result != SPV_REFLECT_RESULT_SUCCESS) {\n            SafeFree(p_used_accesses);\n            return result;\n          }\n        }\n      }\n\n      if (multi_entrypoint) {\n        qsort(p_binding->byte_address_buffer_offsets, p_binding->byte_address_buffer_offset_count,\n              sizeof(*(p_binding->byte_address_buffer_offsets)), SortCompareUint32);\n        p_binding->byte_address_buffer_offset_count =\n            (uint32_t)DedupSortedUint32(p_binding->byte_address_buffer_offsets, p_binding->byte_address_buffer_offset_count);\n      }\n    }\n  }\n\n  
SafeFree(p_used_accesses);

  p_entry->used_uniform_count = (uint32_t)used_uniform_count;
  p_entry->used_push_constant_count = (uint32_t)used_push_constant_count;

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Parses every OpEntryPoint in the module: execution model/shader stage, id,
// and (further below) name, interface variables and statically used resources.
static SpvReflectResult ParseEntryPoints(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {
  if (p_parser->entry_point_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_module->entry_point_count = p_parser->entry_point_count;
  p_module->entry_points = (SpvReflectEntryPoint*)calloc(p_module->entry_point_count, sizeof(*(p_module->entry_points)));
  if (IsNull(p_module->entry_points)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }

  SpvReflectResult result;
  size_t uniform_count = 0;
  uint32_t* uniforms = NULL;
  if ((result = EnumerateAllUniforms(p_module, &uniform_count, &uniforms)) != SPV_REFLECT_RESULT_SUCCESS) {
    return result;
  }
  size_t push_constant_count = 0;
  uint32_t* push_constants = NULL;
  // NOTE(review): if this enumeration fails, the `uniforms` buffer allocated
  // above is not freed before returning — confirm whether this leak is
  // handled by a caller or should be fixed here.
  if ((result = EnumerateAllPushConstants(p_module, &push_constant_count, &push_constants)) != SPV_REFLECT_RESULT_SUCCESS) {
    return result;
  }

  // Walk the node list, filling one entry point per OpEntryPoint instruction
  size_t entry_point_index = 0;
  for (size_t i = 0; entry_point_index < p_parser->entry_point_count && i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
    if (p_node->op != SpvOpEntryPoint) {
      continue;
    }

    SpvReflectEntryPoint* p_entry_point = &(p_module->entry_points[entry_point_index]);
    CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvExecutionModel, p_entry_point->spirv_execution_model);
    CHECKED_READU32(p_parser, p_node->word_offset + 2, p_entry_point->id);

    // Map SPIR-V execution model to the reflect shader stage bit
    switch (p_entry_point->spirv_execution_model) {
      default:
        break;
      case SpvExecutionModelVertex:
        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_VERTEX_BIT;
        break;
      case SpvExecutionModelTessellationControl:
        p_entry_point->shader_stage = 
SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n        break;\n      case SpvExecutionModelTessellationEvaluation:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n        break;\n      case SpvExecutionModelGeometry:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT;\n        break;\n      case SpvExecutionModelFragment:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT;\n        break;\n      case SpvExecutionModelGLCompute:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT;\n        break;\n      case SpvExecutionModelTaskNV:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV;\n        break;\n      case SpvExecutionModelTaskEXT:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TASK_BIT_EXT;\n        break;\n      case SpvExecutionModelMeshNV:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV;\n        break;\n      case SpvExecutionModelMeshEXT:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_MESH_BIT_EXT;\n        break;\n      case SpvExecutionModelRayGenerationKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_RAYGEN_BIT_KHR;\n        break;\n      case SpvExecutionModelIntersectionKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_INTERSECTION_BIT_KHR;\n        break;\n      case SpvExecutionModelAnyHitKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_ANY_HIT_BIT_KHR;\n        break;\n      case SpvExecutionModelClosestHitKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;\n        break;\n      case SpvExecutionModelMissKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_MISS_BIT_KHR;\n        break;\n      case SpvExecutionModelCallableKHR:\n        p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_CALLABLE_BIT_KHR;\n        break;\n  
  }\n\n    ++entry_point_index;\n\n    // Name length is required to calculate next operand\n    uint32_t name_start_word_offset = 3;\n    uint32_t name_length_with_terminator = 0;\n    result =\n        ReadStr(p_parser, p_node->word_offset + name_start_word_offset, 0, p_node->word_count, &name_length_with_terminator, NULL);\n    if (result != SPV_REFLECT_RESULT_SUCCESS) {\n      return result;\n    }\n    p_entry_point->name = (const char*)(p_parser->spirv_code + p_node->word_offset + name_start_word_offset);\n\n    uint32_t name_word_count = RoundUp(name_length_with_terminator, SPIRV_WORD_SIZE) / SPIRV_WORD_SIZE;\n    uint32_t interface_variable_count = (p_node->word_count - (name_start_word_offset + name_word_count));\n    uint32_t* p_interface_variables = NULL;\n    if (interface_variable_count > 0) {\n      p_interface_variables = (uint32_t*)calloc(interface_variable_count, sizeof(*(p_interface_variables)));\n      if (IsNull(p_interface_variables)) {\n        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n      }\n    }\n\n    for (uint32_t var_index = 0; var_index < interface_variable_count; ++var_index) {\n      uint32_t var_result_id = (uint32_t)INVALID_VALUE;\n      uint32_t offset = name_start_word_offset + name_word_count + var_index;\n      CHECKED_READU32(p_parser, p_node->word_offset + offset, var_result_id);\n      p_interface_variables[var_index] = var_result_id;\n    }\n\n    result = ParseInterfaceVariables(p_parser, p_module, p_entry_point, interface_variable_count, p_interface_variables);\n    if (result != SPV_REFLECT_RESULT_SUCCESS) {\n      return result;\n    }\n    SafeFree(p_interface_variables);\n\n    result = ParseStaticallyUsedResources(p_parser, p_module, p_entry_point, uniform_count, uniforms, push_constant_count,\n                                          push_constants);\n    if (result != SPV_REFLECT_RESULT_SUCCESS) {\n      return result;\n    }\n  }\n\n  SafeFree(uniforms);\n  SafeFree(push_constants);\n\n  return 
SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseExecutionModes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {\n  assert(IsNotNull(p_parser));\n  assert(IsNotNull(p_parser->nodes));\n  assert(IsNotNull(p_module));\n\n  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {\n    for (size_t node_idx = 0; node_idx < p_parser->node_count; ++node_idx) {\n      SpvReflectPrvNode* p_node = &(p_parser->nodes[node_idx]);\n      if (p_node->op != SpvOpExecutionMode && p_node->op != SpvOpExecutionModeId) {\n        continue;\n      }\n\n      // Read entry point id\n      uint32_t entry_point_id = 0;\n      CHECKED_READU32(p_parser, p_node->word_offset + 1, entry_point_id);\n\n      // Find entry point\n      SpvReflectEntryPoint* p_entry_point = NULL;\n      for (size_t entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) {\n        if (p_module->entry_points[entry_point_idx].id == entry_point_id) {\n          p_entry_point = &p_module->entry_points[entry_point_idx];\n          break;\n        }\n      }\n      // Bail if entry point is null\n      if (IsNull(p_entry_point)) {\n        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ENTRY_POINT;\n      }\n\n      // Read execution mode\n      uint32_t execution_mode = (uint32_t)INVALID_VALUE;\n      CHECKED_READU32(p_parser, p_node->word_offset + 2, execution_mode);\n\n      // Parse execution mode\n      switch (execution_mode) {\n        case SpvExecutionModeInvocations: {\n          CHECKED_READU32(p_parser, p_node->word_offset + 3, p_entry_point->invocations);\n        } break;\n\n        case SpvExecutionModeLocalSize: {\n          CHECKED_READU32(p_parser, p_node->word_offset + 3, p_entry_point->local_size.x);\n          CHECKED_READU32(p_parser, p_node->word_offset + 4, p_entry_point->local_size.y);\n          CHECKED_READU32(p_parser, p_node->word_offset + 5, p_entry_point->local_size.z);\n        } break;\n        
case SpvExecutionModeLocalSizeId: {\n          uint32_t local_size_x_id = 0;\n          uint32_t local_size_y_id = 0;\n          uint32_t local_size_z_id = 0;\n          CHECKED_READU32(p_parser, p_node->word_offset + 3, local_size_x_id);\n          CHECKED_READU32(p_parser, p_node->word_offset + 4, local_size_y_id);\n          CHECKED_READU32(p_parser, p_node->word_offset + 5, local_size_z_id);\n\n          SpvReflectPrvNode* x_node = FindNode(p_parser, local_size_x_id);\n          SpvReflectPrvNode* y_node = FindNode(p_parser, local_size_y_id);\n          SpvReflectPrvNode* z_node = FindNode(p_parser, local_size_z_id);\n          if (IsNotNull(x_node) && IsNotNull(y_node) && IsNotNull(z_node)) {\n            if (IsSpecConstant(x_node)) {\n              p_entry_point->local_size.x = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT;\n            } else {\n              CHECKED_READU32(p_parser, x_node->word_offset + 3, p_entry_point->local_size.x);\n            }\n\n            if (IsSpecConstant(y_node)) {\n              p_entry_point->local_size.y = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT;\n            } else {\n              CHECKED_READU32(p_parser, y_node->word_offset + 3, p_entry_point->local_size.y);\n            }\n\n            if (IsSpecConstant(z_node)) {\n              p_entry_point->local_size.z = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT;\n            } else {\n              CHECKED_READU32(p_parser, z_node->word_offset + 3, p_entry_point->local_size.z);\n            }\n          }\n        } break;\n\n        case SpvExecutionModeInputPoints:\n        case SpvExecutionModeInputLines:\n        case SpvExecutionModeInputLinesAdjacency:\n        case SpvExecutionModeTriangles:\n        case SpvExecutionModeInputTrianglesAdjacency:\n        case SpvExecutionModeQuads:\n        case SpvExecutionModeIsolines:\n        case SpvExecutionModeOutputVertices: {\n          CHECKED_READU32(p_parser, p_node->word_offset + 3, 
p_entry_point->output_vertices);\n        } break;\n\n        default:\n          break;\n      }\n      p_entry_point->execution_mode_count++;\n    }\n    uint32_t* indices = (uint32_t*)calloc(p_module->entry_point_count, sizeof(indices));\n    if (IsNull(indices)) {\n      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n    }\n    for (size_t entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) {\n      SpvReflectEntryPoint* p_entry_point = &p_module->entry_points[entry_point_idx];\n      if (p_entry_point->execution_mode_count > 0) {\n        p_entry_point->execution_modes =\n            (SpvExecutionMode*)calloc(p_entry_point->execution_mode_count, sizeof(*p_entry_point->execution_modes));\n        if (IsNull(p_entry_point->execution_modes)) {\n          SafeFree(indices);\n          return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n        }\n      }\n    }\n\n    for (size_t node_idx = 0; node_idx < p_parser->node_count; ++node_idx) {\n      SpvReflectPrvNode* p_node = &(p_parser->nodes[node_idx]);\n      if (p_node->op != SpvOpExecutionMode) {\n        continue;\n      }\n\n      // Read entry point id\n      uint32_t entry_point_id = 0;\n      CHECKED_READU32(p_parser, p_node->word_offset + 1, entry_point_id);\n\n      // Find entry point\n      SpvReflectEntryPoint* p_entry_point = NULL;\n      uint32_t* idx = NULL;\n      for (size_t entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) {\n        if (p_module->entry_points[entry_point_idx].id == entry_point_id) {\n          p_entry_point = &p_module->entry_points[entry_point_idx];\n          idx = &indices[entry_point_idx];\n          break;\n        }\n      }\n\n      // Read execution mode\n      uint32_t execution_mode = (uint32_t)INVALID_VALUE;\n      CHECKED_READU32(p_parser, p_node->word_offset + 2, execution_mode);\n      p_entry_point->execution_modes[(*idx)++] = (SpvExecutionMode)execution_mode;\n    }\n    SafeFree(indices);\n  }\n 
 return SPV_REFLECT_RESULT_SUCCESS;
}

// Counts OpVariable nodes in the PushConstant storage class, then parses each
// one into p_module->push_constant_blocks: block layout, access-chain usage,
// member sizes, and the block's minimum member offset.
static SpvReflectResult ParsePushConstantBlocks(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {
  // First pass: count push constant variables so storage can be allocated once.
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
    if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) {
      continue;
    }

    p_module->push_constant_block_count += 1;
  }

  if (p_module->push_constant_block_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_module->push_constant_blocks =
      (SpvReflectBlockVariable*)calloc(p_module->push_constant_block_count, sizeof(*p_module->push_constant_blocks));
  if (IsNull(p_module->push_constant_blocks)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }

  p_parser->physical_pointer_struct_count = 0;
  uint32_t push_constant_index = 0;
  // Second pass: parse each push constant variable.
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
    if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) {
      continue;
    }

    SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id);
    if (IsNull(p_node) || IsNull(p_type)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }
    // If the type is a pointer, resolve it
    if (p_type->op == SpvOpTypePointer) {
      // Find the type's node
      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Should be the resolved type
      p_type = FindType(p_module, p_type_node->type_id);
      if (IsNull(p_type)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
    }

    // Validity check only: the (possibly resolved) type must have a backing node.
    SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
    if (IsNull(p_type_node)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }

    SpvReflectBlockVariable* p_push_constant = &p_module->push_constant_blocks[push_constant_index];
    p_push_constant->spirv_id = p_node->result_id;
    p_parser->physical_pointer_count = 0;
    SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, p_push_constant);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    // Mark which members are actually accessed via the module's access chains.
    for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; ++access_chain_index) {
      SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
      // Skip any access chains that aren't touching this push constant block
      if (p_push_constant->spirv_id != FindBaseId(p_parser, p_access_chain)) {
        continue;
      }
      SpvReflectBlockVariable* p_var =
          (p_access_chain->base_id == p_push_constant->spirv_id) ? p_push_constant : GetRefBlkVar(p_parser, p_access_chain);
      result = ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, 0, (SpvOp)INVALID_VALUE, p_var);
      if (result != SPV_REFLECT_RESULT_SUCCESS) {
        return result;
      }
    }

    p_push_constant->name = p_node->name;
    result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, false, p_push_constant);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    // Get minimum offset for whole Push Constant block
    // It is not valid SPIR-V to have an empty Push Constant Block
    p_push_constant->offset = UINT32_MAX;
    for (uint32_t k = 0; k < p_push_constant->member_count; ++k) {
      const uint32_t member_offset = p_push_constant->members[k].offset;
      p_push_constant->offset = Min(p_push_constant->offset, member_offset);
    }

    ++push_constant_index;
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// qsort comparator: orders descriptor sets by ascending set number.
static int SortCompareDescriptorSet(const void* a, const void* b)
{\n  const SpvReflectDescriptorSet* p_elem_a = (const SpvReflectDescriptorSet*)a;\n  const SpvReflectDescriptorSet* p_elem_b = (const SpvReflectDescriptorSet*)b;\n  int value = (int)(p_elem_a->set) - (int)(p_elem_b->set);\n  // We should never see duplicate descriptor set numbers in a shader; if so, a tiebreaker\n  // would be needed here.\n  assert(value != 0);\n  return value;\n}\n\nstatic SpvReflectResult ParseEntrypointDescriptorSets(SpvReflectShaderModule* p_module) {\n  // Update the entry point's sets\n  for (uint32_t i = 0; i < p_module->entry_point_count; ++i) {\n    SpvReflectEntryPoint* p_entry = &p_module->entry_points[i];\n    for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) {\n      SafeFree(p_entry->descriptor_sets[j].bindings);\n    }\n    SafeFree(p_entry->descriptor_sets);\n    p_entry->descriptor_set_count = 0;\n    for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) {\n      const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j];\n      for (uint32_t k = 0; k < p_set->binding_count; ++k) {\n        bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_set->bindings[k]->spirv_id);\n        if (found) {\n          ++p_entry->descriptor_set_count;\n          break;\n        }\n      }\n    }\n\n    p_entry->descriptor_sets = NULL;\n    if (p_entry->descriptor_set_count > 0) {\n      p_entry->descriptor_sets = (SpvReflectDescriptorSet*)calloc(p_entry->descriptor_set_count, sizeof(*p_entry->descriptor_sets));\n      if (IsNull(p_entry->descriptor_sets)) {\n        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n      }\n    }\n    p_entry->descriptor_set_count = 0;\n    for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) {\n      const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j];\n      uint32_t count = 0;\n      for (uint32_t k = 0; k < p_set->binding_count; ++k) {\n        bool found = SearchSortedUint32(p_entry->used_uniforms, 
p_entry->used_uniform_count, p_set->bindings[k]->spirv_id);\n        if (found) {\n          ++count;\n        }\n      }\n      if (count == 0) {\n        continue;\n      }\n      SpvReflectDescriptorSet* p_entry_set = &p_entry->descriptor_sets[p_entry->descriptor_set_count++];\n      p_entry_set->set = p_set->set;\n      p_entry_set->bindings = (SpvReflectDescriptorBinding**)calloc(count, sizeof(*p_entry_set->bindings));\n      if (IsNull(p_entry_set->bindings)) {\n        return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;\n      }\n      for (uint32_t k = 0; k < p_set->binding_count; ++k) {\n        bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_set->bindings[k]->spirv_id);\n        if (found) {\n          p_entry_set->bindings[p_entry_set->binding_count++] = p_set->bindings[k];\n        }\n      }\n    }\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nstatic SpvReflectResult ParseDescriptorSets(SpvReflectShaderModule* p_module) {\n  // Count the descriptors in each set\n  for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) {\n    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[i]);\n\n    // Look for a target set using the descriptor's set number\n    SpvReflectDescriptorSet* p_target_set = NULL;\n    for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) {\n      SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j];\n      if (p_set->set == p_descriptor->set) {\n        p_target_set = p_set;\n        break;\n      }\n    }\n\n    // If a target set isn't found, find the first available one.\n    if (IsNull(p_target_set)) {\n      for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) {\n        SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j];\n        if (p_set->set == (uint32_t)INVALID_VALUE) {\n          p_target_set = p_set;\n          p_target_set->set = p_descriptor->set;\n          break;\n        }\n      }\n    }\n\n    if 
(IsNull(p_target_set)) {\n      return SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR;\n    }\n\n    p_target_set->binding_count += 1;\n  }\n\n  // Count the descriptor sets\n  for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) {\n    const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i];\n    if (p_set->set != (uint32_t)INVALID_VALUE) {\n      p_module->descriptor_set_count += 1;\n    }\n  }\n\n  // Sort the descriptor sets based on numbers\n  if (p_module->descriptor_set_count > 0) {\n    qsort(p_module->descriptor_sets, p_module->descriptor_set_count, sizeof(*(p_module->descriptor_sets)),\n          SortCompareDescriptorSet);\n  }\n\n  // Build descriptor pointer array\n  for (uint32_t i = 0; i < p_module->descriptor_set_count; ++i) {\n    SpvReflectDescriptorSet* p_set = &(p_module->descriptor_sets[i]);\n    p_set->bindings = (SpvReflectDescriptorBinding**)calloc(p_set->binding_count, sizeof(*(p_set->bindings)));\n\n    uint32_t descriptor_index = 0;\n    for (uint32_t j = 0; j < p_module->descriptor_binding_count; ++j) {\n      SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[j]);\n      if (p_descriptor->set == p_set->set) {\n        assert(descriptor_index < p_set->binding_count);\n        p_set->bindings[descriptor_index] = p_descriptor;\n        ++descriptor_index;\n      }\n    }\n  }\n\n  return ParseEntrypointDescriptorSets(p_module);\n}\n\nstatic SpvReflectResult DisambiguateStorageBufferSrvUav(SpvReflectShaderModule* p_module) {\n  if (p_module->descriptor_binding_count == 0) {\n    return SPV_REFLECT_RESULT_SUCCESS;\n  }\n\n  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {\n    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);\n    // Skip everything that isn't a STORAGE_BUFFER descriptor\n    if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {\n      continue;\n   
 }

    //
    // Vulkan doesn't disambiguate between SRVs and UAVs so they
    // come back as STORAGE_BUFFER. The block parsing process will
    // mark a block as non-writable should any member of the block
    // or its descendants are non-writable.
    //
    if (p_descriptor->block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) {
      p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Drops all per-set state and rebuilds it from the current descriptor bindings.
static SpvReflectResult SynchronizeDescriptorSets(SpvReflectShaderModule* p_module) {
  // Free and reset all descriptor set numbers
  for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) {
    SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i];
    SafeFree(p_set->bindings);
    p_set->binding_count = 0;
    p_set->set = (uint32_t)INVALID_VALUE;
  }
  // Set descriptor set count to zero
  p_module->descriptor_set_count = 0;

  SpvReflectResult result = ParseDescriptorSets(p_module);
  return result;
}

// Core module construction: stores (or references) the SPIR-V code, then runs
// the full parse pipeline. On any failure the partially-built module is
// destroyed before returning the error.
static SpvReflectResult CreateShaderModule(uint32_t flags, size_t size, const void* p_code, SpvReflectShaderModule* p_module) {
  // Initialize all module fields to zero
  memset(p_module, 0, sizeof(*p_module));

  // Allocate module internals
#ifdef __cplusplus
  p_module->_internal = (SpvReflectShaderModule::Internal*)calloc(1, sizeof(*(p_module->_internal)));
#else
  p_module->_internal = calloc(1, sizeof(*(p_module->_internal)));
#endif
  if (IsNull(p_module->_internal)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }
  // Copy flags
  p_module->_internal->module_flags = flags;
  // Figure out if we need to copy the SPIR-V code or not
  if (flags & SPV_REFLECT_MODULE_FLAG_NO_COPY) {
    // Set internal size and pointer to args passed in
    p_module->_internal->spirv_size = size;
#if defined(__cplusplus)
    p_module->_internal->spirv_code = const_cast<uint32_t*>(static_cast<const uint32_t*>(p_code));  // cast that const away
#else
    p_module->_internal->spirv_code = (void*)p_code;  // cast that const away
#endif
    p_module->_internal->spirv_word_count = (uint32_t)(size / SPIRV_WORD_SIZE);
  } else {
    // Allocate SPIR-V code storage
    p_module->_internal->spirv_size = size;
    p_module->_internal->spirv_code = (uint32_t*)calloc(1, p_module->_internal->spirv_size);
    p_module->_internal->spirv_word_count = (uint32_t)(size / SPIRV_WORD_SIZE);
    if (IsNull(p_module->_internal->spirv_code)) {
      SafeFree(p_module->_internal);
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
    // Copy SPIR-V to code storage
    memcpy(p_module->_internal->spirv_code, p_code, size);
  }

  // Initialize everything to zero
  SpvReflectPrvParser parser;
  memset(&parser, 0, sizeof(SpvReflectPrvParser));

  // Create parser
  SpvReflectResult result = CreateParser(p_module->_internal->spirv_size, p_module->_internal->spirv_code, &parser);

  // Generator
  // NOTE(review): word 2 of the code is read before `result` is checked; this
  // assumes `size` covers at least the SPIR-V header — confirm CreateParser's
  // minimum-size validation makes this safe for tiny inputs.
  {
    const uint32_t* p_ptr = (const uint32_t*)p_module->_internal->spirv_code;
    p_module->generator = (SpvReflectGenerator)((*(p_ptr + 2) & 0xFFFF0000) >> 16);
  }

  // Each stage runs only if every prior stage succeeded.
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseNodes(&parser);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseStrings(&parser);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseSource(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseFunctions(&parser);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseMemberCounts(&parser);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseNames(&parser);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseDecorations(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }

  // Start of reflection data parsing
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    p_module->source_language = parser.source_language;
    p_module->source_language_version = parser.source_language_version;

    // Zero out descriptor set data
    p_module->descriptor_set_count = 0;
    memset(p_module->descriptor_sets, 0, SPV_REFLECT_MAX_DESCRIPTOR_SETS * sizeof(*p_module->descriptor_sets));
    // Initialize descriptor set numbers
    for (uint32_t set_number = 0; set_number < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++set_number) {
      p_module->descriptor_sets[set_number].set = (uint32_t)INVALID_VALUE;
    }
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseTypes(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseDescriptorBindings(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseDescriptorType(p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseUAVCounterBindings(p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseDescriptorBlocks(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParsePushConstantBlocks(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseEntryPoints(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseCapabilities(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  // Mirror the first entry point's data into the module-level convenience fields.
  if (result == SPV_REFLECT_RESULT_SUCCESS && p_module->entry_point_count > 0) {
    SpvReflectEntryPoint* p_entry = &(p_module->entry_points[0]);
    p_module->entry_point_name = p_entry->name;
    p_module->entry_point_id = p_entry->id;
    p_module->spirv_execution_model = p_entry->spirv_execution_model;
    p_module->shader_stage = p_entry->shader_stage;
    p_module->input_variable_count = p_entry->input_variable_count;
    p_module->input_variables = p_entry->input_variables;
    p_module->output_variable_count = p_entry->output_variable_count;
    p_module->output_variables = p_entry->output_variables;
    p_module->interface_variable_count = p_entry->interface_variable_count;
    p_module->interface_variables = p_entry->interface_variables;
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = DisambiguateStorageBufferSrvUav(p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = SynchronizeDescriptorSets(p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }
  if (result == SPV_REFLECT_RESULT_SUCCESS) {
    result = ParseExecutionModes(&parser, p_module);
    SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
  }

  // Destroy module if parse was not successful
  if (result != SPV_REFLECT_RESULT_SUCCESS) {
    spvReflectDestroyShaderModule(p_module);
  }

  DestroyParser(&parser);

  return result;
}

SpvReflectResult spvReflectCreateShaderModule(size_t size, const void* p_code, SpvReflectShaderModule* p_module) {
  return CreateShaderModule(0, size, p_code, p_module);
}

SpvReflectResult spvReflectCreateShaderModule2(uint32_t flags, size_t size, const void* p_code,
SpvReflectShaderModule* p_module) {\n  return CreateShaderModule(flags, size, p_code, p_module);\n}\n\nSpvReflectResult spvReflectGetShaderModule(size_t size, const void* p_code, SpvReflectShaderModule* p_module) {\n  return spvReflectCreateShaderModule(size, p_code, p_module);\n}\n\nstatic void SafeFreeTypes(SpvReflectTypeDescription* p_type) {\n  if (IsNull(p_type) || p_type->copied) {\n    return;\n  }\n\n  if (IsNotNull(p_type->members)) {\n    for (size_t i = 0; i < p_type->member_count; ++i) {\n      SpvReflectTypeDescription* p_member = &p_type->members[i];\n      SafeFreeTypes(p_member);\n    }\n\n    SafeFree(p_type->members);\n    p_type->members = NULL;\n  }\n}\n\nstatic void SafeFreeBlockVariables(SpvReflectBlockVariable* p_block) {\n  if (IsNull(p_block)) {\n    return;\n  }\n\n  // We share pointers to Physical Pointer structs and don't want to double free\n  if (p_block->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) {\n    return;\n  }\n\n  if (IsNotNull(p_block->members)) {\n    for (size_t i = 0; i < p_block->member_count; ++i) {\n      SpvReflectBlockVariable* p_member = &p_block->members[i];\n      SafeFreeBlockVariables(p_member);\n    }\n\n    SafeFree(p_block->members);\n    p_block->members = NULL;\n  }\n}\n\nstatic void SafeFreeInterfaceVariable(SpvReflectInterfaceVariable* p_interface) {\n  if (IsNull(p_interface)) {\n    return;\n  }\n\n  if (IsNotNull(p_interface->members)) {\n    for (size_t i = 0; i < p_interface->member_count; ++i) {\n      SpvReflectInterfaceVariable* p_member = &p_interface->members[i];\n      SafeFreeInterfaceVariable(p_member);\n    }\n\n    SafeFree(p_interface->members);\n    p_interface->members = NULL;\n  }\n}\n\nvoid spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module) {\n  if (IsNull(p_module->_internal)) {\n    return;\n  }\n\n  SafeFree(p_module->source_source);\n\n  // Descriptor set bindings\n  for (size_t i = 0; i < p_module->descriptor_set_count; ++i) {\n    
SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i];
    free(p_set->bindings);
  }

  // Descriptor binding blocks
  for (size_t i = 0; i < p_module->descriptor_binding_count; ++i) {
    SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[i];
    if (IsNotNull(p_descriptor->byte_address_buffer_offsets)) {
      SafeFree(p_descriptor->byte_address_buffer_offsets);
    }
    SafeFreeBlockVariables(&p_descriptor->block);
  }
  SafeFree(p_module->descriptor_bindings);

  // Entry points
  for (size_t i = 0; i < p_module->entry_point_count; ++i) {
    SpvReflectEntryPoint* p_entry = &p_module->entry_points[i];
    for (size_t j = 0; j < p_entry->interface_variable_count; j++) {
      SafeFreeInterfaceVariable(&p_entry->interface_variables[j]);
    }
    for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) {
      SafeFree(p_entry->descriptor_sets[j].bindings);
    }
    SafeFree(p_entry->descriptor_sets);
    SafeFree(p_entry->input_variables);
    SafeFree(p_entry->output_variables);
    SafeFree(p_entry->interface_variables);
    SafeFree(p_entry->used_uniforms);
    SafeFree(p_entry->used_push_constants);
    SafeFree(p_entry->execution_modes);
  }
  SafeFree(p_module->capabilities);
  SafeFree(p_module->entry_points);
  SafeFree(p_module->spec_constants);

  // Push constants
  for (size_t i = 0; i < p_module->push_constant_block_count; ++i) {
    SafeFreeBlockVariables(&p_module->push_constant_blocks[i]);
  }
  SafeFree(p_module->push_constant_blocks);

  // Type infos
  for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) {
    SpvReflectTypeDescription* p_type = &p_module->_internal->type_descriptions[i];
    if (IsNotNull(p_type->members)) {
      SafeFreeTypes(p_type);
    }
    SafeFree(p_type->members);
  }
  SafeFree(p_module->_internal->type_descriptions);

  // Free SPIR-V code if there was a copy
  if ((p_module->_internal->module_flags & SPV_REFLECT_MODULE_FLAG_NO_COPY) == 0) {
    SafeFree(p_module->_internal->spirv_code);
  }
  // Free internal
  SafeFree(p_module->_internal);
}

// Returns the size in bytes of the module's SPIR-V code, or 0 for NULL input.
uint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module) {
  if (IsNull(p_module)) {
    return 0;
  }

  return (uint32_t)(p_module->_internal->spirv_size);
}

// Returns a pointer to the module's SPIR-V code, or NULL for NULL input.
const uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module) {
  if (IsNull(p_module)) {
    return NULL;
  }

  return p_module->_internal->spirv_code;
}

// Looks up an entry point by exact name; returns NULL when not found.
const SpvReflectEntryPoint* spvReflectGetEntryPoint(const SpvReflectShaderModule* p_module, const char* entry_point) {
  if (IsNull(p_module) || IsNull(entry_point)) {
    return NULL;
  }

  for (uint32_t i = 0; i < p_module->entry_point_count; ++i) {
    if (strcmp(p_module->entry_points[i].name, entry_point) == 0) {
      return &p_module->entry_points[i];
    }
  }
  return NULL;
}

// Two-call enumeration: with pp_bindings NULL, writes the count to *p_count;
// otherwise *p_count must match and the binding pointers are written out.
SpvReflectResult spvReflectEnumerateDescriptorBindings(const SpvReflectShaderModule* p_module, uint32_t* p_count,
                                                       SpvReflectDescriptorBinding** pp_bindings) {
  if (IsNull(p_module)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  if (IsNull(p_count)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }

  if (IsNotNull(pp_bindings)) {
    if (*p_count != p_module->descriptor_binding_count) {
      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;
    }

    for (uint32_t index = 0; index < *p_count; ++index) {
      SpvReflectDescriptorBinding* p_bindings = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[index];
      pp_bindings[index] = p_bindings;
    }
  } else {
    *p_count = p_module->descriptor_binding_count;
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

SpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings(const SpvReflectShaderModule* p_module, const char* entry_point,
                                                                 uint32_t*
p_count, SpvReflectDescriptorBinding** pp_bindings) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  uint32_t count = 0;\n  for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) {\n    bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_module->descriptor_bindings[i].spirv_id);\n    if (found) {\n      if (IsNotNull(pp_bindings)) {\n        if (count >= *p_count) {\n          return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n        }\n        pp_bindings[count++] = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[i];\n      } else {\n        ++count;\n      }\n    }\n  }\n  if (IsNotNull(pp_bindings)) {\n    if (count != *p_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n  } else {\n    *p_count = count;\n  }\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateDescriptorSets(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                   SpvReflectDescriptorSet** pp_sets) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (IsNotNull(pp_sets)) {\n    if (*p_count != p_module->descriptor_set_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_module->descriptor_sets[index];\n      pp_sets[index] = p_set;\n    }\n  } else {\n    *p_count = p_module->descriptor_set_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult 
spvReflectEnumerateEntryPointDescriptorSets(const SpvReflectShaderModule* p_module, const char* entry_point,\n                                                             uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  if (IsNotNull(pp_sets)) {\n    if (*p_count != p_entry->descriptor_set_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_entry->descriptor_sets[index];\n      pp_sets[index] = p_set;\n    }\n  } else {\n    *p_count = p_entry->descriptor_set_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateInterfaceVariables(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                       SpvReflectInterfaceVariable** pp_variables) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_module->interface_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = &p_module->interface_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_module->interface_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(const SpvReflectShaderModule* p_module, const char* entry_point,\n   
                                                              uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_entry->interface_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = &p_entry->interface_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_entry->interface_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateInputVariables(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                   SpvReflectInterfaceVariable** pp_variables) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_module->input_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = p_module->input_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_module->input_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateEntryPointInputVariables(const SpvReflectShaderModule* p_module, const char* entry_point,\n                                                             uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) {\n 
 if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_entry->input_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = p_entry->input_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_entry->input_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateOutputVariables(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                    SpvReflectInterfaceVariable** pp_variables) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_module->output_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = p_module->output_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_module->output_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateEntryPointOutputVariables(const SpvReflectShaderModule* p_module, const char* entry_point,\n                                                              uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  if (IsNotNull(pp_variables)) {\n    if (*p_count != p_entry->output_variable_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectInterfaceVariable* p_var = p_entry->output_variables[index];\n      pp_variables[index] = p_var;\n    }\n  } else {\n    *p_count = p_entry->output_variable_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumeratePushConstantBlocks(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                       SpvReflectBlockVariable** pp_blocks) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (pp_blocks != NULL) {\n    if (*p_count != p_module->push_constant_block_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectBlockVariable* p_push_constant_blocks = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[index];\n      pp_blocks[index] = p_push_constant_blocks;\n    }\n  } else {\n    *p_count = p_module->push_constant_block_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\nSpvReflectResult spvReflectEnumeratePushConstants(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                  SpvReflectBlockVariable** pp_blocks) {\n  return spvReflectEnumeratePushConstantBlocks(p_module, p_count, pp_blocks);\n}\n\nSpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks(const SpvReflectShaderModule* p_module, const char* entry_point,\n                                    
                             uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n  }\n\n  uint32_t count = 0;\n  for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) {\n    bool found = SearchSortedUint32(p_entry->used_push_constants, p_entry->used_push_constant_count,\n                                    p_module->push_constant_blocks[i].spirv_id);\n    if (found) {\n      if (IsNotNull(pp_blocks)) {\n        if (count >= *p_count) {\n          return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n        }\n        pp_blocks[count++] = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[i];\n      } else {\n        ++count;\n      }\n    }\n  }\n  if (IsNotNull(pp_blocks)) {\n    if (count != *p_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n  } else {\n    *p_count = count;\n  }\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectEnumerateSpecializationConstants(const SpvReflectShaderModule* p_module, uint32_t* p_count,\n                                                            SpvReflectSpecializationConstant** pp_constants) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_count)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  if (IsNotNull(pp_constants)) {\n    if (*p_count != p_module->spec_constant_count) {\n      return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;\n    }\n\n    for (uint32_t index = 0; index < *p_count; ++index) {\n      SpvReflectSpecializationConstant* p_constant = (SpvReflectSpecializationConstant*)&p_module->spec_constants[index];\n      pp_constants[index] = p_constant;\n    
}\n  } else {\n    *p_count = p_module->spec_constant_count;\n  }\n\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nconst SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding(const SpvReflectShaderModule* p_module, uint32_t binding_number,\n                                                                  uint32_t set_number, SpvReflectResult* p_result) {\n  const SpvReflectDescriptorBinding* p_descriptor = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {\n      const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index];\n      if ((p_potential->binding == binding_number) && (p_potential->set == set_number)) {\n        p_descriptor = p_potential;\n        break;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_descriptor)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_descriptor;\n}\n\nconst SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding(const SpvReflectShaderModule* p_module,\n                                                                            const char* entry_point, uint32_t binding_number,\n                                                                            uint32_t set_number, SpvReflectResult* p_result) {\n  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n  if (IsNull(p_entry)) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectDescriptorBinding* p_descriptor = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {\n      const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index];\n      bool found = 
SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_potential->spirv_id);\n      if ((p_potential->binding == binding_number) && (p_potential->set == set_number) && found) {\n        p_descriptor = p_potential;\n        break;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_descriptor)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_descriptor;\n}\n\nconst SpvReflectDescriptorSet* spvReflectGetDescriptorSet(const SpvReflectShaderModule* p_module, uint32_t set_number,\n                                                          SpvReflectResult* p_result) {\n  const SpvReflectDescriptorSet* p_set = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->descriptor_set_count; ++index) {\n      const SpvReflectDescriptorSet* p_potential = &p_module->descriptor_sets[index];\n      if (p_potential->set == set_number) {\n        p_set = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_set)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_set;\n}\n\nconst SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet(const SpvReflectShaderModule* p_module, const char* entry_point,\n                                                                    uint32_t set_number, SpvReflectResult* p_result) {\n  const SpvReflectDescriptorSet* p_set = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t index = 0; index < p_entry->descriptor_set_count; ++index) {\n      const SpvReflectDescriptorSet* p_potential = &p_entry->descriptor_sets[index];\n      if (p_potential->set == set_number) {\n        p_set = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_set)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_set;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation(const SpvReflectShaderModule* p_module, uint32_t location,\n                                                                        SpvReflectResult* p_result) {\n  if (location == INVALID_VALUE) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_module->input_variables[index];\n      if (p_potential->location == location) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariable(const SpvReflectShaderModule* p_module, uint32_t location,\n                                                              SpvReflectResult* p_result) {\n  return spvReflectGetInputVariableByLocation(p_module, location, p_result);\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation(const SpvReflectShaderModule* p_module,\n                                                                                  const char* entry_point, uint32_t location,\n                                                                                  SpvReflectResult* p_result) {\n  if (location == INVALID_VALUE) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_entry->input_variables[index];\n      if (p_potential->location == location) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic(const SpvReflectShaderModule* p_module,\n                                                                        const char* semantic, SpvReflectResult* p_result) {\n  if (IsNull(semantic)) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n    }\n    return NULL;\n  }\n  if (semantic[0] == '\\0') {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_module->input_variables[index];\n      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic(const SpvReflectShaderModule* p_module,\n                                                                                  const char* entry_point, const char* semantic,\n                                                                                  SpvReflectResult* p_result) {\n  if (IsNull(semantic)) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n    }\n    return NULL;\n  }\n  if (semantic[0] == '\\0') {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_entry->input_variables[index];\n      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation(const SpvReflectShaderModule* p_module, uint32_t location,\n                                                                         SpvReflectResult* p_result) {\n  if (location == INVALID_VALUE) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_module->output_variables[index];\n      if (p_potential->location == location) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\nconst SpvReflectInterfaceVariable* spvReflectGetOutputVariable(const SpvReflectShaderModule* p_module, uint32_t location,\n                                                               SpvReflectResult* p_result) {\n  return spvReflectGetOutputVariableByLocation(p_module, location, p_result);\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation(const SpvReflectShaderModule* p_module,\n                                                                                   const char* entry_point, uint32_t location,\n                                                                                   SpvReflectResult* p_result) {\n  if (location == INVALID_VALUE) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_entry->output_variables[index];\n      if (p_potential->location == location) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic(const SpvReflectShaderModule* p_module,\n                                                                         const char* semantic, SpvReflectResult* p_result) {\n  if (IsNull(semantic)) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n    }\n    return NULL;\n  }\n  if (semantic[0] == '\\0') {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_module->output_variables[index];\n      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic(const SpvReflectShaderModule* p_module,\n                                                                                   const char* entry_point, const char* semantic,\n                                                                                   SpvReflectResult* p_result) {\n  if (IsNull(semantic)) {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n    }\n    return NULL;\n  }\n  if (semantic[0] == '\\0') {\n    if (IsNotNull(p_result)) {\n      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n    }\n    return NULL;\n  }\n  const SpvReflectInterfaceVariable* p_var = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) {\n      const SpvReflectInterfaceVariable* p_potential = p_entry->output_variables[index];\n      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {\n        p_var = p_potential;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_var)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_var;\n}\n\nconst SpvReflectBlockVariable* spvReflectGetPushConstantBlock(const SpvReflectShaderModule* p_module, uint32_t index,\n                                                              SpvReflectResult* p_result) {\n  const SpvReflectBlockVariable* p_push_constant = NULL;\n  if (IsNotNull(p_module)) {\n    if (index < p_module->push_constant_block_count) {\n      p_push_constant = &p_module->push_constant_blocks[index];\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_push_constant)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_push_constant;\n}\nconst SpvReflectBlockVariable* spvReflectGetPushConstant(const SpvReflectShaderModule* p_module, uint32_t index,\n                                                         SpvReflectResult* p_result) {\n  return spvReflectGetPushConstantBlock(p_module, index, p_result);\n}\n\nconst SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock(const SpvReflectShaderModule* p_module,\n                                                                        const char* entry_point, SpvReflectResult* p_result) {\n  const SpvReflectBlockVariable* p_push_constant = NULL;\n  if (IsNotNull(p_module)) {\n    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);\n    if (IsNull(p_entry)) {\n      if (IsNotNull(p_result)) {\n        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n      }\n      return NULL;\n    }\n    for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) {\n      bool found = SearchSortedUint32(p_entry->used_push_constants, p_entry->used_push_constant_count,\n                                      p_module->push_constant_blocks[i].spirv_id);\n      if (found) {\n        p_push_constant = 
&p_module->push_constant_blocks[i];\n        break;\n      }\n    }\n  }\n  if (IsNotNull(p_result)) {\n    *p_result = IsNotNull(p_push_constant)\n                    ? SPV_REFLECT_RESULT_SUCCESS\n                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);\n  }\n  return p_push_constant;\n}\n\nSpvReflectResult spvReflectChangeDescriptorBindingNumbers(SpvReflectShaderModule* p_module,\n                                                          const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number,\n                                                          uint32_t new_set_binding) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_binding)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n\n  SpvReflectDescriptorBinding* p_target_descriptor = NULL;\n  for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {\n    if (&p_module->descriptor_bindings[index] == p_binding) {\n      p_target_descriptor = &p_module->descriptor_bindings[index];\n      break;\n    }\n  }\n\n  if (IsNotNull(p_target_descriptor)) {\n    if (p_target_descriptor->word_offset.binding > (p_module->_internal->spirv_word_count - 1)) {\n      return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;\n    }\n    // Binding number\n    if (new_binding_number != (uint32_t)SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE) {\n      uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.binding;\n      *p_code = new_binding_number;\n      p_target_descriptor->binding = new_binding_number;\n    }\n    // Set number\n    if (new_set_binding != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {\n      uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.set;\n      *p_code = new_set_binding;\n      p_target_descriptor->set = new_set_binding;\n    }\n  }\n\n  SpvReflectResult result = 
SPV_REFLECT_RESULT_SUCCESS;\n  if (new_set_binding != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {\n    result = SynchronizeDescriptorSets(p_module);\n  }\n  return result;\n}\nSpvReflectResult spvReflectChangeDescriptorBindingNumber(SpvReflectShaderModule* p_module,\n                                                         const SpvReflectDescriptorBinding* p_descriptor_binding,\n                                                         uint32_t new_binding_number, uint32_t optional_new_set_number) {\n  return spvReflectChangeDescriptorBindingNumbers(p_module, p_descriptor_binding, new_binding_number, optional_new_set_number);\n}\n\nSpvReflectResult spvReflectChangeDescriptorSetNumber(SpvReflectShaderModule* p_module, const SpvReflectDescriptorSet* p_set,\n                                                     uint32_t new_set_number) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_set)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  SpvReflectDescriptorSet* p_target_set = NULL;\n  for (uint32_t index = 0; index < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++index) {\n    // The descriptor sets for specific entry points might not be in this set,\n    // so just match on set index.\n    if (p_module->descriptor_sets[index].set == p_set->set) {\n      p_target_set = (SpvReflectDescriptorSet*)p_set;\n      break;\n    }\n  }\n\n  SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS;\n  if (IsNotNull(p_target_set) && new_set_number != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {\n    for (uint32_t index = 0; index < p_target_set->binding_count; ++index) {\n      SpvReflectDescriptorBinding* p_descriptor = p_target_set->bindings[index];\n      if (p_descriptor->word_offset.set > (p_module->_internal->spirv_word_count - 1)) {\n        return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;\n      }\n\n      uint32_t* p_code = p_module->_internal->spirv_code + p_descriptor->word_offset.set;\n      *p_code = 
new_set_number;\n      p_descriptor->set = new_set_number;\n    }\n\n    result = SynchronizeDescriptorSets(p_module);\n  }\n\n  return result;\n}\n\nstatic SpvReflectResult ChangeVariableLocation(SpvReflectShaderModule* p_module, SpvReflectInterfaceVariable* p_variable,\n                                               uint32_t new_location) {\n  if (p_variable->word_offset.location > (p_module->_internal->spirv_word_count - 1)) {\n    return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;\n  }\n  uint32_t* p_code = p_module->_internal->spirv_code + p_variable->word_offset.location;\n  *p_code = new_location;\n  p_variable->location = new_location;\n  return SPV_REFLECT_RESULT_SUCCESS;\n}\n\nSpvReflectResult spvReflectChangeInputVariableLocation(SpvReflectShaderModule* p_module,\n                                                       const SpvReflectInterfaceVariable* p_input_variable, uint32_t new_location) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_input_variable)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {\n    if (p_module->input_variables[index] == p_input_variable) {\n      return ChangeVariableLocation(p_module, p_module->input_variables[index], new_location);\n    }\n  }\n  return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n}\n\nSpvReflectResult spvReflectChangeOutputVariableLocation(SpvReflectShaderModule* p_module,\n                                                        const SpvReflectInterfaceVariable* p_output_variable,\n                                                        uint32_t new_location) {\n  if (IsNull(p_module)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  if (IsNull(p_output_variable)) {\n    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;\n  }\n  for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {\n    if (p_module->output_variables[index] == 
p_output_variable) {\n      return ChangeVariableLocation(p_module, p_module->output_variables[index], new_location);\n    }\n  }\n  return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;\n}\n\nconst char* spvReflectSourceLanguage(SpvSourceLanguage source_lang) {\n  switch (source_lang) {\n    case SpvSourceLanguageESSL:\n      return \"ESSL\";\n    case SpvSourceLanguageGLSL:\n      return \"GLSL\";\n    case SpvSourceLanguageOpenCL_C:\n      return \"OpenCL_C\";\n    case SpvSourceLanguageOpenCL_CPP:\n      return \"OpenCL_CPP\";\n    case SpvSourceLanguageHLSL:\n      return \"HLSL\";\n    case SpvSourceLanguageCPP_for_OpenCL:\n      return \"CPP_for_OpenCL\";\n    case SpvSourceLanguageSYCL:\n      return \"SYCL\";\n    case SpvSourceLanguageHERO_C:\n      return \"Hero C\";\n    case SpvSourceLanguageNZSL:\n      return \"NZSL\";\n    default:\n      break;\n  }\n  // The source language is SpvSourceLanguageUnknown, SpvSourceLanguageMax, or\n  // some other value that does not correspond to a knonwn language.\n  return \"Unknown\";\n}\n\nconst char* spvReflectBlockVariableTypeName(const SpvReflectBlockVariable* p_var) {\n  if (p_var == NULL) {\n    return NULL;\n  }\n  return p_var->type_description->type_name;\n}\n"
  },
  {
    "path": "deps/SPIRV-reflect/spirv_reflect.h",
    "content": "/*\n Copyright 2017-2022 Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*/\n\n/*\n\nVERSION HISTORY\n\n  1.0   (2018-03-27) Initial public release\n\n*/\n\n// clang-format off\n/*!\n\n @file spirv_reflect.h\n\n*/\n#ifndef SPIRV_REFLECT_H\n#define SPIRV_REFLECT_H\n\n#if defined(SPIRV_REFLECT_USE_SYSTEM_SPIRV_H)\n#include <spirv/unified1/spirv.h>\n#else\n#include \"./include/spirv/unified1/spirv.h\"\n#endif\n\n\n#include <stdint.h>\n#include <string.h>\n\n#ifdef _MSC_VER\n  #define SPV_REFLECT_DEPRECATED(msg_str) __declspec(deprecated(\"This symbol is deprecated. Details: \" msg_str))\n#elif defined(__clang__)\n  #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str)))\n#elif defined(__GNUC__)\n  #if GCC_VERSION >= 40500\n    #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str)))\n  #else\n    #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated))\n  #endif\n#else\n  #define SPV_REFLECT_DEPRECATED(msg_str)\n#endif\n\n/*! 
@enum SpvReflectResult\n\n*/\ntypedef enum SpvReflectResult {\n  SPV_REFLECT_RESULT_SUCCESS,\n  SPV_REFLECT_RESULT_NOT_READY,\n  SPV_REFLECT_RESULT_ERROR_PARSE_FAILED,\n  SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED,\n  SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED,\n  SPV_REFLECT_RESULT_ERROR_NULL_POINTER,\n  SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR,\n  SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH,\n  SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_SET_NUMBER_OVERFLOW,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_STORAGE_CLASS,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ENTRY_POINT,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_EXECUTION_MODE,\n  SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED,\n} SpvReflectResult;\n\n/*! @enum SpvReflectModuleFlagBits\n\nSPV_REFLECT_MODULE_FLAG_NO_COPY - Disables copying of SPIR-V code\n  when a SPIRV-Reflect shader module is created. It is the\n  responsibility of the calling program to ensure that the pointer\n  remains valid and the memory it's pointing to is not freed while\n  SPIRV-Reflect operations are taking place. Freeing the backing\n  memory will cause undefined behavior or most likely a crash.\n  This is flag is intended for cases where the memory overhead of\n  storing the copied SPIR-V is undesirable.\n\n*/\ntypedef enum SpvReflectModuleFlagBits {\n  SPV_REFLECT_MODULE_FLAG_NONE    = 0x00000000,\n  SPV_REFLECT_MODULE_FLAG_NO_COPY = 0x00000001,\n} SpvReflectModuleFlagBits;\n\ntypedef uint32_t SpvReflectModuleFlags;\n\n/*! 
@enum SpvReflectTypeFlagBits\n\n*/\ntypedef enum SpvReflectTypeFlagBits {\n  SPV_REFLECT_TYPE_FLAG_UNDEFINED                       = 0x00000000,\n  SPV_REFLECT_TYPE_FLAG_VOID                            = 0x00000001,\n  SPV_REFLECT_TYPE_FLAG_BOOL                            = 0x00000002,\n  SPV_REFLECT_TYPE_FLAG_INT                             = 0x00000004,\n  SPV_REFLECT_TYPE_FLAG_FLOAT                           = 0x00000008,\n  SPV_REFLECT_TYPE_FLAG_VECTOR                          = 0x00000100,\n  SPV_REFLECT_TYPE_FLAG_MATRIX                          = 0x00000200,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE                  = 0x00010000,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER                = 0x00020000,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE          = 0x00040000,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK                  = 0x00080000,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE = 0x00100000,\n  SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK                   = 0x00FF0000,\n  SPV_REFLECT_TYPE_FLAG_STRUCT                          = 0x10000000,\n  SPV_REFLECT_TYPE_FLAG_ARRAY                           = 0x20000000,\n  SPV_REFLECT_TYPE_FLAG_REF                             = 0x40000000,\n} SpvReflectTypeFlagBits;\n\ntypedef uint32_t SpvReflectTypeFlags;\n\n/*! @enum SpvReflectDecorationBits\n\nNOTE: HLSL row_major and column_major decorations are reversed\n      in SPIR-V. Meaning that matrices declrations with row_major\n      will get reflected as column_major and vice versa. 
The\n      row and column decorations get appied during the compilation.\n      SPIRV-Reflect reads the data as is and does not make any\n      attempt to correct it to match what's in the source.\n\n      The Patch, PerVertex, and PerTask are used for Interface\n      variables that can have array\n\n*/\ntypedef enum SpvReflectDecorationFlagBits {\n  SPV_REFLECT_DECORATION_NONE                   = 0x00000000,\n  SPV_REFLECT_DECORATION_BLOCK                  = 0x00000001,\n  SPV_REFLECT_DECORATION_BUFFER_BLOCK           = 0x00000002,\n  SPV_REFLECT_DECORATION_ROW_MAJOR              = 0x00000004,\n  SPV_REFLECT_DECORATION_COLUMN_MAJOR           = 0x00000008,\n  SPV_REFLECT_DECORATION_BUILT_IN               = 0x00000010,\n  SPV_REFLECT_DECORATION_NOPERSPECTIVE          = 0x00000020,\n  SPV_REFLECT_DECORATION_FLAT                   = 0x00000040,\n  SPV_REFLECT_DECORATION_NON_WRITABLE           = 0x00000080,\n  SPV_REFLECT_DECORATION_RELAXED_PRECISION      = 0x00000100,\n  SPV_REFLECT_DECORATION_NON_READABLE           = 0x00000200,\n  SPV_REFLECT_DECORATION_PATCH                  = 0x00000400,\n  SPV_REFLECT_DECORATION_PER_VERTEX             = 0x00000800,\n  SPV_REFLECT_DECORATION_PER_TASK               = 0x00001000,\n  SPV_REFLECT_DECORATION_WEIGHT_TEXTURE         = 0x00002000,\n  SPV_REFLECT_DECORATION_BLOCK_MATCH_TEXTURE    = 0x00004000,\n} SpvReflectDecorationFlagBits;\n\ntypedef uint32_t SpvReflectDecorationFlags;\n\n// Based of SPV_GOOGLE_user_type\ntypedef enum SpvReflectUserType {\n  SPV_REFLECT_USER_TYPE_INVALID = 0,\n  SPV_REFLECT_USER_TYPE_CBUFFER,\n  SPV_REFLECT_USER_TYPE_TBUFFER,\n  SPV_REFLECT_USER_TYPE_APPEND_STRUCTURED_BUFFER,\n  SPV_REFLECT_USER_TYPE_BUFFER,\n  SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER,\n  SPV_REFLECT_USER_TYPE_CONSTANT_BUFFER,\n  SPV_REFLECT_USER_TYPE_CONSUME_STRUCTURED_BUFFER,\n  SPV_REFLECT_USER_TYPE_INPUT_PATCH,\n  SPV_REFLECT_USER_TYPE_OUTPUT_PATCH,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BUFFER,\n  
SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BYTE_ADDRESS_BUFFER,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_STRUCTURED_BUFFER,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D_ARRAY,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D_ARRAY,\n  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_3D,\n  SPV_REFLECT_USER_TYPE_RAYTRACING_ACCELERATION_STRUCTURE,\n  SPV_REFLECT_USER_TYPE_RW_BUFFER,\n  SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER,\n  SPV_REFLECT_USER_TYPE_RW_STRUCTURED_BUFFER,\n  SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D,\n  SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D_ARRAY,\n  SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D,\n  SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D_ARRAY,\n  SPV_REFLECT_USER_TYPE_RW_TEXTURE_3D,\n  SPV_REFLECT_USER_TYPE_STRUCTURED_BUFFER,\n  SPV_REFLECT_USER_TYPE_SUBPASS_INPUT,\n  SPV_REFLECT_USER_TYPE_SUBPASS_INPUT_MS,\n  SPV_REFLECT_USER_TYPE_TEXTURE_1D,\n  SPV_REFLECT_USER_TYPE_TEXTURE_1D_ARRAY,\n  SPV_REFLECT_USER_TYPE_TEXTURE_2D,\n  SPV_REFLECT_USER_TYPE_TEXTURE_2D_ARRAY,\n  SPV_REFLECT_USER_TYPE_TEXTURE_2DMS,\n  SPV_REFLECT_USER_TYPE_TEXTURE_2DMS_ARRAY,\n  SPV_REFLECT_USER_TYPE_TEXTURE_3D,\n  SPV_REFLECT_USER_TYPE_TEXTURE_BUFFER,\n  SPV_REFLECT_USER_TYPE_TEXTURE_CUBE,\n  SPV_REFLECT_USER_TYPE_TEXTURE_CUBE_ARRAY,\n} SpvReflectUserType;\n\n/*! @enum SpvReflectResourceType\n\n*/\ntypedef enum SpvReflectResourceType {\n  SPV_REFLECT_RESOURCE_FLAG_UNDEFINED           = 0x00000000,\n  SPV_REFLECT_RESOURCE_FLAG_SAMPLER             = 0x00000001,\n  SPV_REFLECT_RESOURCE_FLAG_CBV                 = 0x00000002,\n  SPV_REFLECT_RESOURCE_FLAG_SRV                 = 0x00000004,\n  SPV_REFLECT_RESOURCE_FLAG_UAV                 = 0x00000008,\n} SpvReflectResourceType;\n\n/*! 
@enum SpvReflectFormat\n\n*/\ntypedef enum SpvReflectFormat {\n  SPV_REFLECT_FORMAT_UNDEFINED           =   0, // = VK_FORMAT_UNDEFINED\n  SPV_REFLECT_FORMAT_R16_UINT            =  74, // = VK_FORMAT_R16_UINT\n  SPV_REFLECT_FORMAT_R16_SINT            =  75, // = VK_FORMAT_R16_SINT\n  SPV_REFLECT_FORMAT_R16_SFLOAT          =  76, // = VK_FORMAT_R16_SFLOAT\n  SPV_REFLECT_FORMAT_R16G16_UINT         =  81, // = VK_FORMAT_R16G16_UINT\n  SPV_REFLECT_FORMAT_R16G16_SINT         =  82, // = VK_FORMAT_R16G16_SINT\n  SPV_REFLECT_FORMAT_R16G16_SFLOAT       =  83, // = VK_FORMAT_R16G16_SFLOAT\n  SPV_REFLECT_FORMAT_R16G16B16_UINT      =  88, // = VK_FORMAT_R16G16B16_UINT\n  SPV_REFLECT_FORMAT_R16G16B16_SINT      =  89, // = VK_FORMAT_R16G16B16_SINT\n  SPV_REFLECT_FORMAT_R16G16B16_SFLOAT    =  90, // = VK_FORMAT_R16G16B16_SFLOAT\n  SPV_REFLECT_FORMAT_R16G16B16A16_UINT   =  95, // = VK_FORMAT_R16G16B16A16_UINT\n  SPV_REFLECT_FORMAT_R16G16B16A16_SINT   =  96, // = VK_FORMAT_R16G16B16A16_SINT\n  SPV_REFLECT_FORMAT_R16G16B16A16_SFLOAT =  97, // = VK_FORMAT_R16G16B16A16_SFLOAT\n  SPV_REFLECT_FORMAT_R32_UINT            =  98, // = VK_FORMAT_R32_UINT\n  SPV_REFLECT_FORMAT_R32_SINT            =  99, // = VK_FORMAT_R32_SINT\n  SPV_REFLECT_FORMAT_R32_SFLOAT          = 100, // = VK_FORMAT_R32_SFLOAT\n  SPV_REFLECT_FORMAT_R32G32_UINT         = 101, // = VK_FORMAT_R32G32_UINT\n  SPV_REFLECT_FORMAT_R32G32_SINT         = 102, // = VK_FORMAT_R32G32_SINT\n  SPV_REFLECT_FORMAT_R32G32_SFLOAT       = 103, // = VK_FORMAT_R32G32_SFLOAT\n  SPV_REFLECT_FORMAT_R32G32B32_UINT      = 104, // = VK_FORMAT_R32G32B32_UINT\n  SPV_REFLECT_FORMAT_R32G32B32_SINT      = 105, // = VK_FORMAT_R32G32B32_SINT\n  SPV_REFLECT_FORMAT_R32G32B32_SFLOAT    = 106, // = VK_FORMAT_R32G32B32_SFLOAT\n  SPV_REFLECT_FORMAT_R32G32B32A32_UINT   = 107, // = VK_FORMAT_R32G32B32A32_UINT\n  SPV_REFLECT_FORMAT_R32G32B32A32_SINT   = 108, // = VK_FORMAT_R32G32B32A32_SINT\n  SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT = 109, // = 
VK_FORMAT_R32G32B32A32_SFLOAT\n  SPV_REFLECT_FORMAT_R64_UINT            = 110, // = VK_FORMAT_R64_UINT\n  SPV_REFLECT_FORMAT_R64_SINT            = 111, // = VK_FORMAT_R64_SINT\n  SPV_REFLECT_FORMAT_R64_SFLOAT          = 112, // = VK_FORMAT_R64_SFLOAT\n  SPV_REFLECT_FORMAT_R64G64_UINT         = 113, // = VK_FORMAT_R64G64_UINT\n  SPV_REFLECT_FORMAT_R64G64_SINT         = 114, // = VK_FORMAT_R64G64_SINT\n  SPV_REFLECT_FORMAT_R64G64_SFLOAT       = 115, // = VK_FORMAT_R64G64_SFLOAT\n  SPV_REFLECT_FORMAT_R64G64B64_UINT      = 116, // = VK_FORMAT_R64G64B64_UINT\n  SPV_REFLECT_FORMAT_R64G64B64_SINT      = 117, // = VK_FORMAT_R64G64B64_SINT\n  SPV_REFLECT_FORMAT_R64G64B64_SFLOAT    = 118, // = VK_FORMAT_R64G64B64_SFLOAT\n  SPV_REFLECT_FORMAT_R64G64B64A64_UINT   = 119, // = VK_FORMAT_R64G64B64A64_UINT\n  SPV_REFLECT_FORMAT_R64G64B64A64_SINT   = 120, // = VK_FORMAT_R64G64B64A64_SINT\n  SPV_REFLECT_FORMAT_R64G64B64A64_SFLOAT = 121, // = VK_FORMAT_R64G64B64A64_SFLOAT\n} SpvReflectFormat;\n\n/*! @enum SpvReflectVariableFlagBits\n\n*/\nenum SpvReflectVariableFlagBits{\n  SPV_REFLECT_VARIABLE_FLAGS_NONE   = 0x00000000,\n  SPV_REFLECT_VARIABLE_FLAGS_UNUSED = 0x00000001,\n  // If variable points to a copy of the PhysicalStorageBuffer struct\n  SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY = 0x00000002,\n};\n\ntypedef uint32_t SpvReflectVariableFlags;\n\n/*! 
@enum SpvReflectDescriptorType\n\n*/\ntypedef enum SpvReflectDescriptorType {\n  SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER                    =  0,        // = VK_DESCRIPTOR_TYPE_SAMPLER\n  SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER     =  1,        // = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER\n  SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE              =  2,        // = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE\n  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE              =  3,        // = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE\n  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER       =  4,        // = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER\n  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER       =  5,        // = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER\n  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER             =  6,        // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER\n  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER             =  7,        // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER\n  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC     =  8,        // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC\n  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC     =  9,        // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC\n  SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT           = 10,        // = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT\n  SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000 // = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR\n} SpvReflectDescriptorType;\n\n/*! 
@enum SpvReflectShaderStageFlagBits\n\n*/\ntypedef enum SpvReflectShaderStageFlagBits {\n  SPV_REFLECT_SHADER_STAGE_VERTEX_BIT                  = 0x00000001, // = VK_SHADER_STAGE_VERTEX_BIT\n  SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT    = 0x00000002, // = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT\n  SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, // = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT\n  SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT                = 0x00000008, // = VK_SHADER_STAGE_GEOMETRY_BIT\n  SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT                = 0x00000010, // = VK_SHADER_STAGE_FRAGMENT_BIT\n  SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT                 = 0x00000020, // = VK_SHADER_STAGE_COMPUTE_BIT\n  SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV                 = 0x00000040, // = VK_SHADER_STAGE_TASK_BIT_NV\n  SPV_REFLECT_SHADER_STAGE_TASK_BIT_EXT                = SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV, // = VK_SHADER_STAGE_CALLABLE_BIT_EXT\n  SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV                 = 0x00000080, // = VK_SHADER_STAGE_MESH_BIT_NV\n  SPV_REFLECT_SHADER_STAGE_MESH_BIT_EXT                = SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV, // = VK_SHADER_STAGE_CALLABLE_BIT_EXT\n  SPV_REFLECT_SHADER_STAGE_RAYGEN_BIT_KHR              = 0x00000100, // = VK_SHADER_STAGE_RAYGEN_BIT_KHR\n  SPV_REFLECT_SHADER_STAGE_ANY_HIT_BIT_KHR             = 0x00000200, // = VK_SHADER_STAGE_ANY_HIT_BIT_KHR\n  SPV_REFLECT_SHADER_STAGE_CLOSEST_HIT_BIT_KHR         = 0x00000400, // = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR\n  SPV_REFLECT_SHADER_STAGE_MISS_BIT_KHR                = 0x00000800, // = VK_SHADER_STAGE_MISS_BIT_KHR\n  SPV_REFLECT_SHADER_STAGE_INTERSECTION_BIT_KHR        = 0x00001000, // = VK_SHADER_STAGE_INTERSECTION_BIT_KHR\n  SPV_REFLECT_SHADER_STAGE_CALLABLE_BIT_KHR            = 0x00002000, // = VK_SHADER_STAGE_CALLABLE_BIT_KHR\n\n} SpvReflectShaderStageFlagBits;\n\n/*! 
@enum SpvReflectGenerator\n\n*/\ntypedef enum SpvReflectGenerator {\n  SPV_REFLECT_GENERATOR_KHRONOS_LLVM_SPIRV_TRANSLATOR         = 6,\n  SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_ASSEMBLER         = 7,\n  SPV_REFLECT_GENERATOR_KHRONOS_GLSLANG_REFERENCE_FRONT_END   = 8,\n  SPV_REFLECT_GENERATOR_GOOGLE_SHADERC_OVER_GLSLANG           = 13,\n  SPV_REFLECT_GENERATOR_GOOGLE_SPIREGG                        = 14,\n  SPV_REFLECT_GENERATOR_GOOGLE_RSPIRV                         = 15,\n  SPV_REFLECT_GENERATOR_X_LEGEND_MESA_MESAIR_SPIRV_TRANSLATOR = 16,\n  SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_LINKER            = 17,\n  SPV_REFLECT_GENERATOR_WINE_VKD3D_SHADER_COMPILER            = 18,\n  SPV_REFLECT_GENERATOR_CLAY_CLAY_SHADER_COMPILER             = 19,\n} SpvReflectGenerator;\n\nenum {\n  SPV_REFLECT_MAX_ARRAY_DIMS                    = 32,\n  SPV_REFLECT_MAX_DESCRIPTOR_SETS               = 64,\n};\n\nenum {\n  SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE        = ~0,\n  SPV_REFLECT_SET_NUMBER_DONT_CHANGE            = ~0\n};\n\ntypedef struct SpvReflectNumericTraits {\n  struct Scalar {\n    uint32_t                        width;\n    uint32_t                        signedness;\n  } scalar;\n\n  struct Vector {\n    uint32_t                        component_count;\n  } vector;\n\n  struct Matrix {\n    uint32_t                        column_count;\n    uint32_t                        row_count;\n    uint32_t                        stride; // Measured in bytes\n  } matrix;\n} SpvReflectNumericTraits;\n\ntypedef struct SpvReflectImageTraits {\n  SpvDim                            dim;\n  uint32_t                          depth;\n  uint32_t                          arrayed;\n  uint32_t                          ms; // 0: single-sampled; 1: multisampled\n  uint32_t                          sampled;\n  SpvImageFormat                    image_format;\n} SpvReflectImageTraits;\n\ntypedef enum SpvReflectArrayDimType {\n  SPV_REFLECT_ARRAY_DIM_RUNTIME       = 0,         // 
OpTypeRuntimeArray\n} SpvReflectArrayDimType;\n\ntypedef struct SpvReflectArrayTraits {\n  uint32_t                          dims_count;\n  // Each entry is either:\n  // - specialization constant dimension\n  // - OpTypeRuntimeArray\n  // - the array length otherwise\n  uint32_t                          dims[SPV_REFLECT_MAX_ARRAY_DIMS];\n  // Stores Ids for dimensions that are specialization constants\n  uint32_t                          spec_constant_op_ids[SPV_REFLECT_MAX_ARRAY_DIMS];\n  uint32_t                          stride; // Measured in bytes\n} SpvReflectArrayTraits;\n\ntypedef struct SpvReflectBindingArrayTraits {\n  uint32_t                          dims_count;\n  uint32_t                          dims[SPV_REFLECT_MAX_ARRAY_DIMS];\n} SpvReflectBindingArrayTraits;\n\n/*! @struct SpvReflectTypeDescription\n    @brief Information about an OpType* instruction\n*/\ntypedef struct SpvReflectTypeDescription {\n  uint32_t                          id;\n  SpvOp                             op;\n  const char*                       type_name;\n  // Non-NULL if type is member of a struct\n  const char*                       struct_member_name;\n  SpvStorageClass                   storage_class;\n  SpvReflectTypeFlags               type_flags;\n  SpvReflectDecorationFlags         decoration_flags;\n\n  struct Traits {\n    SpvReflectNumericTraits         numeric;\n    SpvReflectImageTraits           image;\n    SpvReflectArrayTraits           array;\n  } traits;\n\n  // If underlying type is a struct (ex. 
array of structs)\n  // this gives access to the OpTypeStruct\n  struct SpvReflectTypeDescription* struct_type_description;\n\n  // Some pointers to SpvReflectTypeDescription are really\n  // just copies of another reference to the same OpType\n  uint32_t                          copied;\n\n  // @deprecated use struct_type_description instead\n  uint32_t                          member_count;\n  // @deprecated use struct_type_description instead\n  struct SpvReflectTypeDescription* members;\n} SpvReflectTypeDescription;\n\n\n/*! @struct SpvReflectInterfaceVariable\n    @brief The OpVariable that is either an Input or Output to the module\n*/\ntypedef struct SpvReflectInterfaceVariable {\n  uint32_t                            spirv_id;\n  const char*                         name;\n  uint32_t                            location;\n  uint32_t                            component;\n  SpvStorageClass                     storage_class;\n  const char*                         semantic;\n  SpvReflectDecorationFlags           decoration_flags;\n  SpvBuiltIn                          built_in;\n  SpvReflectNumericTraits             numeric;\n  SpvReflectArrayTraits               array;\n\n  uint32_t                            member_count;\n  struct SpvReflectInterfaceVariable* members;\n\n  SpvReflectFormat                    format;\n\n  // NOTE: SPIR-V shares type references for variables\n  //       that have the same underlying type. This means\n  //       that the same type name will appear for multiple\n  //       variables.\n  SpvReflectTypeDescription*          type_description;\n\n  struct {\n    uint32_t                          location;\n  } word_offset;\n} SpvReflectInterfaceVariable;\n\n/*! 
@struct SpvReflectBlockVariable\n\n*/\ntypedef struct SpvReflectBlockVariable {\n  uint32_t                          spirv_id;\n  const char*                       name;\n  // For Push Constants, this is the lowest offset of all memebers\n  uint32_t                          offset;           // Measured in bytes\n  uint32_t                          absolute_offset;  // Measured in bytes\n  uint32_t                          size;             // Measured in bytes\n  uint32_t                          padded_size;      // Measured in bytes\n  SpvReflectDecorationFlags         decoration_flags;\n  SpvReflectNumericTraits           numeric;\n  SpvReflectArrayTraits             array;\n  SpvReflectVariableFlags           flags;\n\n  uint32_t                          member_count;\n  struct SpvReflectBlockVariable*   members;\n\n  SpvReflectTypeDescription*        type_description;\n\n  struct {\n    uint32_t                          offset;\n  } word_offset;\n\n} SpvReflectBlockVariable;\n\n/*! 
@struct SpvReflectDescriptorBinding\n\n*/\ntypedef struct SpvReflectDescriptorBinding {\n  uint32_t                            spirv_id;\n  const char*                         name;\n  uint32_t                            binding;\n  uint32_t                            input_attachment_index;\n  uint32_t                            set;\n  SpvReflectDescriptorType            descriptor_type;\n  SpvReflectResourceType              resource_type;\n  SpvReflectImageTraits               image;\n  SpvReflectBlockVariable             block;\n  SpvReflectBindingArrayTraits        array;\n  uint32_t                            count;\n  uint32_t                            accessed;\n  uint32_t                            uav_counter_id;\n  struct SpvReflectDescriptorBinding* uav_counter_binding;\n  uint32_t                            byte_address_buffer_offset_count;\n  uint32_t*                           byte_address_buffer_offsets;\n\n  SpvReflectTypeDescription*          type_description;\n\n  struct {\n    uint32_t                          binding;\n    uint32_t                          set;\n  } word_offset;\n\n  SpvReflectDecorationFlags           decoration_flags;\n  // Requires SPV_GOOGLE_user_type\n  SpvReflectUserType                  user_type;\n} SpvReflectDescriptorBinding;\n\n/*! @struct SpvReflectDescriptorSet\n\n*/\ntypedef struct SpvReflectDescriptorSet {\n  uint32_t                          set;\n  uint32_t                          binding_count;\n  SpvReflectDescriptorBinding**     bindings;\n} SpvReflectDescriptorSet;\n\ntypedef enum SpvReflectExecutionModeValue {\n  SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT = 0xFFFFFFFF // specialization constant\n} SpvReflectExecutionModeValue;\n\n/*! 
@struct SpvReflectEntryPoint\n\n */\ntypedef struct SpvReflectEntryPoint {\n  const char*                       name;\n  uint32_t                          id;\n\n  SpvExecutionModel                 spirv_execution_model;\n  SpvReflectShaderStageFlagBits     shader_stage;\n\n  uint32_t                          input_variable_count;\n  SpvReflectInterfaceVariable**     input_variables;\n  uint32_t                          output_variable_count;\n  SpvReflectInterfaceVariable**     output_variables;\n  uint32_t                          interface_variable_count;\n  SpvReflectInterfaceVariable*      interface_variables;\n\n  uint32_t                          descriptor_set_count;\n  SpvReflectDescriptorSet*          descriptor_sets;\n\n  uint32_t                          used_uniform_count;\n  uint32_t*                         used_uniforms;\n  uint32_t                          used_push_constant_count;\n  uint32_t*                         used_push_constants;\n\n  uint32_t                          execution_mode_count;\n  SpvExecutionMode*                 execution_modes;\n\n  struct LocalSize {\n    uint32_t                        x;\n    uint32_t                        y;\n    uint32_t                        z;\n  } local_size;\n  uint32_t                          invocations; // valid for geometry\n  uint32_t                          output_vertices; // valid for geometry, tesselation\n} SpvReflectEntryPoint;\n\n/*! @struct SpvReflectCapability\n\n*/\ntypedef struct SpvReflectCapability {\n  SpvCapability                     value;\n  uint32_t                          word_offset;\n} SpvReflectCapability;\n\n\n/*! @struct SpvReflectSpecId\n\n*/\ntypedef struct SpvReflectSpecializationConstant {\n  uint32_t spirv_id;\n  uint32_t constant_id;\n  const char* name;\n} SpvReflectSpecializationConstant;\n\n/*! 
@struct SpvReflectShaderModule\n\n*/\ntypedef struct SpvReflectShaderModule {\n  SpvReflectGenerator               generator;\n  const char*                       entry_point_name;\n  uint32_t                          entry_point_id;\n  uint32_t                          entry_point_count;\n  SpvReflectEntryPoint*             entry_points;\n  SpvSourceLanguage                 source_language;\n  uint32_t                          source_language_version;\n  const char*                       source_file;\n  const char*                       source_source;\n  uint32_t                          capability_count;\n  SpvReflectCapability*             capabilities;\n  SpvExecutionModel                 spirv_execution_model;                            // Uses value(s) from first entry point\n  SpvReflectShaderStageFlagBits     shader_stage;                                     // Uses value(s) from first entry point\n  uint32_t                          descriptor_binding_count;                         // Uses value(s) from first entry point\n  SpvReflectDescriptorBinding*      descriptor_bindings;                              // Uses value(s) from first entry point\n  uint32_t                          descriptor_set_count;                             // Uses value(s) from first entry point\n  SpvReflectDescriptorSet           descriptor_sets[SPV_REFLECT_MAX_DESCRIPTOR_SETS]; // Uses value(s) from first entry point\n  uint32_t                          input_variable_count;                             // Uses value(s) from first entry point\n  SpvReflectInterfaceVariable**     input_variables;                                  // Uses value(s) from first entry point\n  uint32_t                          output_variable_count;                            // Uses value(s) from first entry point\n  SpvReflectInterfaceVariable**     output_variables;                                 // Uses value(s) from first entry point\n  uint32_t                          interface_variable_count;   
                      // Uses value(s) from first entry point\n  SpvReflectInterfaceVariable*      interface_variables;                              // Uses value(s) from first entry point\n  uint32_t                          push_constant_block_count;                        // Uses value(s) from first entry point\n  SpvReflectBlockVariable*          push_constant_blocks;                             // Uses value(s) from first entry point\n  uint32_t                          spec_constant_count;                              // Uses value(s) from first entry point\n  SpvReflectSpecializationConstant* spec_constants;                                   // Uses value(s) from first entry point\n\n  struct Internal {\n    SpvReflectModuleFlags           module_flags;\n    size_t                          spirv_size;\n    uint32_t*                       spirv_code;\n    uint32_t                        spirv_word_count;\n\n    size_t                          type_description_count;\n    SpvReflectTypeDescription*      type_descriptions;\n  } * _internal;\n\n} SpvReflectShaderModule;\n\n#if defined(__cplusplus)\nextern \"C\" {\n#endif\n\n/*! @fn spvReflectCreateShaderModule\n\n @param  size      Size in bytes of SPIR-V code.\n @param  p_code    Pointer to SPIR-V code.\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @return           SPV_REFLECT_RESULT_SUCCESS on success.\n\n*/\nSpvReflectResult spvReflectCreateShaderModule(\n  size_t                   size,\n  const void*              p_code,\n  SpvReflectShaderModule*  p_module\n);\n\n/*! 
@fn spvReflectCreateShaderModule2\n\n @param  flags     Flags for module creations.\n @param  size      Size in bytes of SPIR-V code.\n @param  p_code    Pointer to SPIR-V code.\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @return           SPV_REFLECT_RESULT_SUCCESS on success.\n\n*/\nSpvReflectResult spvReflectCreateShaderModule2(\n  SpvReflectModuleFlags    flags,\n  size_t                   size,\n  const void*              p_code,\n  SpvReflectShaderModule*  p_module\n);\n\nSPV_REFLECT_DEPRECATED(\"renamed to spvReflectCreateShaderModule\")\nSpvReflectResult spvReflectGetShaderModule(\n  size_t                   size,\n  const void*              p_code,\n  SpvReflectShaderModule*  p_module\n);\n\n\n/*! @fn spvReflectDestroyShaderModule\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n\n*/\nvoid spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module);\n\n\n/*! @fn spvReflectGetCodeSize\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @return           Returns the size of the SPIR-V in bytes\n\n*/\nuint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module);\n\n\n/*! @fn spvReflectGetCode\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @return           Returns a const pointer to the compiled SPIR-V bytecode.\n\n*/\nconst uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module);\n\n/*! @fn spvReflectGetEntryPoint\n\n @param  p_module     Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point  Name of the requested entry point.\n @return              Returns a const pointer to the requested entry point,\n                      or NULL if it's not found.\n*/\nconst SpvReflectEntryPoint* spvReflectGetEntryPoint(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point\n);\n\n/*! 
@fn spvReflectEnumerateDescriptorBindings\n\n @param  p_module     Pointer to an instance of SpvReflectShaderModule.\n @param  p_count      If pp_bindings is NULL, the module's descriptor binding\n                      count (across all descriptor sets) will be stored here.\n                      If pp_bindings is not NULL, *p_count must contain the\n                      module's descriptor binding count.\n @param  pp_bindings  If NULL, the module's total descriptor binding count\n                      will be written to *p_count.\n                      If non-NULL, pp_bindings must point to an array with\n                      *p_count entries, where pointers to the module's\n                      descriptor bindings will be written. The caller must not\n                      free the binding pointers written to this array.\n @return              If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                      Otherwise, the error code indicates the cause of the\n                      failure.\n\n*/\nSpvReflectResult spvReflectEnumerateDescriptorBindings(\n  const SpvReflectShaderModule*  p_module,\n  uint32_t*                      p_count,\n  SpvReflectDescriptorBinding**  pp_bindings\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointDescriptorBindings\n @brief  Creates a listing of all descriptor bindings that are used in the\n         static call tree of the given entry point.\n @param  p_module     Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point  The name of the entry point to get the descriptor bindings for.\n @param  p_count      If pp_bindings is NULL, the entry point's descriptor binding\n                      count (across all descriptor sets) will be stored here.\n                      If pp_bindings is not NULL, *p_count must contain the\n                      entry point's descriptor binding count.\n @param  pp_bindings  If NULL, the entry point's total descriptor binding count\n                      will be written to *p_count.\n                      If non-NULL, pp_bindings must point to an array with\n                      *p_count entries, where pointers to the entry point's\n                      descriptor bindings will be written. The caller must not\n                      free the binding pointers written to this array.\n @return              If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                      Otherwise, the error code indicates the cause of the\n                      failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectDescriptorBinding** pp_bindings\n);\n\n/*! 
@fn spvReflectEnumerateDescriptorSets\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @param  p_count   If pp_sets is NULL, the module's descriptor set\n                   count will be stored here.\n                   If pp_sets is not NULL, *p_count must contain the\n                   module's descriptor set count.\n @param  pp_sets   If NULL, the module's total descriptor set count\n                   will be written to *p_count.\n                   If non-NULL, pp_sets must point to an array with\n                   *p_count entries, where pointers to the module's\n                   descriptor sets will be written. The caller must not\n                   free the descriptor set pointers written to this array.\n @return           If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                   Otherwise, the error code indicates the cause of the\n                   failure.\n\n*/\nSpvReflectResult spvReflectEnumerateDescriptorSets(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectDescriptorSet**     pp_sets\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointDescriptorSets\n @brief  Creates a listing of all descriptor sets and their bindings that are\n         used in the static call tree of a given entry point.\n @param  p_module    Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point The name of the entry point to get the descriptor bindings for.\n @param  p_count     If pp_sets is NULL, the module's descriptor set\n                     count will be stored here.\n                     If pp_sets is not NULL, *p_count must contain the\n                     module's descriptor set count.\n @param  pp_sets     If NULL, the module's total descriptor set count\n                     will be written to *p_count.\n                     If non-NULL, pp_sets must point to an array with\n                     *p_count entries, where pointers to the module's\n                     descriptor sets will be written. The caller must not\n                     free the descriptor set pointers written to this array.\n @return             If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                     Otherwise, the error code indicates the cause of the\n                     failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointDescriptorSets(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectDescriptorSet**     pp_sets\n);\n\n\n/*! 
@fn spvReflectEnumerateInterfaceVariables\n @brief  If the module contains multiple entry points, this will only get\n         the interface variables for the first one.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_variables is NULL, the module's interface variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the module's interface variable count.\n @param  pp_variables  If NULL, the module's interface variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the module's\n                       interface variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateInterfaceVariables(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointInterfaceVariables\n @brief  Enumerate the interface variables for a given entry point.\n @param  entry_point The name of the entry point to get the interface variables for.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_variables is NULL, the entry point's interface variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the entry point's interface variable count.\n @param  pp_variables  If NULL, the entry point's interface variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the entry point's\n                       interface variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n\n/*! 
@fn spvReflectEnumerateInputVariables\n @brief  If the module contains multiple entry points, this will only get\n         the input variables for the first one.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_variables is NULL, the module's input variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the module's input variable count.\n @param  pp_variables  If NULL, the module's input variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the module's\n                       input variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateInputVariables(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointInputVariables\n @brief  Enumerate the input variables for a given entry point.\n @param  entry_point The name of the entry point to get the input variables for.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_variables is NULL, the entry point's input variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the entry point's input variable count.\n @param  pp_variables  If NULL, the entry point's input variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the entry point's\n                       input variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointInputVariables(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n\n/*! 
@fn spvReflectEnumerateOutputVariables\n @brief  Note: If the module contains multiple entry points, this will only get\n         the output variables for the first one.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_variables is NULL, the module's output variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the module's output variable count.\n @param  pp_variables  If NULL, the module's output variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the module's\n                       output variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateOutputVariables(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointOutputVariables\n @brief  Enumerate the output variables for a given entry point.\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point   The name of the entry point to get the output variables for.\n @param  p_count       If pp_variables is NULL, the entry point's output variable\n                       count will be stored here.\n                       If pp_variables is not NULL, *p_count must contain\n                       the entry point's output variable count.\n @param  pp_variables  If NULL, the entry point's output variable count will be\n                       written to *p_count.\n                       If non-NULL, pp_variables must point to an array with\n                       *p_count entries, where pointers to the entry point's\n                       output variables will be written. The caller must not\n                       free the interface variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the\n                       failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointOutputVariables(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n);\n\n\n/*! 
@fn spvReflectEnumeratePushConstantBlocks\n @brief  Note: If the module contains multiple entry points, this will only get\n         the push constant blocks for the first one.\n @param  p_module   Pointer to an instance of SpvReflectShaderModule.\n @param  p_count    If pp_blocks is NULL, the module's push constant\n                    block count will be stored here.\n                    If pp_blocks is not NULL, *p_count must\n                    contain the module's push constant block count.\n @param  pp_blocks  If NULL, the module's push constant block count\n                    will be written to *p_count.\n                    If non-NULL, pp_blocks must point to an\n                    array with *p_count entries, where pointers to\n                    the module's push constant blocks will be written.\n                    The caller must not free the block variables written\n                    to this array.\n @return            If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                    Otherwise, the error code indicates the cause of the\n                    failure.\n\n*/\nSpvReflectResult spvReflectEnumeratePushConstantBlocks(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectBlockVariable**     pp_blocks\n);\nSPV_REFLECT_DEPRECATED(\"renamed to spvReflectEnumeratePushConstantBlocks\")\nSpvReflectResult spvReflectEnumeratePushConstants(\n  const SpvReflectShaderModule* p_module,\n  uint32_t*                     p_count,\n  SpvReflectBlockVariable**     pp_blocks\n);\n\n/*! 
@fn spvReflectEnumerateEntryPointPushConstantBlocks\n @brief  Enumerate the push constant blocks used in the static call tree of a\n         given entry point.\n @param  p_module   Pointer to an instance of SpvReflectShaderModule.\n @param  p_count    If pp_blocks is NULL, the entry point's push constant\n                    block count will be stored here.\n                    If pp_blocks is not NULL, *p_count must\n                    contain the entry point's push constant block count.\n @param  pp_blocks  If NULL, the entry point's push constant block count\n                    will be written to *p_count.\n                    If non-NULL, pp_blocks must point to an\n                    array with *p_count entries, where pointers to\n                    the entry point's push constant blocks will be written.\n                    The caller must not free the block variables written\n                    to this array.\n @return            If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                    Otherwise, the error code indicates the cause of the\n                    failure.\n\n*/\nSpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectBlockVariable**     pp_blocks\n);\n\n\n/*! @fn spvReflectEnumerateSpecializationConstants\n @param  p_module      Pointer to an instance of SpvReflectShaderModule.\n @param  p_count       If pp_blocks is NULL, the module's specialization constant\n                       count will be stored here. If pp_blocks is not NULL, *p_count\n                       must contain the module's specialization constant count.\n @param  pp_constants  If NULL, the module's specialization constant count\n                       will be written to *p_count. 
If non-NULL, pp_constants must\n                       point to an array with *p_count entries, where pointers to\n                       the module's specialization constant blocks will be written.\n                       The caller must not free the variables written to this array.\n @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.\n                       Otherwise, the error code indicates the cause of the failure.\n*/\nSpvReflectResult spvReflectEnumerateSpecializationConstants(\n  const SpvReflectShaderModule*      p_module,\n  uint32_t*                          p_count,\n  SpvReflectSpecializationConstant** pp_constants\n);\n\n/*! @fn spvReflectGetDescriptorBinding\n\n @param  p_module        Pointer to an instance of SpvReflectShaderModule.\n @param  binding_number  The \"binding\" value of the requested descriptor\n                         binding.\n @param  set_number      The \"set\" value of the requested descriptor binding.\n @param  p_result        If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                         written to *p_result. Otherwise, an error code\n                         indicating the cause of the failure will be stored\n                         here.\n @return                 If the module contains a descriptor binding that\n                         matches the provided [binding_number, set_number]\n                         values, a pointer to that binding is returned. The\n                         caller must not free this pointer.\n                         If no match can be found, or if an unrelated error\n                         occurs, the return value will be NULL. 
Detailed\n                         error results are written to *p_result.\n@note                    If the module contains multiple descriptor bindings\n                         with the same set and binding numbers, there are\n                         no guarantees about which binding will be returned.\n\n*/\nconst SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding(\n  const SpvReflectShaderModule* p_module,\n  uint32_t                      binding_number,\n  uint32_t                      set_number,\n  SpvReflectResult*             p_result\n);\n\n/*! @fn spvReflectGetEntryPointDescriptorBinding\n @brief  Get the descriptor binding with the given binding number and set\n         number that is used in the static call tree of a certain entry\n         point.\n @param  p_module        Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point     The entry point to get the binding from.\n @param  binding_number  The \"binding\" value of the requested descriptor\n                         binding.\n @param  set_number      The \"set\" value of the requested descriptor binding.\n @param  p_result        If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                         written to *p_result. Otherwise, an error code\n                         indicating the cause of the failure will be stored\n                         here.\n @return                 If the entry point contains a descriptor binding that\n                         matches the provided [binding_number, set_number]\n                         values, a pointer to that binding is returned. The\n                         caller must not free this pointer.\n                         If no match can be found, or if an unrelated error\n                         occurs, the return value will be NULL. 
Detailed\n                         error results are written to *p_result.\n@note                    If the entry point contains multiple descriptor bindings\n                         with the same set and binding numbers, there are\n                         no guarantees about which binding will be returned.\n\n*/\nconst SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t                      binding_number,\n  uint32_t                      set_number,\n  SpvReflectResult*             p_result\n);\n\n\n/*! @fn spvReflectGetDescriptorSet\n\n @param  p_module    Pointer to an instance of SpvReflectShaderModule.\n @param  set_number  The \"set\" value of the requested descriptor set.\n @param  p_result    If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                     written to *p_result. Otherwise, an error code\n                     indicating the cause of the failure will be stored\n                     here.\n @return             If the module contains a descriptor set with the\n                     provided set_number, a pointer to that set is\n                     returned. The caller must not free this pointer.\n                     If no match can be found, or if an unrelated error\n                     occurs, the return value will be NULL. Detailed\n                     error results are written to *p_result.\n\n*/\nconst SpvReflectDescriptorSet* spvReflectGetDescriptorSet(\n  const SpvReflectShaderModule* p_module,\n  uint32_t                      set_number,\n  SpvReflectResult*             p_result\n);\n\n/*! 
@fn spvReflectGetEntryPointDescriptorSet\n\n @param  p_module    Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point The entry point to get the descriptor set from.\n @param  set_number  The \"set\" value of the requested descriptor set.\n @param  p_result    If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                     written to *p_result. Otherwise, a error code\n                     indicating the cause of the failure will be stored\n                     here.\n @return             If the entry point contains a descriptor set with the\n                     provided set_number, a pointer to that set is\n                     returned. The caller must not free this pointer.\n                     If no match can be found, or if an unrelated error\n                     occurs, the return value will be NULL. Detailed\n                     error results are written to *pResult.\n\n*/\nconst SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t                      set_number,\n  SpvReflectResult*             p_result\n);\n\n\n/* @fn spvReflectGetInputVariableByLocation\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @param  location  The \"location\" value of the requested input variable.\n                   A location of 0xFFFFFFFF will always return NULL\n                   with *p_result == ELEMENT_NOT_FOUND.\n @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                   written to *p_result. Otherwise, a error code\n                   indicating the cause of the failure will be stored\n                   here.\n @return           If the module contains an input interface variable\n                   with the provided location value, a pointer to that\n                   variable is returned. 
The caller must not free this\n                   pointer.\n                   If no match can be found, or if an unrelated error\n                   occurs, the return value will be NULL. Detailed\n                   error results are written to *pResult.\n@note\n\n*/\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation(\n  const SpvReflectShaderModule* p_module,\n  uint32_t                      location,\n  SpvReflectResult*             p_result\n);\nSPV_REFLECT_DEPRECATED(\"renamed to spvReflectGetInputVariableByLocation\")\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariable(\n  const SpvReflectShaderModule* p_module,\n  uint32_t                      location,\n  SpvReflectResult*             p_result\n);\n\n/* @fn spvReflectGetEntryPointInputVariableByLocation\n\n @param  p_module    Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point The entry point to get the input variable from.\n @param  location    The \"location\" value of the requested input variable.\n                     A location of 0xFFFFFFFF will always return NULL\n                     with *p_result == ELEMENT_NOT_FOUND.\n @param  p_result    If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                     written to *p_result. Otherwise, a error code\n                     indicating the cause of the failure will be stored\n                     here.\n @return             If the entry point contains an input interface variable\n                     with the provided location value, a pointer to that\n                     variable is returned. The caller must not free this\n                     pointer.\n                     If no match can be found, or if an unrelated error\n                     occurs, the return value will be NULL. 
Detailed\n                     error results are written to *pResult.\n@note\n\n*/\nconst SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation(\n  const SpvReflectShaderModule* p_module,\n  const char*                   entry_point,\n  uint32_t                      location,\n  SpvReflectResult*             p_result\n);\n\n/* @fn spvReflectGetInputVariableBySemantic\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @param  semantic  The \"semantic\" value of the requested input variable.\n                   A semantic of NULL will return NULL.\n                   A semantic of \"\" will always return NULL with\n                   *p_result == ELEMENT_NOT_FOUND.\n @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                   written to *p_result. Otherwise, a error code\n                   indicating the cause of the failure will be stored\n                   here.\n @return           If the module contains an input interface variable\n                   with the provided semantic, a pointer to that\n                   variable is returned. The caller must not free this\n                   pointer.\n                   If no match can be found, or if an unrelated error\n                   occurs, the return value will be NULL. 
Detailed\n                   error results are written to *pResult.\n@note\n\n*/\nconst SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic(\n  const SpvReflectShaderModule* p_module,\n  const char*                   semantic,\n  SpvReflectResult*             p_result\n);\n\n/* @fn spvReflectGetEntryPointInputVariableBySemantic\n\n @param  p_module  Pointer to an instance of SpvReflectShaderModule.\n @param  entry_point The entry point to get the input variable from.\n @param  semantic  The \"semantic\" value of the requested input variable.\n                   A semantic of NULL will return NULL.\n                   A semantic of \"\" will always return NULL with\n                   *p_result == ELEMENT_NOT_FOUND.\n @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be\n                   written to *p_result. Otherwise, a error code\n                   indicating the cause of the failure will be stored\n                   here.\n @return           If the entry point contains an input interface variable\n                   with the provided semantic, a pointer to that\n                   variable is returned. The caller must not free this\n                   pointer.\n                   If no match can be found, or if an unrelated error\n                   occurs, the return value will be NULL. 
Detailed
                   error results are written to *p_result.
@note

*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  const char*                   semantic,
  SpvReflectResult*             p_result
);

/*! @fn spvReflectGetOutputVariableByLocation
 @brief  Looks up an output interface variable of the module by its
         "location" decoration.
 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  location  The "location" value of the requested output variable.
                   A location of 0xFFFFFFFF will always return NULL
                   with *p_result == ELEMENT_NOT_FOUND.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be
                   written to *p_result. Otherwise, an error code
                   indicating the cause of the failure will be stored
                   here.
 @return           If the module contains an output interface variable
                   with the provided location value, a pointer to that
                   variable is returned. The caller must not free this
                   pointer.
                   If no match can be found, or if an unrelated error
                   occurs, the return value will be NULL. Detailed
                   error results are written to *p_result.

*/
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation(
  const SpvReflectShaderModule*  p_module,
  uint32_t                       location,
  SpvReflectResult*              p_result
);
SPV_REFLECT_DEPRECATED("renamed to spvReflectGetOutputVariableByLocation")
const SpvReflectInterfaceVariable* spvReflectGetOutputVariable(
  const SpvReflectShaderModule*  p_module,
  uint32_t                       location,
  SpvReflectResult*              p_result
);

/*! @fn spvReflectGetEntryPointOutputVariableByLocation
 @brief  Looks up an output interface variable of a specific entry point by
         its "location" decoration.
 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the output variable from.
 @param  location     The "location" value of the requested output variable.
                      A location of 0xFFFFFFFF will always return NULL
                      with *p_result == ELEMENT_NOT_FOUND.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored
                      here.
 @return              If the entry point contains an output interface variable
                      with the provided location value, a pointer to that
                      variable is returned. The caller must not free this
                      pointer.
                      If no match can be found, or if an unrelated error
                      occurs, the return value will be NULL. Detailed
                      error results are written to *p_result.

*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation(
  const SpvReflectShaderModule*  p_module,
  const char*                    entry_point,
  uint32_t                       location,
  SpvReflectResult*              p_result
);

/*! @fn spvReflectGetOutputVariableBySemantic
 @brief  Looks up an output interface variable of the module by its HLSL
         semantic string.
 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  semantic  The "semantic" value of the requested output variable.
                   A semantic of NULL will return NULL.
                   A semantic of "" will always return NULL with
                   *p_result == ELEMENT_NOT_FOUND.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be
                   written to *p_result. Otherwise, an error code
                   indicating the cause of the failure will be stored
                   here.
 @return           If the module contains an output interface variable
                   with the provided semantic, a pointer to that
                   variable is returned. The caller must not free this
                   pointer.
                   If no match can be found, or if an unrelated error
                   occurs, the return value will be NULL. Detailed
                   error results are written to *p_result.

*/
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic(
  const SpvReflectShaderModule*  p_module,
  const char*                    semantic,
  SpvReflectResult*              p_result
);

/*! @fn spvReflectGetEntryPointOutputVariableBySemantic
 @brief  Looks up an output interface variable of a specific entry point by
         its HLSL semantic string.
 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the output variable from.
 @param  semantic     The "semantic" value of the requested output variable.
                      A semantic of NULL will return NULL.
                      A semantic of "" will always return NULL with
                      *p_result == ELEMENT_NOT_FOUND.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored
                      here.
 @return              If the entry point contains an output interface variable
                      with the provided semantic, a pointer to that
                      variable is returned. The caller must not free this
                      pointer.
                      If no match can be found, or if an unrelated error
                      occurs, the return value will be NULL. Detailed
                      error results are written to *p_result.

*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic(
  const SpvReflectShaderModule*  p_module,
  const char*                    entry_point,
  const char*                    semantic,
  SpvReflectResult*              p_result
);

/*!
@fn spvReflectGetPushConstantBlock
 @brief  Returns the push constant block at the given index in the module's
         array of push constant blocks.
 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  index     The index of the desired block within the module's
                   array of push constant blocks.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be
                   written to *p_result. Otherwise, an error code
                   indicating the cause of the failure will be stored
                   here.
 @return           If the provided index is within range, a pointer to
                   the corresponding push constant block is returned.
                   The caller must not free this pointer.
                   If no match can be found, or if an unrelated error
                   occurs, the return value will be NULL. Detailed
                   error results are written to *p_result.

*/
const SpvReflectBlockVariable* spvReflectGetPushConstantBlock(
  const SpvReflectShaderModule*  p_module,
  uint32_t                       index,
  SpvReflectResult*              p_result
);
SPV_REFLECT_DEPRECATED("renamed to spvReflectGetPushConstantBlock")
const SpvReflectBlockVariable* spvReflectGetPushConstant(
  const SpvReflectShaderModule*  p_module,
  uint32_t                       index,
  SpvReflectResult*              p_result
);

/*! @fn spvReflectGetEntryPointPushConstantBlock
 @brief  Get the push constant block corresponding to the given entry point.
         As by the Vulkan specification there can be no more than one push
         constant block used by a given entry point, so if there is one it will
         be returned, otherwise NULL will be returned.
 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the push constant block from.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored
                      here.
 @return              If the entry point uses a push constant block, a pointer
                      to that block is returned. The caller must not free this
                      pointer.
                      If no match can be found, or if an unrelated error
                      occurs, the return value will be NULL. Detailed
                      error results are written to *p_result.

*/
const SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock(
  const SpvReflectShaderModule*  p_module,
  const char*                    entry_point,
  SpvReflectResult*              p_result
);


/*! @fn spvReflectChangeDescriptorBindingNumbers
 @brief  Assign new set and/or binding numbers to a descriptor binding.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode().  If the binding is used in multiple
         entry points within the module, it will be changed in all of them.
 @param  p_module            Pointer to an instance of SpvReflectShaderModule.
 @param  p_binding           Pointer to the descriptor binding to modify.
 @param  new_binding_number  The new binding number to assign to the
                             provided descriptor binding.
                             To leave the binding number unchanged, pass
                             SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE.
 @param  new_set_number      The new set number to assign to the
                             provided descriptor binding. Successfully changing
                             a descriptor binding's set number invalidates all
                             existing SpvReflectDescriptorBinding and
                             SpvReflectDescriptorSet pointers from this module.
                             To leave the set number unchanged, pass
                             SPV_REFLECT_SET_NUMBER_DONT_CHANGE.
 @return                     If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                             Otherwise, the error code indicates the cause of
                             the failure.
*/
SpvReflectResult spvReflectChangeDescriptorBindingNumbers(
  SpvReflectShaderModule*            p_module,
  const SpvReflectDescriptorBinding* p_binding,
  uint32_t                           new_binding_number,
  uint32_t                           new_set_number
);
SPV_REFLECT_DEPRECATED("Renamed to spvReflectChangeDescriptorBindingNumbers")
SpvReflectResult spvReflectChangeDescriptorBindingNumber(
  SpvReflectShaderModule*            p_module,
  const SpvReflectDescriptorBinding* p_descriptor_binding,
  uint32_t                           new_binding_number,
  uint32_t                           optional_new_set_number
);

/*! @fn spvReflectChangeDescriptorSetNumber
 @brief  Assign a new set number to an entire descriptor set (including
         all descriptor bindings in that set).
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode().  If the descriptor set is used in
         multiple entry points within the module, it will be modified in all
         of them.
 @param  p_module        Pointer to an instance of SpvReflectShaderModule.
 @param  p_set           Pointer to the descriptor set to modify.
 @param  new_set_number  The new set number to assign to the
                         provided descriptor set, and all its descriptor
                         bindings. Successfully changing a descriptor
                         binding's set number invalidates all existing
                         SpvReflectDescriptorBinding and
                         SpvReflectDescriptorSet pointers from this module.
                         To leave the set number unchanged, pass
                         SPV_REFLECT_SET_NUMBER_DONT_CHANGE.
 @return                 If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                         Otherwise, the error code indicates the cause of
                         the failure.
*/
SpvReflectResult spvReflectChangeDescriptorSetNumber(
  SpvReflectShaderModule*        p_module,
  const SpvReflectDescriptorSet* p_set,
  uint32_t                       new_set_number
);

/*! @fn spvReflectChangeInputVariableLocation
 @brief  Assign a new location to an input interface variable.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode().
         It is the caller's responsibility to avoid assigning the same
         location to multiple input variables.  If the input variable is used
         by multiple entry points in the module, it will be changed in all of
         them.
 @param  p_module          Pointer to an instance of SpvReflectShaderModule.
 @param  p_input_variable  Pointer to the input variable to update.
 @param  new_location      The new location to assign to p_input_variable.
 @return                   If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                           Otherwise, the error code indicates the cause of
                           the failure.

*/
SpvReflectResult spvReflectChangeInputVariableLocation(
  SpvReflectShaderModule*            p_module,
  const SpvReflectInterfaceVariable* p_input_variable,
  uint32_t                           new_location
);


/*! @fn spvReflectChangeOutputVariableLocation
 @brief  Assign a new location to an output interface variable.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode().
         It is the caller's responsibility to avoid assigning the same
         location to multiple output variables.  If the output variable is used
         by multiple entry points in the module, it will be changed in all of
         them.
 @param  p_module           Pointer to an instance of SpvReflectShaderModule.
 @param  p_output_variable  Pointer to the output variable to update.
 @param  new_location       The new location to assign to p_output_variable.
 @return                    If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                            Otherwise, the error code indicates the cause of
                            the failure.

*/
SpvReflectResult spvReflectChangeOutputVariableLocation(
  SpvReflectShaderModule*             p_module,
  const SpvReflectInterfaceVariable*  p_output_variable,
  uint32_t                            new_location
);


/*!
@fn spvReflectSourceLanguage

 @param  source_lang  The source language code.
 @return Returns string of source language specified in \a source_lang.
         The caller must not free the memory associated with this string.
*/
const char* spvReflectSourceLanguage(SpvSourceLanguage source_lang);

/*! @fn spvReflectBlockVariableTypeName

 @param  p_var Pointer to block variable.
 @return Returns string of block variable's type description type name
         or NULL if p_var is NULL.
*/
const char* spvReflectBlockVariableTypeName(
  const SpvReflectBlockVariable* p_var
);

#if defined(__cplusplus)
};  // extern "C"
#endif

#if defined(__cplusplus) && !defined(SPIRV_REFLECT_DISABLE_CPP_BINDINGS)
#include <cstdlib>
#include <string>
#include <vector>

namespace spv_reflect {

/*! \class ShaderModule
    \brief RAII C++ wrapper around SpvReflectShaderModule. The constructors
    create the reflection data via spvReflectCreateShaderModule2 and the
    destructor releases it; member functions forward to the matching C API
    entry points declared above.
*/
class ShaderModule {
public:
  ShaderModule();
  ShaderModule(size_t size, const void* p_code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE);
  ShaderModule(const std::vector<uint8_t>& code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE);
  ShaderModule(const std::vector<uint32_t>& code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE);
  ~ShaderModule();

  // Movable; copy operations are declared private below to make the class
  // noncopyable.
  ShaderModule(ShaderModule&& other);
  ShaderModule& operator=(ShaderModule&& other);

  // Result recorded by the constructor's create call.
  SpvReflectResult GetResult() const;

  const SpvReflectShaderModule& GetShaderModule() const;

  uint32_t        GetCodeSize() const;
  const uint32_t* GetCode() const;

  // Name of the first entry point (index 0).
  const char*           GetEntryPointName() const;

  const char*           GetSourceFile() const;

  uint32_t                      GetEntryPointCount() const;
  const char*                   GetEntryPointName(uint32_t index) const;
  SpvReflectShaderStageFlagBits GetEntryPointShaderStage(uint32_t index) const;

  SpvReflectShaderStageFlagBits GetShaderStage() const;
  SPV_REFLECT_DEPRECATED("Renamed to GetShaderStage")
  SpvReflectShaderStageFlagBits GetVulkanShaderStage() const {
    return GetShaderStage();
  }

  SpvReflectResult  EnumerateDescriptorBindings(uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const;
  SpvReflectResult  EnumerateEntryPointDescriptorBindings(const char* entry_point, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const;
  SpvReflectResult  EnumerateDescriptorSets(uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const;
  SpvReflectResult  EnumerateEntryPointDescriptorSets(const char* entry_point, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const;
  SpvReflectResult  EnumerateInterfaceVariables(uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumerateEntryPointInterfaceVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumerateInputVariables(uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumerateEntryPointInputVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumerateOutputVariables(uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumerateEntryPointOutputVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const;
  SpvReflectResult  EnumeratePushConstantBlocks(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const;
  SpvReflectResult  EnumerateEntryPointPushConstantBlocks(const char* entry_point, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const;
  SPV_REFLECT_DEPRECATED("Renamed to EnumeratePushConstantBlocks")
  SpvReflectResult  EnumeratePushConstants(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const {
    return EnumeratePushConstantBlocks(p_count, pp_blocks);
  }
  SpvReflectResult  EnumerateSpecializationConstants(uint32_t* p_count, SpvReflectSpecializationConstant** pp_constants) const;

  const SpvReflectDescriptorBinding*  GetDescriptorBinding(uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectDescriptorBinding*  GetEntryPointDescriptorBinding(const char* entry_point, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectDescriptorSet*      GetDescriptorSet(uint32_t set_number, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectDescriptorSet*      GetEntryPointDescriptorSet(const char* entry_point, uint32_t set_number, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetInputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const;
  SPV_REFLECT_DEPRECATED("Renamed to GetInputVariableByLocation")
  const SpvReflectInterfaceVariable*  GetInputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const {
    return GetInputVariableByLocation(location, p_result);
  }
  const SpvReflectInterfaceVariable*  GetEntryPointInputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetInputVariableBySemantic(const char* semantic, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetEntryPointInputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetOutputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const;
  SPV_REFLECT_DEPRECATED("Renamed to GetOutputVariableByLocation")
  const SpvReflectInterfaceVariable*  GetOutputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const {
    return GetOutputVariableByLocation(location, p_result);
  }
  const SpvReflectInterfaceVariable*  GetEntryPointOutputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetOutputVariableBySemantic(const char* semantic, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectInterfaceVariable*  GetEntryPointOutputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const;
  const SpvReflectBlockVariable*      GetPushConstantBlock(uint32_t index, SpvReflectResult* p_result = nullptr) const;
  SPV_REFLECT_DEPRECATED("Renamed to GetPushConstantBlock")
  const SpvReflectBlockVariable*      GetPushConstant(uint32_t index, SpvReflectResult* p_result = nullptr) const {
    return GetPushConstantBlock(index, p_result);
  }
  const SpvReflectBlockVariable*      GetEntryPointPushConstantBlock(const char* entry_point, SpvReflectResult* p_result = nullptr) const;

  SpvReflectResult ChangeDescriptorBindingNumbers(const SpvReflectDescriptorBinding* p_binding,
      uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE,
      uint32_t optional_new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE);
  SPV_REFLECT_DEPRECATED("Renamed to ChangeDescriptorBindingNumbers")
  SpvReflectResult ChangeDescriptorBindingNumber(const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE,
      uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {
    return ChangeDescriptorBindingNumbers(p_binding, new_binding_number, new_set_number);
  }
  SpvReflectResult ChangeDescriptorSetNumber(const SpvReflectDescriptorSet* p_set, uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE);
  SpvReflectResult ChangeInputVariableLocation(const SpvReflectInterfaceVariable* p_input_variable, uint32_t new_location);
  SpvReflectResult ChangeOutputVariableLocation(const SpvReflectInterfaceVariable* p_output_variable, uint32_t new_location);

private:
  // Make noncopyable
  ShaderModule(const ShaderModule&);
ShaderModule& operator=(const ShaderModule&);\n\nprivate:\n  mutable SpvReflectResult  m_result = SPV_REFLECT_RESULT_NOT_READY;\n  SpvReflectShaderModule    m_module = {};\n};\n\n\n// =================================================================================================\n// ShaderModule\n// =================================================================================================\n\n/*! @fn ShaderModule\n\n*/\ninline ShaderModule::ShaderModule() {}\n\n\n/*! @fn ShaderModule\n\n  @param  size\n  @param  p_code\n\n*/\ninline ShaderModule::ShaderModule(size_t size, const void* p_code, SpvReflectModuleFlags flags) {\n  m_result = spvReflectCreateShaderModule2(\n    flags,\n    size,\n    p_code,\n    &m_module);\n}\n\n/*! @fn ShaderModule\n\n  @param  code\n\n*/\ninline ShaderModule::ShaderModule(const std::vector<uint8_t>& code, SpvReflectModuleFlags flags) {\n  m_result = spvReflectCreateShaderModule2(\n    flags,\n    code.size(),\n    code.data(),\n    &m_module);\n}\n\n/*! @fn ShaderModule\n\n  @param  code\n\n*/\ninline ShaderModule::ShaderModule(const std::vector<uint32_t>& code, SpvReflectModuleFlags flags) {\n  m_result = spvReflectCreateShaderModule2(\n    flags,\n    code.size() * sizeof(uint32_t),\n    code.data(),\n    &m_module);\n}\n\n/*! @fn  ~ShaderModule\n\n*/\ninline ShaderModule::~ShaderModule() {\n  spvReflectDestroyShaderModule(&m_module);\n}\n\n\ninline ShaderModule::ShaderModule(ShaderModule&& other)\n{\n    *this = std::move(other);\n}\n\ninline ShaderModule& ShaderModule::operator=(ShaderModule&& other)\n{\n    m_result = std::move(other.m_result);\n    m_module = std::move(other.m_module);\n\n    other.m_module = {};\n    return *this;\n}\n\n/*! @fn GetResult\n\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::GetResult() const {\n  return m_result;\n}\n\n\n/*! @fn GetShaderModule\n\n  @return\n\n*/\ninline const SpvReflectShaderModule& ShaderModule::GetShaderModule() const {\n  return m_module;\n}\n\n\n/*! 
@fn GetCodeSize\n\n  @return\n\n  */\ninline uint32_t ShaderModule::GetCodeSize() const {\n  return spvReflectGetCodeSize(&m_module);\n}\n\n\n/*! @fn GetCode\n\n  @return\n\n*/\ninline const uint32_t* ShaderModule::GetCode() const {\n  return spvReflectGetCode(&m_module);\n}\n\n\n/*! @fn GetEntryPoint\n\n  @return Returns entry point\n\n*/\ninline const char* ShaderModule::GetEntryPointName() const {\n  return this->GetEntryPointName(0);\n}\n\n/*! @fn GetEntryPoint\n\n  @return Returns entry point\n\n*/\ninline const char* ShaderModule::GetSourceFile() const {\n  return m_module.source_file;\n}\n\n/*! @fn GetEntryPointCount\n\n  @param\n  @return\n*/\ninline uint32_t ShaderModule::GetEntryPointCount() const {\n  return m_module.entry_point_count;\n}\n\n/*! @fn GetEntryPointName\n\n  @param index\n  @return\n*/\ninline const char* ShaderModule::GetEntryPointName(uint32_t index) const {\n  return m_module.entry_points[index].name;\n}\n\n/*! @fn GetEntryPointShaderStage\n\n  @param index\n  @return Returns the shader stage for the entry point at \\b index\n*/\ninline SpvReflectShaderStageFlagBits ShaderModule::GetEntryPointShaderStage(uint32_t index) const {\n  return m_module.entry_points[index].shader_stage;\n}\n\n/*! @fn GetShaderStage\n\n  @return Returns shader stage for the first entry point\n\n*/\ninline SpvReflectShaderStageFlagBits ShaderModule::GetShaderStage() const {\n  return m_module.shader_stage;\n}\n\n/*! @fn EnumerateDescriptorBindings\n\n  @param  count\n  @param  p_binding_numbers\n  @param  pp_bindings\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateDescriptorBindings(\n  uint32_t*                     p_count,\n  SpvReflectDescriptorBinding** pp_bindings\n) const\n{\n  m_result = spvReflectEnumerateDescriptorBindings(\n    &m_module,\n    p_count,\n    pp_bindings);\n  return m_result;\n}\n\n/*! 
@fn EnumerateEntryPointDescriptorBindings\n\n  @param  entry_point\n  @param  count\n  @param  pp_bindings\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorBindings(\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectDescriptorBinding** pp_bindings\n) const\n{\n  m_result = spvReflectEnumerateEntryPointDescriptorBindings(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_bindings);\n  return m_result;\n}\n\n\n/*! @fn EnumerateDescriptorSets\n\n  @param  count\n  @param  pp_sets\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateDescriptorSets(\n  uint32_t*                 p_count,\n  SpvReflectDescriptorSet** pp_sets\n) const\n{\n  m_result = spvReflectEnumerateDescriptorSets(\n    &m_module,\n    p_count,\n    pp_sets);\n  return m_result;\n}\n\n/*! @fn EnumerateEntryPointDescriptorSets\n\n  @param  entry_point\n  @param  count\n  @param  pp_sets\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorSets(\n  const char*               entry_point,\n  uint32_t*                 p_count,\n  SpvReflectDescriptorSet** pp_sets\n) const\n{\n  m_result = spvReflectEnumerateEntryPointDescriptorSets(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_sets);\n  return m_result;\n}\n\n\n/*! @fn EnumerateInterfaceVariables\n\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateInterfaceVariables(\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateInterfaceVariables(\n    &m_module,\n    p_count,\n    pp_variables);\n  return m_result;\n}\n\n/*! 
@fn EnumerateEntryPointInterfaceVariables\n\n  @param  entry_point\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointInterfaceVariables(\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateEntryPointInterfaceVariables(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_variables);\n  return m_result;\n}\n\n\n/*! @fn EnumerateInputVariables\n\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateInputVariables(\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateInputVariables(\n    &m_module,\n    p_count,\n    pp_variables);\n  return m_result;\n}\n\n/*! @fn EnumerateEntryPointInputVariables\n\n  @param  entry_point\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointInputVariables(\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateEntryPointInputVariables(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_variables);\n  return m_result;\n}\n\n\n/*! @fn EnumerateOutputVariables\n\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateOutputVariables(\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateOutputVariables(\n    &m_module,\n    p_count,\n    pp_variables);\n  return m_result;\n}\n\n/*! 
@fn EnumerateEntryPointOutputVariables\n\n  @param  entry_point\n  @param  count\n  @param  pp_variables\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointOutputVariables(\n  const char*                   entry_point,\n  uint32_t*                     p_count,\n  SpvReflectInterfaceVariable** pp_variables\n) const\n{\n  m_result = spvReflectEnumerateEntryPointOutputVariables(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_variables);\n  return m_result;\n}\n\n\n/*! @fn EnumeratePushConstantBlocks\n\n  @param  count\n  @param  pp_blocks\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumeratePushConstantBlocks(\n  uint32_t*                 p_count,\n  SpvReflectBlockVariable** pp_blocks\n) const\n{\n  m_result = spvReflectEnumeratePushConstantBlocks(\n    &m_module,\n    p_count,\n    pp_blocks);\n  return m_result;\n}\n\n/*! @fn EnumerateSpecializationConstants\n  @param  p_count\n  @param  pp_constants\n  @return\n*/\ninline SpvReflectResult ShaderModule::EnumerateSpecializationConstants(\n    uint32_t*                          p_count,\n    SpvReflectSpecializationConstant** pp_constants\n) const\n{\n  m_result = spvReflectEnumerateSpecializationConstants(\n    &m_module,\n    p_count,\n    pp_constants\n  );\n  return m_result;\n}\n\n/*! @fn EnumerateEntryPointPushConstantBlocks\n\n  @param  entry_point\n  @param  count\n  @param  pp_blocks\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::EnumerateEntryPointPushConstantBlocks(\n  const char*               entry_point,\n  uint32_t*                 p_count,\n  SpvReflectBlockVariable** pp_blocks\n) const\n{\n  m_result = spvReflectEnumerateEntryPointPushConstantBlocks(\n      &m_module,\n      entry_point,\n      p_count,\n      pp_blocks);\n  return m_result;\n}\n\n\n/*! 
@fn GetDescriptorBinding\n\n  @param  binding_number\n  @param  set_number\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectDescriptorBinding* ShaderModule::GetDescriptorBinding(\n  uint32_t          binding_number,\n  uint32_t          set_number,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetDescriptorBinding(\n    &m_module,\n    binding_number,\n    set_number,\n    p_result);\n}\n\n/*! @fn GetEntryPointDescriptorBinding\n\n  @param  entry_point\n  @param  binding_number\n  @param  set_number\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectDescriptorBinding* ShaderModule::GetEntryPointDescriptorBinding(\n  const char*       entry_point,\n  uint32_t          binding_number,\n  uint32_t          set_number,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetEntryPointDescriptorBinding(\n    &m_module,\n    entry_point,\n    binding_number,\n    set_number,\n    p_result);\n}\n\n\n/*! @fn GetDescriptorSet\n\n  @param  set_number\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectDescriptorSet* ShaderModule::GetDescriptorSet(\n  uint32_t          set_number,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetDescriptorSet(\n    &m_module,\n    set_number,\n    p_result);\n}\n\n/*! @fn GetEntryPointDescriptorSet\n\n  @param  entry_point\n  @param  set_number\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectDescriptorSet* ShaderModule::GetEntryPointDescriptorSet(\n  const char*       entry_point,\n  uint32_t          set_number,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetEntryPointDescriptorSet(\n    &m_module,\n    entry_point,\n    set_number,\n    p_result);\n}\n\n\n/*! 
@fn GetInputVariable\n\n  @param  location\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableByLocation(\n  uint32_t          location,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetInputVariableByLocation(\n    &m_module,\n    location,\n    p_result);\n}\ninline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableBySemantic(\n  const char*       semantic,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetInputVariableBySemantic(\n    &m_module,\n    semantic,\n    p_result);\n}\n\n/*! @fn GetEntryPointInputVariable\n\n  @param  entry_point\n  @param  location\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableByLocation(\n  const char*       entry_point,\n  uint32_t          location,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetEntryPointInputVariableByLocation(\n    &m_module,\n    entry_point,\n    location,\n    p_result);\n}\ninline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableBySemantic(\n  const char*       entry_point,\n  const char*       semantic,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetEntryPointInputVariableBySemantic(\n    &m_module,\n    entry_point,\n    semantic,\n    p_result);\n}\n\n\n/*! @fn GetOutputVariable\n\n  @param  location\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableByLocation(\n  uint32_t           location,\n  SpvReflectResult*  p_result\n) const\n{\n  return spvReflectGetOutputVariableByLocation(\n    &m_module,\n    location,\n    p_result);\n}\ninline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableBySemantic(\n  const char*       semantic,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetOutputVariableBySemantic(&m_module,\n    semantic,\n    p_result);\n}\n\n/*! 
@fn GetEntryPointOutputVariable\n\n  @param  entry_point\n  @param  location\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableByLocation(\n  const char*        entry_point,\n  uint32_t           location,\n  SpvReflectResult*  p_result\n) const\n{\n  return spvReflectGetEntryPointOutputVariableByLocation(\n    &m_module,\n    entry_point,\n    location,\n    p_result);\n}\ninline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableBySemantic(\n  const char*       entry_point,\n  const char*       semantic,\n  SpvReflectResult* p_result\n) const\n{\n  return spvReflectGetEntryPointOutputVariableBySemantic(\n    &m_module,\n    entry_point,\n    semantic,\n    p_result);\n}\n\n\n/*! @fn GetPushConstant\n\n  @param  index\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectBlockVariable* ShaderModule::GetPushConstantBlock(\n  uint32_t           index,\n  SpvReflectResult*  p_result\n) const\n{\n  return spvReflectGetPushConstantBlock(\n    &m_module,\n    index,\n    p_result);\n}\n\n/*! @fn GetEntryPointPushConstant\n\n  @param  entry_point\n  @param  index\n  @param  p_result\n  @return\n\n*/\ninline const SpvReflectBlockVariable* ShaderModule::GetEntryPointPushConstantBlock(\n  const char*        entry_point,\n  SpvReflectResult*  p_result\n) const\n{\n  return spvReflectGetEntryPointPushConstantBlock(\n    &m_module,\n    entry_point,\n    p_result);\n}\n\n\n/*! 
@fn ChangeDescriptorBindingNumbers\n\n  @param  p_binding\n  @param  new_binding_number\n  @param  new_set_number\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::ChangeDescriptorBindingNumbers(\n  const SpvReflectDescriptorBinding* p_binding,\n  uint32_t                           new_binding_number,\n  uint32_t                           new_set_number\n)\n{\n  return spvReflectChangeDescriptorBindingNumbers(\n    &m_module,\n    p_binding,\n    new_binding_number,\n    new_set_number);\n}\n\n\n/*! @fn ChangeDescriptorSetNumber\n\n  @param  p_set\n  @param  new_set_number\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::ChangeDescriptorSetNumber(\n  const SpvReflectDescriptorSet* p_set,\n  uint32_t                       new_set_number\n)\n{\n  return spvReflectChangeDescriptorSetNumber(\n    &m_module,\n    p_set,\n    new_set_number);\n}\n\n\n/*! @fn ChangeInputVariableLocation\n\n  @param  p_input_variable\n  @param  new_location\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::ChangeInputVariableLocation(\n  const SpvReflectInterfaceVariable* p_input_variable,\n  uint32_t                           new_location)\n{\n  return spvReflectChangeInputVariableLocation(\n    &m_module,\n    p_input_variable,\n    new_location);\n}\n\n\n/*! @fn ChangeOutputVariableLocation\n\n  @param  p_input_variable\n  @param  new_location\n  @return\n\n*/\ninline SpvReflectResult ShaderModule::ChangeOutputVariableLocation(\n  const SpvReflectInterfaceVariable* p_output_variable,\n  uint32_t                           new_location)\n{\n  return spvReflectChangeOutputVariableLocation(\n    &m_module,\n    p_output_variable,\n    new_location);\n}\n\n} // namespace spv_reflect\n#endif // defined(__cplusplus) && !defined(SPIRV_REFLECT_DISABLE_CPP_WRAPPER)\n#endif // SPIRV_REFLECT_H\n\n// clang-format on\n"
  },
  {
    "path": "deps/metal-cpp/MetalSingleHeader.hpp",
    "content": "//\n// Metal.hpp\n//\n// Autogenerated on October 02, 2025.\n//\n// Copyright 2020-2024 Apple Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n#pragma once\n\n#define _NS_WEAK_IMPORT __attribute__((weak_import))\n#ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN\n#define _NS_EXPORT __attribute__((visibility(\"hidden\")))\n#else\n#define _NS_EXPORT __attribute__((visibility(\"default\")))\n#endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN\n#define _NS_EXTERN extern \"C\" _NS_EXPORT\n#define _NS_INLINE inline __attribute__((always_inline))\n#define _NS_PACKED __attribute__((packed))\n\n#define _NS_CONST(type, name) _NS_EXTERN type const name\n#define _NS_ENUM(type, name) enum name : type\n#define _NS_OPTIONS(type, name) \\\n    using name = type;          \\\n    enum : name\n\n#define _NS_CAST_TO_UINT(value) static_cast<NS::UInteger>(value)\n#define _NS_VALIDATE_SIZE(ns, name) static_assert(sizeof(ns::name) == sizeof(ns##name), \"size mismatch \" #ns \"::\" #name)\n#define _NS_VALIDATE_ENUM(ns, name) static_assert(_NS_CAST_TO_UINT(ns::name) == _NS_CAST_TO_UINT(ns##name), \"value mismatch \" #ns \"::\" #name)\n\n#include <objc/runtime.h>\n\n#define _NS_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol)\n#define _NS_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor)\n\n#if defined(NS_PRIVATE_IMPLEMENTATION)\n\n#include <dlfcn.h>\n\nnamespace NS::Private\n{\n    template <typename _Type>\n    inline _Type const LoadSymbol(const char* 
pSymbol)\n    {\n        const _Type* pAddress = static_cast<_Type*>(dlsym(RTLD_DEFAULT, pSymbol));\n\n        return pAddress ? *pAddress : _Type();\n    }\n} // NS::Private\n\n#ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN\n#define _NS_PRIVATE_VISIBILITY __attribute__((visibility(\"hidden\")))\n#else\n#define _NS_PRIVATE_VISIBILITY __attribute__((visibility(\"default\")))\n#endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN\n\n#define _NS_PRIVATE_IMPORT __attribute__((weak_import))\n\n#ifdef __OBJC__\n#define _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol))\n#define _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge void*)objc_getProtocol(#symbol))\n#else\n#define _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol)\n#define _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol)\n#endif // __OBJC__\n\n#define _NS_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _NS_PRIVATE_VISIBILITY = _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol)\n#define _NS_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _NS_PRIVATE_VISIBILITY = _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol)\n#define _NS_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _NS_PRIVATE_VISIBILITY = sel_registerName(symbol)\n\n#if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0)\n#define _NS_PRIVATE_DEF_CONST(type, symbol)              \\\n    _NS_EXTERN type const NS##symbol _NS_PRIVATE_IMPORT; \\\n    type const                       NS::symbol = (nullptr != &NS##symbol) ? 
NS##symbol : type()\n#else\n#define _NS_PRIVATE_DEF_CONST(type, symbol) \\\n    _NS_EXTERN type const NS##symbol _NS_PRIVATE_IMPORT; \\\n    type const             NS::symbol = Private::LoadSymbol<type>(\"NS\" #symbol)\n#endif\n\n#else\n\n#define _NS_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol\n#define _NS_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol\n#define _NS_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor\n#define _NS_PRIVATE_DEF_CONST(type, symbol) extern type const NS::symbol\n\n#endif // NS_PRIVATE_IMPLEMENTATION\n\nnamespace NS\n{\nnamespace Private\n{\n    namespace Class\n    {\n\n        _NS_PRIVATE_DEF_CLS(NSArray);\n        _NS_PRIVATE_DEF_CLS(NSAutoreleasePool);\n        _NS_PRIVATE_DEF_CLS(NSBundle);\n        _NS_PRIVATE_DEF_CLS(NSCondition);\n        _NS_PRIVATE_DEF_CLS(NSDate);\n        _NS_PRIVATE_DEF_CLS(NSDictionary);\n        _NS_PRIVATE_DEF_CLS(NSError);\n        _NS_PRIVATE_DEF_CLS(NSNotificationCenter);\n        _NS_PRIVATE_DEF_CLS(NSNumber);\n        _NS_PRIVATE_DEF_CLS(NSObject);\n        _NS_PRIVATE_DEF_CLS(NSProcessInfo);\n        _NS_PRIVATE_DEF_CLS(NSSet);\n        _NS_PRIVATE_DEF_CLS(NSString);\n        _NS_PRIVATE_DEF_CLS(NSURL);\n        _NS_PRIVATE_DEF_CLS(NSValue);\n\n    } // Class\n} // Private\n} // MTL\n\nnamespace NS\n{\nnamespace Private\n{\n    namespace Protocol\n    {\n\n    } // Protocol\n} // Private\n} // NS\n\nnamespace NS\n{\nnamespace Private\n{\n    namespace Selector\n    {\n\n        _NS_PRIVATE_DEF_SEL(addObject_,\n            \"addObject:\");\n        _NS_PRIVATE_DEF_SEL(addObserverName_object_queue_block_,\n            \"addObserverForName:object:queue:usingBlock:\");\n        _NS_PRIVATE_DEF_SEL(activeProcessorCount,\n            \"activeProcessorCount\");\n        _NS_PRIVATE_DEF_SEL(allBundles,\n            \"allBundles\");\n        _NS_PRIVATE_DEF_SEL(allFrameworks,\n            \"allFrameworks\");\n        _NS_PRIVATE_DEF_SEL(allObjects,\n            \"allObjects\");\n        
_NS_PRIVATE_DEF_SEL(alloc,\n            \"alloc\");\n        _NS_PRIVATE_DEF_SEL(appStoreReceiptURL,\n            \"appStoreReceiptURL\");\n        _NS_PRIVATE_DEF_SEL(arguments,\n            \"arguments\");\n        _NS_PRIVATE_DEF_SEL(array,\n            \"array\");\n        _NS_PRIVATE_DEF_SEL(arrayWithObject_,\n            \"arrayWithObject:\");\n        _NS_PRIVATE_DEF_SEL(arrayWithObjects_count_,\n            \"arrayWithObjects:count:\");\n        _NS_PRIVATE_DEF_SEL(automaticTerminationSupportEnabled,\n            \"automaticTerminationSupportEnabled\");\n        _NS_PRIVATE_DEF_SEL(autorelease,\n            \"autorelease\");\n        _NS_PRIVATE_DEF_SEL(beginActivityWithOptions_reason_,\n            \"beginActivityWithOptions:reason:\");\n        _NS_PRIVATE_DEF_SEL(boolValue,\n            \"boolValue\");\n        _NS_PRIVATE_DEF_SEL(broadcast,\n            \"broadcast\");\n        _NS_PRIVATE_DEF_SEL(builtInPlugInsPath,\n            \"builtInPlugInsPath\");\n        _NS_PRIVATE_DEF_SEL(builtInPlugInsURL,\n            \"builtInPlugInsURL\");\n        _NS_PRIVATE_DEF_SEL(bundleIdentifier,\n            \"bundleIdentifier\");\n        _NS_PRIVATE_DEF_SEL(bundlePath,\n            \"bundlePath\");\n        _NS_PRIVATE_DEF_SEL(bundleURL,\n            \"bundleURL\");\n        _NS_PRIVATE_DEF_SEL(bundleWithPath_,\n            \"bundleWithPath:\");\n        _NS_PRIVATE_DEF_SEL(bundleWithURL_,\n            \"bundleWithURL:\");\n        _NS_PRIVATE_DEF_SEL(caseInsensitiveCompare_,\n            \"caseInsensitiveCompare:\");\n        _NS_PRIVATE_DEF_SEL(characterAtIndex_,\n            \"characterAtIndex:\");\n        _NS_PRIVATE_DEF_SEL(charValue,\n            \"charValue\");\n        _NS_PRIVATE_DEF_SEL(countByEnumeratingWithState_objects_count_,\n            \"countByEnumeratingWithState:objects:count:\");\n        _NS_PRIVATE_DEF_SEL(cStringUsingEncoding_,\n            \"cStringUsingEncoding:\");\n        _NS_PRIVATE_DEF_SEL(code,\n            \"code\");\n        
_NS_PRIVATE_DEF_SEL(compare_,\n            \"compare:\");\n        _NS_PRIVATE_DEF_SEL(copy,\n            \"copy\");\n        _NS_PRIVATE_DEF_SEL(count,\n            \"count\");\n        _NS_PRIVATE_DEF_SEL(dateWithTimeIntervalSinceNow_,\n            \"dateWithTimeIntervalSinceNow:\");\n        _NS_PRIVATE_DEF_SEL(defaultCenter,\n            \"defaultCenter\");\n        _NS_PRIVATE_DEF_SEL(descriptionWithLocale_,\n            \"descriptionWithLocale:\");\n        _NS_PRIVATE_DEF_SEL(disableAutomaticTermination_,\n            \"disableAutomaticTermination:\");\n        _NS_PRIVATE_DEF_SEL(disableSuddenTermination,\n            \"disableSuddenTermination\");\n        _NS_PRIVATE_DEF_SEL(debugDescription,\n            \"debugDescription\");\n        _NS_PRIVATE_DEF_SEL(description,\n            \"description\");\n        _NS_PRIVATE_DEF_SEL(dictionary,\n            \"dictionary\");\n        _NS_PRIVATE_DEF_SEL(dictionaryWithObject_forKey_,\n            \"dictionaryWithObject:forKey:\");\n        _NS_PRIVATE_DEF_SEL(dictionaryWithObjects_forKeys_count_,\n            \"dictionaryWithObjects:forKeys:count:\");\n        _NS_PRIVATE_DEF_SEL(domain,\n            \"domain\");\n        _NS_PRIVATE_DEF_SEL(doubleValue,\n            \"doubleValue\");\n        _NS_PRIVATE_DEF_SEL(drain,\n            \"drain\");\n        _NS_PRIVATE_DEF_SEL(enableAutomaticTermination_,\n            \"enableAutomaticTermination:\");\n        _NS_PRIVATE_DEF_SEL(enableSuddenTermination,\n            \"enableSuddenTermination\");\n        _NS_PRIVATE_DEF_SEL(endActivity_,\n            \"endActivity:\");\n        _NS_PRIVATE_DEF_SEL(environment,\n            \"environment\");\n        _NS_PRIVATE_DEF_SEL(errorWithDomain_code_userInfo_,\n            \"errorWithDomain:code:userInfo:\");\n        _NS_PRIVATE_DEF_SEL(executablePath,\n            \"executablePath\");\n        _NS_PRIVATE_DEF_SEL(executableURL,\n            \"executableURL\");\n        _NS_PRIVATE_DEF_SEL(fileSystemRepresentation,\n        
    \"fileSystemRepresentation\");\n        _NS_PRIVATE_DEF_SEL(fileURLWithPath_,\n            \"fileURLWithPath:\");\n        _NS_PRIVATE_DEF_SEL(floatValue,\n            \"floatValue\");\n        _NS_PRIVATE_DEF_SEL(fullUserName,\n            \"fullUserName\");\n        _NS_PRIVATE_DEF_SEL(getValue_size_,\n            \"getValue:size:\");\n        _NS_PRIVATE_DEF_SEL(globallyUniqueString,\n            \"globallyUniqueString\");\n        _NS_PRIVATE_DEF_SEL(hash,\n            \"hash\");\n        _NS_PRIVATE_DEF_SEL(hasPerformanceProfile_,\n            \"hasPerformanceProfile:\");\n        _NS_PRIVATE_DEF_SEL(hostName,\n            \"hostName\");\n        _NS_PRIVATE_DEF_SEL(infoDictionary,\n            \"infoDictionary\");\n        _NS_PRIVATE_DEF_SEL(init,\n            \"init\");\n        _NS_PRIVATE_DEF_SEL(initFileURLWithPath_,\n            \"initFileURLWithPath:\");\n        _NS_PRIVATE_DEF_SEL(initWithBool_,\n            \"initWithBool:\");\n        _NS_PRIVATE_DEF_SEL(initWithBytes_objCType_,\n            \"initWithBytes:objCType:\");\n        _NS_PRIVATE_DEF_SEL(initWithBytesNoCopy_length_encoding_freeWhenDone_,\n            \"initWithBytesNoCopy:length:encoding:freeWhenDone:\");\n        _NS_PRIVATE_DEF_SEL(initWithChar_,\n            \"initWithChar:\");\n        _NS_PRIVATE_DEF_SEL(initWithCoder_,\n            \"initWithCoder:\");\n        _NS_PRIVATE_DEF_SEL(initWithCString_encoding_,\n            \"initWithCString:encoding:\");\n        _NS_PRIVATE_DEF_SEL(initWithDomain_code_userInfo_,\n            \"initWithDomain:code:userInfo:\");\n        _NS_PRIVATE_DEF_SEL(initWithDouble_,\n            \"initWithDouble:\");\n        _NS_PRIVATE_DEF_SEL(initWithFloat_,\n            \"initWithFloat:\");\n        _NS_PRIVATE_DEF_SEL(initWithInt_,\n            \"initWithInt:\");\n        _NS_PRIVATE_DEF_SEL(initWithLong_,\n            \"initWithLong:\");\n        _NS_PRIVATE_DEF_SEL(initWithLongLong_,\n            \"initWithLongLong:\");\n        
_NS_PRIVATE_DEF_SEL(initWithObjects_count_,\n            \"initWithObjects:count:\");\n        _NS_PRIVATE_DEF_SEL(initWithObjects_forKeys_count_,\n            \"initWithObjects:forKeys:count:\");\n        _NS_PRIVATE_DEF_SEL(initWithPath_,\n            \"initWithPath:\");\n        _NS_PRIVATE_DEF_SEL(initWithShort_,\n            \"initWithShort:\");\n        _NS_PRIVATE_DEF_SEL(initWithString_,\n            \"initWithString:\");\n        _NS_PRIVATE_DEF_SEL(initWithUnsignedChar_,\n            \"initWithUnsignedChar:\");\n        _NS_PRIVATE_DEF_SEL(initWithUnsignedInt_,\n            \"initWithUnsignedInt:\");\n        _NS_PRIVATE_DEF_SEL(initWithUnsignedLong_,\n            \"initWithUnsignedLong:\");\n        _NS_PRIVATE_DEF_SEL(initWithUnsignedLongLong_,\n            \"initWithUnsignedLongLong:\");\n        _NS_PRIVATE_DEF_SEL(initWithUnsignedShort_,\n            \"initWithUnsignedShort:\");\n        _NS_PRIVATE_DEF_SEL(initWithURL_,\n            \"initWithURL:\");\n        _NS_PRIVATE_DEF_SEL(integerValue,\n            \"integerValue\");\n        _NS_PRIVATE_DEF_SEL(intValue,\n            \"intValue\");\n        _NS_PRIVATE_DEF_SEL(isDeviceCertified_,\n            \"isDeviceCertifiedFor:\");\n        _NS_PRIVATE_DEF_SEL(isEqual_,\n            \"isEqual:\");\n        _NS_PRIVATE_DEF_SEL(isEqualToNumber_,\n            \"isEqualToNumber:\");\n        _NS_PRIVATE_DEF_SEL(isEqualToString_,\n            \"isEqualToString:\");\n        _NS_PRIVATE_DEF_SEL(isEqualToValue_,\n            \"isEqualToValue:\");\n        _NS_PRIVATE_DEF_SEL(isiOSAppOnMac,\n            \"isiOSAppOnMac\");\n        _NS_PRIVATE_DEF_SEL(isLoaded,\n            \"isLoaded\");\n        _NS_PRIVATE_DEF_SEL(isLowPowerModeEnabled,\n            \"isLowPowerModeEnabled\");\n        _NS_PRIVATE_DEF_SEL(isMacCatalystApp,\n            \"isMacCatalystApp\");\n        _NS_PRIVATE_DEF_SEL(isOperatingSystemAtLeastVersion_,\n            \"isOperatingSystemAtLeastVersion:\");\n        
_NS_PRIVATE_DEF_SEL(keyEnumerator,\n            \"keyEnumerator\");\n        _NS_PRIVATE_DEF_SEL(length,\n            \"length\");\n        _NS_PRIVATE_DEF_SEL(lengthOfBytesUsingEncoding_,\n            \"lengthOfBytesUsingEncoding:\");\n        _NS_PRIVATE_DEF_SEL(load,\n            \"load\");\n        _NS_PRIVATE_DEF_SEL(loadAndReturnError_,\n            \"loadAndReturnError:\");\n        _NS_PRIVATE_DEF_SEL(localizedDescription,\n            \"localizedDescription\");\n        _NS_PRIVATE_DEF_SEL(localizedFailureReason,\n            \"localizedFailureReason\");\n        _NS_PRIVATE_DEF_SEL(localizedInfoDictionary,\n            \"localizedInfoDictionary\");\n        _NS_PRIVATE_DEF_SEL(localizedRecoveryOptions,\n            \"localizedRecoveryOptions\");\n        _NS_PRIVATE_DEF_SEL(localizedRecoverySuggestion,\n            \"localizedRecoverySuggestion\");\n        _NS_PRIVATE_DEF_SEL(localizedStringForKey_value_table_,\n            \"localizedStringForKey:value:table:\");\n        _NS_PRIVATE_DEF_SEL(lock,\n            \"lock\");\n        _NS_PRIVATE_DEF_SEL(longValue,\n            \"longValue\");\n        _NS_PRIVATE_DEF_SEL(longLongValue,\n            \"longLongValue\");\n        _NS_PRIVATE_DEF_SEL(mainBundle,\n            \"mainBundle\");\n        _NS_PRIVATE_DEF_SEL(maximumLengthOfBytesUsingEncoding_,\n            \"maximumLengthOfBytesUsingEncoding:\");\n        _NS_PRIVATE_DEF_SEL(methodSignatureForSelector_,\n            \"methodSignatureForSelector:\");\n        _NS_PRIVATE_DEF_SEL(mutableBytes,\n            \"mutableBytes\");\n        _NS_PRIVATE_DEF_SEL(name,\n            \"name\");\n        _NS_PRIVATE_DEF_SEL(nextObject,\n            \"nextObject\");\n        _NS_PRIVATE_DEF_SEL(numberWithBool_,\n            \"numberWithBool:\");\n        _NS_PRIVATE_DEF_SEL(numberWithChar_,\n            \"numberWithChar:\");\n        _NS_PRIVATE_DEF_SEL(numberWithDouble_,\n            \"numberWithDouble:\");\n        _NS_PRIVATE_DEF_SEL(numberWithFloat_,\n          
  \"numberWithFloat:\");\n        _NS_PRIVATE_DEF_SEL(numberWithInt_,\n            \"numberWithInt:\");\n        _NS_PRIVATE_DEF_SEL(numberWithLong_,\n            \"numberWithLong:\");\n        _NS_PRIVATE_DEF_SEL(numberWithLongLong_,\n            \"numberWithLongLong:\");\n        _NS_PRIVATE_DEF_SEL(numberWithShort_,\n            \"numberWithShort:\");\n        _NS_PRIVATE_DEF_SEL(numberWithUnsignedChar_,\n            \"numberWithUnsignedChar:\");\n        _NS_PRIVATE_DEF_SEL(numberWithUnsignedInt_,\n            \"numberWithUnsignedInt:\");\n        _NS_PRIVATE_DEF_SEL(numberWithUnsignedLong_,\n            \"numberWithUnsignedLong:\");\n        _NS_PRIVATE_DEF_SEL(numberWithUnsignedLongLong_,\n            \"numberWithUnsignedLongLong:\");\n        _NS_PRIVATE_DEF_SEL(numberWithUnsignedShort_,\n            \"numberWithUnsignedShort:\");\n        _NS_PRIVATE_DEF_SEL(objCType,\n            \"objCType\");\n        _NS_PRIVATE_DEF_SEL(object,\n            \"object\");\n        _NS_PRIVATE_DEF_SEL(objectAtIndex_,\n            \"objectAtIndex:\");\n        _NS_PRIVATE_DEF_SEL(objectEnumerator,\n            \"objectEnumerator\");\n        _NS_PRIVATE_DEF_SEL(objectForInfoDictionaryKey_,\n            \"objectForInfoDictionaryKey:\");\n        _NS_PRIVATE_DEF_SEL(objectForKey_,\n            \"objectForKey:\");\n        _NS_PRIVATE_DEF_SEL(operatingSystem,\n            \"operatingSystem\");\n        _NS_PRIVATE_DEF_SEL(operatingSystemVersion,\n            \"operatingSystemVersion\");\n        _NS_PRIVATE_DEF_SEL(operatingSystemVersionString,\n            \"operatingSystemVersionString\");\n        _NS_PRIVATE_DEF_SEL(pathForAuxiliaryExecutable_,\n            \"pathForAuxiliaryExecutable:\");\n        _NS_PRIVATE_DEF_SEL(performActivityWithOptions_reason_usingBlock_,\n            \"performActivityWithOptions:reason:usingBlock:\");\n        _NS_PRIVATE_DEF_SEL(performExpiringActivityWithReason_usingBlock_,\n            \"performExpiringActivityWithReason:usingBlock:\");\n     
   _NS_PRIVATE_DEF_SEL(physicalMemory,\n            \"physicalMemory\");\n        _NS_PRIVATE_DEF_SEL(pointerValue,\n            \"pointerValue\");\n        _NS_PRIVATE_DEF_SEL(preflightAndReturnError_,\n            \"preflightAndReturnError:\");\n        _NS_PRIVATE_DEF_SEL(privateFrameworksPath,\n            \"privateFrameworksPath\");\n        _NS_PRIVATE_DEF_SEL(privateFrameworksURL,\n            \"privateFrameworksURL\");\n        _NS_PRIVATE_DEF_SEL(processIdentifier,\n            \"processIdentifier\");\n        _NS_PRIVATE_DEF_SEL(processInfo,\n            \"processInfo\");\n        _NS_PRIVATE_DEF_SEL(processName,\n            \"processName\");\n        _NS_PRIVATE_DEF_SEL(processorCount,\n            \"processorCount\");\n        _NS_PRIVATE_DEF_SEL(rangeOfString_options_,\n            \"rangeOfString:options:\");\n        _NS_PRIVATE_DEF_SEL(release,\n            \"release\");\n        _NS_PRIVATE_DEF_SEL(removeObserver_,\n            \"removeObserver:\");\n        _NS_PRIVATE_DEF_SEL(resourcePath,\n            \"resourcePath\");\n        _NS_PRIVATE_DEF_SEL(resourceURL,\n            \"resourceURL\");\n        _NS_PRIVATE_DEF_SEL(respondsToSelector_,\n            \"respondsToSelector:\");\n        _NS_PRIVATE_DEF_SEL(retain,\n            \"retain\");\n        _NS_PRIVATE_DEF_SEL(retainCount,\n            \"retainCount\");\n        _NS_PRIVATE_DEF_SEL(setAutomaticTerminationSupportEnabled_,\n            \"setAutomaticTerminationSupportEnabled:\");\n        _NS_PRIVATE_DEF_SEL(setProcessName_,\n            \"setProcessName:\");\n        _NS_PRIVATE_DEF_SEL(sharedFrameworksPath,\n            \"sharedFrameworksPath\");\n        _NS_PRIVATE_DEF_SEL(sharedFrameworksURL,\n            \"sharedFrameworksURL\");\n        _NS_PRIVATE_DEF_SEL(sharedSupportPath,\n            \"sharedSupportPath\");\n        _NS_PRIVATE_DEF_SEL(sharedSupportURL,\n            \"sharedSupportURL\");\n        _NS_PRIVATE_DEF_SEL(shortValue,\n            \"shortValue\");\n        
_NS_PRIVATE_DEF_SEL(showPools,\n            \"showPools\");\n        _NS_PRIVATE_DEF_SEL(signal,\n            \"signal\");\n        _NS_PRIVATE_DEF_SEL(string,\n            \"string\");\n        _NS_PRIVATE_DEF_SEL(stringValue,\n            \"stringValue\");\n        _NS_PRIVATE_DEF_SEL(stringWithString_,\n            \"stringWithString:\");\n        _NS_PRIVATE_DEF_SEL(stringWithCString_encoding_,\n            \"stringWithCString:encoding:\");\n        _NS_PRIVATE_DEF_SEL(stringByAppendingString_,\n            \"stringByAppendingString:\");\n        _NS_PRIVATE_DEF_SEL(systemUptime,\n            \"systemUptime\");\n        _NS_PRIVATE_DEF_SEL(thermalState,\n            \"thermalState\");\n        _NS_PRIVATE_DEF_SEL(unload,\n            \"unload\");\n        _NS_PRIVATE_DEF_SEL(unlock,\n            \"unlock\");\n        _NS_PRIVATE_DEF_SEL(unsignedCharValue,\n            \"unsignedCharValue\");\n        _NS_PRIVATE_DEF_SEL(unsignedIntegerValue,\n            \"unsignedIntegerValue\");\n        _NS_PRIVATE_DEF_SEL(unsignedIntValue,\n            \"unsignedIntValue\");\n        _NS_PRIVATE_DEF_SEL(unsignedLongValue,\n            \"unsignedLongValue\");\n        _NS_PRIVATE_DEF_SEL(unsignedLongLongValue,\n            \"unsignedLongLongValue\");\n        _NS_PRIVATE_DEF_SEL(unsignedShortValue,\n            \"unsignedShortValue\");\n        _NS_PRIVATE_DEF_SEL(URLForAuxiliaryExecutable_,\n            \"URLForAuxiliaryExecutable:\");\n        _NS_PRIVATE_DEF_SEL(userInfo,\n            \"userInfo\");\n        _NS_PRIVATE_DEF_SEL(userName,\n            \"userName\");\n        _NS_PRIVATE_DEF_SEL(UTF8String,\n            \"UTF8String\");\n        _NS_PRIVATE_DEF_SEL(valueWithBytes_objCType_,\n            \"valueWithBytes:objCType:\");\n        _NS_PRIVATE_DEF_SEL(valueWithPointer_,\n            \"valueWithPointer:\");\n        _NS_PRIVATE_DEF_SEL(wait,\n            \"wait\");\n        _NS_PRIVATE_DEF_SEL(waitUntilDate_,\n            \"waitUntilDate:\");\n    } // Class\n} // 
Private\n} // MTL\n\n#include <CoreFoundation/CoreFoundation.h>\n#include <cstdint>\n\nnamespace NS\n{\nusing TimeInterval = double;\n\nusing Integer = std::intptr_t;\nusing UInteger = std::uintptr_t;\n\nconst Integer  IntegerMax = INTPTR_MAX;\nconst Integer  IntegerMin = INTPTR_MIN;\nconst UInteger UIntegerMax = UINTPTR_MAX;\n\nstruct OperatingSystemVersion\n{\n    Integer majorVersion;\n    Integer minorVersion;\n    Integer patchVersion;\n} _NS_PACKED;\n}\n\n#include <objc/message.h>\n#include <objc/runtime.h>\n\n#include <type_traits>\n\nnamespace NS\n{\ntemplate <class _Class, class _Base = class Object>\nclass _NS_EXPORT Referencing : public _Base\n{\npublic:\n    _Class*  retain();\n    void     release();\n\n    _Class*  autorelease();\n\n    UInteger retainCount() const;\n};\n\ntemplate <class _Class, class _Base = class Object>\nclass Copying : public Referencing<_Class, _Base>\n{\npublic:\n    _Class* copy() const;\n};\n\ntemplate <class _Class, class _Base = class Object>\nclass SecureCoding : public Referencing<_Class, _Base>\n{\n};\n\nclass Object : public Referencing<Object, objc_object>\n{\npublic:\n    UInteger      hash() const;\n    bool          isEqual(const Object* pObject) const;\n\n    class String* description() const;\n    class String* debugDescription() const;\n\nprotected:\n    friend class Referencing<Object, objc_object>;\n\n    template <class _Class>\n    static _Class* alloc(const char* pClassName);\n    template <class _Class>\n    static _Class* alloc(const void* pClass);\n    template <class _Class>\n    _Class* init();\n\n    template <class _Dst>\n    static _Dst                   bridgingCast(const void* pObj);\n    static class MethodSignature* methodSignatureForSelector(const void* pObj, SEL selector);\n    static bool                   respondsToSelector(const void* pObj, SEL selector);\n    template <typename _Type>\n    static constexpr bool doesRequireMsgSendStret();\n    template <typename _Ret, typename... 
_Args>\n    static _Ret sendMessage(const void* pObj, SEL selector, _Args... args);\n    template <typename _Ret, typename... _Args>\n    static _Ret sendMessageSafe(const void* pObj, SEL selector, _Args... args);\n\nprivate:\n    Object() = delete;\n    Object(const Object&) = delete;\n    ~Object() = delete;\n\n    Object& operator=(const Object&) = delete;\n};\n}\n\ntemplate <class _Class, class _Base /* = Object */>\n_NS_INLINE _Class* NS::Referencing<_Class, _Base>::retain()\n{\n    return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(retain));\n}\n\ntemplate <class _Class, class _Base /* = Object */>\n_NS_INLINE void NS::Referencing<_Class, _Base>::release()\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(release));\n}\n\ntemplate <class _Class, class _Base /* = Object */>\n_NS_INLINE _Class* NS::Referencing<_Class, _Base>::autorelease()\n{\n    return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(autorelease));\n}\n\ntemplate <class _Class, class _Base /* = Object */>\n_NS_INLINE NS::UInteger NS::Referencing<_Class, _Base>::retainCount() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(retainCount));\n}\n\ntemplate <class _Class, class _Base /* = Object */>\n_NS_INLINE _Class* NS::Copying<_Class, _Base>::copy() const\n{\n    return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(copy));\n}\n\ntemplate <class _Dst>\n_NS_INLINE _Dst NS::Object::bridgingCast(const void* pObj)\n{\n#ifdef __OBJC__\n    return (__bridge _Dst)pObj;\n#else\n    return (_Dst)pObj;\n#endif // __OBJC__\n}\n\ntemplate <typename _Type>\n_NS_INLINE constexpr bool NS::Object::doesRequireMsgSendStret()\n{\n#if (defined(__i386__) || defined(__x86_64__))\n    constexpr size_t kStructLimit = (sizeof(std::uintptr_t) << 1);\n\n    return sizeof(_Type) > kStructLimit;\n#elif defined(__arm64__)\n    return false;\n#elif defined(__arm__)\n    constexpr size_t kStructLimit = sizeof(std::uintptr_t);\n\n    return std::is_class_v<_Type> && (sizeof(_Type) > 
kStructLimit);\n#else\n#error \"Unsupported architecture!\"\n#endif\n}\n\ntemplate <>\n_NS_INLINE constexpr bool NS::Object::doesRequireMsgSendStret<void>()\n{\n    return false;\n}\n\ntemplate <typename _Ret, typename... _Args>\n_NS_INLINE _Ret NS::Object::sendMessage(const void* pObj, SEL selector, _Args... args)\n{\n#if (defined(__i386__) || defined(__x86_64__))\n    if constexpr (std::is_floating_point<_Ret>())\n    {\n        using SendMessageProcFpret = _Ret (*)(const void*, SEL, _Args...);\n\n        const SendMessageProcFpret pProc = reinterpret_cast<SendMessageProcFpret>(&objc_msgSend_fpret);\n\n        return (*pProc)(pObj, selector, args...);\n    }\n    else\n#endif // ( defined( __i386__ )  || defined( __x86_64__ )  )\n#if !defined(__arm64__)\n        if constexpr (doesRequireMsgSendStret<_Ret>())\n    {\n        using SendMessageProcStret = void (*)(_Ret*, const void*, SEL, _Args...);\n\n        const SendMessageProcStret pProc = reinterpret_cast<SendMessageProcStret>(&objc_msgSend_stret);\n        _Ret                       ret;\n\n        (*pProc)(&ret, pObj, selector, args...);\n\n        return ret;\n    }\n    else\n#endif // !defined( __arm64__ )\n    {\n        using SendMessageProc = _Ret (*)(const void*, SEL, _Args...);\n\n        const SendMessageProc pProc = reinterpret_cast<SendMessageProc>(&objc_msgSend);\n\n        return (*pProc)(pObj, selector, args...);\n    }\n}\n\n_NS_INLINE NS::MethodSignature* NS::Object::methodSignatureForSelector(const void* pObj, SEL selector)\n{\n    return sendMessage<MethodSignature*>(pObj, _NS_PRIVATE_SEL(methodSignatureForSelector_), selector);\n}\n\n_NS_INLINE bool NS::Object::respondsToSelector(const void* pObj, SEL selector)\n{\n    return sendMessage<bool>(pObj, _NS_PRIVATE_SEL(respondsToSelector_), selector);\n}\n\ntemplate <typename _Ret, typename... _Args>\n_NS_INLINE _Ret NS::Object::sendMessageSafe(const void* pObj, SEL selector, _Args... 
args)\n{\n    if ((respondsToSelector(pObj, selector)) || (nullptr != methodSignatureForSelector(pObj, selector)))\n    {\n        return sendMessage<_Ret>(pObj, selector, args...);\n    }\n\n    if constexpr (!std::is_void<_Ret>::value)\n    {\n        return _Ret(0);\n    }\n}\n\ntemplate <class _Class>\n_NS_INLINE _Class* NS::Object::alloc(const char* pClassName)\n{\n    return sendMessage<_Class*>(objc_lookUpClass(pClassName), _NS_PRIVATE_SEL(alloc));\n}\n\ntemplate <class _Class>\n_NS_INLINE _Class* NS::Object::alloc(const void* pClass)\n{\n    return sendMessage<_Class*>(pClass, _NS_PRIVATE_SEL(alloc));\n}\n\ntemplate <class _Class>\n_NS_INLINE _Class* NS::Object::init()\n{\n    return sendMessage<_Class*>(this, _NS_PRIVATE_SEL(init));\n}\n\n_NS_INLINE NS::UInteger NS::Object::hash() const\n{\n    return sendMessage<UInteger>(this, _NS_PRIVATE_SEL(hash));\n}\n\n_NS_INLINE bool NS::Object::isEqual(const Object* pObject) const\n{\n    return sendMessage<bool>(this, _NS_PRIVATE_SEL(isEqual_), pObject);\n}\n\n_NS_INLINE NS::String* NS::Object::description() const\n{\n    return sendMessage<String*>(this, _NS_PRIVATE_SEL(description));\n}\n\n_NS_INLINE NS::String* NS::Object::debugDescription() const\n{\n    return sendMessageSafe<String*>(this, _NS_PRIVATE_SEL(debugDescription));\n}\n\nnamespace NS\n{\nstruct FastEnumerationState\n{\n    unsigned long  state;\n    Object**       itemsPtr;\n    unsigned long* mutationsPtr;\n    unsigned long  extra[5];\n} _NS_PACKED;\n\nclass FastEnumeration : public Referencing<FastEnumeration>\n{\npublic:\n    NS::UInteger countByEnumerating(FastEnumerationState* pState, Object** pBuffer, NS::UInteger len);\n};\n\ntemplate <class _ObjectType>\nclass Enumerator : public Referencing<Enumerator<_ObjectType>, FastEnumeration>\n{\npublic:\n    _ObjectType* nextObject();\n    class Array* allObjects();\n};\n}\n\n_NS_INLINE NS::UInteger NS::FastEnumeration::countByEnumerating(FastEnumerationState* pState, Object** pBuffer, NS::UInteger 
len)\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(countByEnumeratingWithState_objects_count_), pState, pBuffer, len);\n}\n\ntemplate <class _ObjectType>\n_NS_INLINE _ObjectType* NS::Enumerator<_ObjectType>::nextObject()\n{\n    return Object::sendMessage<_ObjectType*>(this, _NS_PRIVATE_SEL(nextObject));\n}\n\ntemplate <class _ObjectType>\n_NS_INLINE NS::Array* NS::Enumerator<_ObjectType>::allObjects()\n{\n    return Object::sendMessage<Array*>(this, _NS_PRIVATE_SEL(allObjects));\n}\n\nnamespace NS\n{\nclass Array : public Copying<Array>\n{\npublic:\n    static Array* array();\n    static Array* array(const Object* pObject);\n    static Array* array(const Object* const* pObjects, UInteger count);\n\n    static Array* alloc();\n\n    Array*        init();\n    Array*        init(const Object* const* pObjects, UInteger count);\n    Array*        init(const class Coder* pCoder);\n\n    template <class _Object = Object>\n    _Object*            object(UInteger index) const;\n    UInteger            count() const;\n    Enumerator<Object>* objectEnumerator() const;\n};\n}\n\n_NS_INLINE NS::Array* NS::Array::array()\n{\n    return Object::sendMessage<Array*>(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(array));\n}\n\n_NS_INLINE NS::Array* NS::Array::array(const Object* pObject)\n{\n    return Object::sendMessage<Array*>(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(arrayWithObject_), pObject);\n}\n\n_NS_INLINE NS::Array* NS::Array::array(const Object* const* pObjects, UInteger count)\n{\n    return Object::sendMessage<Array*>(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(arrayWithObjects_count_), pObjects, count);\n}\n\n_NS_INLINE NS::Array* NS::Array::alloc()\n{\n    return NS::Object::alloc<Array>(_NS_PRIVATE_CLS(NSArray));\n}\n\n_NS_INLINE NS::Array* NS::Array::init()\n{\n    return NS::Object::init<Array>();\n}\n\n_NS_INLINE NS::Array* NS::Array::init(const Object* const* pObjects, UInteger count)\n{\n    return Object::sendMessage<Array*>(this, 
_NS_PRIVATE_SEL(initWithObjects_count_), pObjects, count);\n}\n\n_NS_INLINE NS::Array* NS::Array::init(const class Coder* pCoder)\n{\n    return Object::sendMessage<Array*>(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder);\n}\n\n_NS_INLINE NS::UInteger NS::Array::count() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(count));\n}\n\ntemplate <class _Object>\n_NS_INLINE _Object* NS::Array::object(UInteger index) const\n{\n    return Object::sendMessage<_Object*>(this, _NS_PRIVATE_SEL(objectAtIndex_), index);\n}\n\n_NS_INLINE NS::Enumerator<NS::Object>* NS::Array::objectEnumerator() const\n{\n    return NS::Object::sendMessage<Enumerator<NS::Object>*>(this, _NS_PRIVATE_SEL(objectEnumerator));\n}\n\nnamespace NS\n{\nclass AutoreleasePool : public Object\n{\npublic:\n    static AutoreleasePool* alloc();\n    AutoreleasePool*        init();\n\n    void                    drain();\n\n    void                    addObject(Object* pObject);\n\n    static void             showPools();\n};\n}\n\n_NS_INLINE NS::AutoreleasePool* NS::AutoreleasePool::alloc()\n{\n    return NS::Object::alloc<AutoreleasePool>(_NS_PRIVATE_CLS(NSAutoreleasePool));\n}\n\n_NS_INLINE NS::AutoreleasePool* NS::AutoreleasePool::init()\n{\n    return NS::Object::init<AutoreleasePool>();\n}\n\n_NS_INLINE void NS::AutoreleasePool::drain()\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(drain));\n}\n\n_NS_INLINE void NS::AutoreleasePool::addObject(Object* pObject)\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(addObject_), pObject);\n}\n\n_NS_INLINE void NS::AutoreleasePool::showPools()\n{\n    Object::sendMessage<void>(_NS_PRIVATE_CLS(NSAutoreleasePool), _NS_PRIVATE_SEL(showPools));\n}\n\nnamespace NS\n{\nclass Dictionary : public NS::Copying<Dictionary>\n{\npublic:\n    static Dictionary* dictionary();\n    static Dictionary* dictionary(const Object* pObject, const Object* pKey);\n    static Dictionary* dictionary(const Object* const* pObjects, const Object* const* 
pKeys, UInteger count);\n\n    static Dictionary* alloc();\n\n    Dictionary*        init();\n    Dictionary*        init(const Object* const* pObjects, const Object* const* pKeys, UInteger count);\n    Dictionary*        init(const class Coder* pCoder);\n\n    template <class _KeyType = Object>\n    Enumerator<_KeyType>* keyEnumerator() const;\n\n    template <class _Object = Object>\n    _Object* object(const Object* pKey) const;\n    UInteger count() const;\n};\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::dictionary()\n{\n    return Object::sendMessage<Dictionary*>(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionary));\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::dictionary(const Object* pObject, const Object* pKey)\n{\n    return Object::sendMessage<Dictionary*>(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionaryWithObject_forKey_), pObject, pKey);\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::dictionary(const Object* const* pObjects, const Object* const* pKeys, UInteger count)\n{\n    return Object::sendMessage<Dictionary*>(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionaryWithObjects_forKeys_count_),\n        pObjects, pKeys, count);\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::alloc()\n{\n    return NS::Object::alloc<Dictionary>(_NS_PRIVATE_CLS(NSDictionary));\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::init()\n{\n    return NS::Object::init<Dictionary>();\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::init(const Object* const* pObjects, const Object* const* pKeys, UInteger count)\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(initWithObjects_forKeys_count_), pObjects, pKeys, count);\n}\n\n_NS_INLINE NS::Dictionary* NS::Dictionary::init(const class Coder* pCoder)\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder);\n}\n\ntemplate <class _KeyType>\n_NS_INLINE NS::Enumerator<_KeyType>* NS::Dictionary::keyEnumerator() const\n{\n    return 
Object::sendMessage<Enumerator<_KeyType>*>(this, _NS_PRIVATE_SEL(keyEnumerator));\n}\n\ntemplate <class _Object>\n_NS_INLINE _Object* NS::Dictionary::object(const Object* pKey) const\n{\n    return Object::sendMessage<_Object*>(this, _NS_PRIVATE_SEL(objectForKey_), pKey);\n}\n\n_NS_INLINE NS::UInteger NS::Dictionary::count() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(count));\n}\n\nnamespace NS\n{\n\n_NS_ENUM(Integer, ComparisonResult) {\n    OrderedAscending = -1L,\n    OrderedSame,\n    OrderedDescending\n};\n\nconst Integer NotFound = IntegerMax;\n\n}\n\nnamespace NS\n{\nstruct Range\n{\n    static Range Make(UInteger loc, UInteger len);\n\n    Range(UInteger loc, UInteger len);\n\n    bool     Equal(const Range& range) const;\n    bool     LocationInRange(UInteger loc) const;\n    UInteger Max() const;\n\n    UInteger location;\n    UInteger length;\n} _NS_PACKED;\n}\n\n_NS_INLINE NS::Range::Range(UInteger loc, UInteger len)\n    : location(loc)\n    , length(len)\n{\n}\n\n_NS_INLINE NS::Range NS::Range::Make(UInteger loc, UInteger len)\n{\n    return Range(loc, len);\n}\n\n_NS_INLINE bool NS::Range::Equal(const Range& range) const\n{\n    return (location == range.location) && (length == range.length);\n}\n\n_NS_INLINE bool NS::Range::LocationInRange(UInteger loc) const\n{\n    return (!(loc < location)) && ((loc - location) < length);\n}\n\n_NS_INLINE NS::UInteger NS::Range::Max() const\n{\n    return location + length;\n}\n\nnamespace NS\n{\n_NS_ENUM(NS::UInteger, StringEncoding) {\n    ASCIIStringEncoding = 1,\n    NEXTSTEPStringEncoding = 2,\n    JapaneseEUCStringEncoding = 3,\n    UTF8StringEncoding = 4,\n    ISOLatin1StringEncoding = 5,\n    SymbolStringEncoding = 6,\n    NonLossyASCIIStringEncoding = 7,\n    ShiftJISStringEncoding = 8,\n    ISOLatin2StringEncoding = 9,\n    UnicodeStringEncoding = 10,\n    WindowsCP1251StringEncoding = 11,\n    WindowsCP1252StringEncoding = 12,\n    WindowsCP1253StringEncoding = 13,\n    
WindowsCP1254StringEncoding = 14,\n    WindowsCP1250StringEncoding = 15,\n    ISO2022JPStringEncoding = 21,\n    MacOSRomanStringEncoding = 30,\n\n    UTF16StringEncoding = UnicodeStringEncoding,\n\n    UTF16BigEndianStringEncoding = 0x90000100,\n    UTF16LittleEndianStringEncoding = 0x94000100,\n\n    UTF32StringEncoding = 0x8c000100,\n    UTF32BigEndianStringEncoding = 0x98000100,\n    UTF32LittleEndianStringEncoding = 0x9c000100\n};\n\n_NS_OPTIONS(NS::UInteger, StringCompareOptions) {\n    CaseInsensitiveSearch = 1,\n    LiteralSearch = 2,\n    BackwardsSearch = 4,\n    AnchoredSearch = 8,\n    NumericSearch = 64,\n    DiacriticInsensitiveSearch = 128,\n    WidthInsensitiveSearch = 256,\n    ForcedOrderingSearch = 512,\n    RegularExpressionSearch = 1024\n};\n\nusing unichar = unsigned short;\n\nclass String : public Copying<String>\n{\npublic:\n    static String*   string();\n    static String*   string(const String* pString);\n    static String*   string(const char* pString, StringEncoding encoding);\n\n    static String*   alloc();\n    String*          init();\n    String*          init(const String* pString);\n    String*          init(const char* pString, StringEncoding encoding);\n    String*          init(void* pBytes, UInteger len, StringEncoding encoding, bool freeBuffer);\n\n    unichar          character(UInteger index) const;\n    UInteger         length() const;\n\n    const char*      cString(StringEncoding encoding) const;\n    const char*      utf8String() const;\n    UInteger         maximumLengthOfBytes(StringEncoding encoding) const;\n    UInteger         lengthOfBytes(StringEncoding encoding) const;\n\n    bool             isEqualToString(const String* pString) const;\n    Range            rangeOfString(const String* pString, StringCompareOptions options) const;\n\n    const char*      fileSystemRepresentation() const;\n\n    String*          stringByAppendingString(const String* pString) const;\n    ComparisonResult 
caseInsensitiveCompare(const String* pString) const;\n};\n\n#define MTLSTR(literal) (NS::String*)__builtin___CFStringMakeConstantString(\"\" literal \"\")\n\ntemplate <std::size_t _StringLen>\n[[deprecated(\"please use MTLSTR(str)\")]] constexpr const String* MakeConstantString(const char (&str)[_StringLen])\n{\n    return reinterpret_cast<const String*>(__CFStringMakeConstantString(str));\n}\n\n}\n\n_NS_INLINE NS::String* NS::String::string()\n{\n    return Object::sendMessage<String*>(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(string));\n}\n\n_NS_INLINE NS::String* NS::String::string(const String* pString)\n{\n    return Object::sendMessage<String*>(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(stringWithString_), pString);\n}\n\n_NS_INLINE NS::String* NS::String::string(const char* pString, StringEncoding encoding)\n{\n    return Object::sendMessage<String*>(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(stringWithCString_encoding_), pString, encoding);\n}\n\n_NS_INLINE NS::String* NS::String::alloc()\n{\n    return Object::alloc<String>(_NS_PRIVATE_CLS(NSString));\n}\n\n_NS_INLINE NS::String* NS::String::init()\n{\n    return Object::init<String>();\n}\n\n_NS_INLINE NS::String* NS::String::init(const String* pString)\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(initWithString_), pString);\n}\n\n_NS_INLINE NS::String* NS::String::init(const char* pString, StringEncoding encoding)\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(initWithCString_encoding_), pString, encoding);\n}\n\n_NS_INLINE NS::String* NS::String::init(void* pBytes, UInteger len, StringEncoding encoding, bool freeBuffer)\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(initWithBytesNoCopy_length_encoding_freeWhenDone_), pBytes, len, encoding, freeBuffer);\n}\n\n_NS_INLINE NS::unichar NS::String::character(UInteger index) const\n{\n    return Object::sendMessage<unichar>(this, _NS_PRIVATE_SEL(characterAtIndex_), index);\n}\n\n_NS_INLINE 
NS::UInteger NS::String::length() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(length));\n}\n\n_NS_INLINE const char* NS::String::cString(StringEncoding encoding) const\n{\n    return Object::sendMessage<const char*>(this, _NS_PRIVATE_SEL(cStringUsingEncoding_), encoding);\n}\n\n_NS_INLINE const char* NS::String::utf8String() const\n{\n    return Object::sendMessage<const char*>(this, _NS_PRIVATE_SEL(UTF8String));\n}\n\n_NS_INLINE NS::UInteger NS::String::maximumLengthOfBytes(StringEncoding encoding) const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(maximumLengthOfBytesUsingEncoding_), encoding);\n}\n\n_NS_INLINE NS::UInteger NS::String::lengthOfBytes(StringEncoding encoding) const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(lengthOfBytesUsingEncoding_), encoding);\n}\n\n_NS_INLINE bool NS::String::isEqualToString(const NS::String* pString) const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(isEqualToString_), pString);\n}\n\n_NS_INLINE NS::Range NS::String::rangeOfString(const NS::String* pString, NS::StringCompareOptions options) const\n{\n    return Object::sendMessage<Range>(this, _NS_PRIVATE_SEL(rangeOfString_options_), pString, options);\n}\n\n_NS_INLINE const char* NS::String::fileSystemRepresentation() const\n{\n    return Object::sendMessage<const char*>(this, _NS_PRIVATE_SEL(fileSystemRepresentation));\n}\n\n_NS_INLINE NS::String* NS::String::stringByAppendingString(const String* pString) const\n{\n    return Object::sendMessage<NS::String*>(this, _NS_PRIVATE_SEL(stringByAppendingString_), pString);\n}\n\n_NS_INLINE NS::ComparisonResult NS::String::caseInsensitiveCompare(const String* pString) const\n{\n    return Object::sendMessage<NS::ComparisonResult>(this, _NS_PRIVATE_SEL(caseInsensitiveCompare_), pString);\n}\n\n#include <functional>\n\nnamespace NS\n{\nusing NotificationName = class String*;\n\nclass Notification : public 
NS::Referencing<Notification>\n{\npublic:\n    NS::String*     name() const;\n    NS::Object*     object() const;\n    NS::Dictionary* userInfo() const;\n};\n\nusing ObserverBlock = void(^)(Notification*);\nusing ObserverFunction = std::function<void(Notification*)>;\n\nclass NotificationCenter : public NS::Referencing<NotificationCenter>\n{\n    public:\n        static class NotificationCenter* defaultCenter();\n        Object* addObserver(NotificationName name, Object* pObj, void* pQueue, ObserverBlock block);\n        Object* addObserver(NotificationName name, Object* pObj, void* pQueue, ObserverFunction &handler);\n        void removeObserver(Object* pObserver);\n\n};\n}\n\n_NS_INLINE NS::String* NS::Notification::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _NS_PRIVATE_SEL(name));\n}\n\n_NS_INLINE NS::Object* NS::Notification::object() const\n{\n    return Object::sendMessage<NS::Object*>(this, _NS_PRIVATE_SEL(object));\n}\n\n_NS_INLINE NS::Dictionary* NS::Notification::userInfo() const\n{\n    return Object::sendMessage<NS::Dictionary*>(this, _NS_PRIVATE_SEL(userInfo));\n}\n\n_NS_INLINE NS::NotificationCenter* NS::NotificationCenter::defaultCenter()\n{\n    return NS::Object::sendMessage<NS::NotificationCenter*>(_NS_PRIVATE_CLS(NSNotificationCenter), _NS_PRIVATE_SEL(defaultCenter));\n}\n\n_NS_INLINE NS::Object* NS::NotificationCenter::addObserver(NS::NotificationName name, Object* pObj, void* pQueue, NS::ObserverBlock block)\n{\n    return NS::Object::sendMessage<Object*>(this, _NS_PRIVATE_SEL(addObserverName_object_queue_block_), name, pObj, pQueue, block);\n}\n\n_NS_INLINE NS::Object* NS::NotificationCenter::addObserver(NS::NotificationName name, Object* pObj, void* pQueue, NS::ObserverFunction &handler)\n{\n    __block ObserverFunction blockFunction = handler;\n\n    return addObserver(name, pObj, pQueue, ^(NS::Notification* pNotif) {blockFunction(pNotif);});\n}\n\n_NS_INLINE void NS::NotificationCenter::removeObserver(Object* 
pObserver)\n{\n    return NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(removeObserver_), pObserver);\n}\n\nnamespace NS\n{\n_NS_CONST(NotificationName, BundleDidLoadNotification);\n_NS_CONST(NotificationName, BundleResourceRequestLowDiskSpaceNotification);\n\nclass String* LocalizedString(const String* pKey, const String*);\nclass String* LocalizedStringFromTable(const String* pKey, const String* pTbl, const String*);\nclass String* LocalizedStringFromTableInBundle(const String* pKey, const String* pTbl, const class Bundle* pBdle, const String*);\nclass String* LocalizedStringWithDefaultValue(const String* pKey, const String* pTbl, const class Bundle* pBdle, const String* pVal, const String*);\n\nclass Bundle : public Referencing<Bundle>\n{\npublic:\n    static Bundle*      mainBundle();\n\n    static Bundle*      bundle(const class String* pPath);\n    static Bundle*      bundle(const class URL* pURL);\n\n    static class Array* allBundles();\n    static class Array* allFrameworks();\n\n    static Bundle*      alloc();\n\n    Bundle*             init(const class String* pPath);\n    Bundle*             init(const class URL* pURL);\n\n    bool                load();\n    bool                unload();\n\n    bool                isLoaded() const;\n\n    bool                preflightAndReturnError(class Error** pError) const;\n    bool                loadAndReturnError(class Error** pError);\n\n    class URL*          bundleURL() const;\n    class URL*          resourceURL() const;\n    class URL*          executableURL() const;\n    class URL*          URLForAuxiliaryExecutable(const class String* pExecutableName) const;\n\n    class URL*          privateFrameworksURL() const;\n    class URL*          sharedFrameworksURL() const;\n    class URL*          sharedSupportURL() const;\n    class URL*          builtInPlugInsURL() const;\n    class URL*          appStoreReceiptURL() const;\n\n    class String*       bundlePath() const;\n    class String*       
resourcePath() const;\n    class String*       executablePath() const;\n    class String*       pathForAuxiliaryExecutable(const class String* pExecutableName) const;\n\n    class String*       privateFrameworksPath() const;\n    class String*       sharedFrameworksPath() const;\n    class String*       sharedSupportPath() const;\n    class String*       builtInPlugInsPath() const;\n\n    class String*       bundleIdentifier() const;\n    class Dictionary*   infoDictionary() const;\n    class Dictionary*   localizedInfoDictionary() const;\n    class Object*       objectForInfoDictionaryKey(const class String* pKey);\n\n    class String*       localizedString(const class String* pKey, const class String* pValue = nullptr, const class String* pTableName = nullptr) const;\n};\n}\n\n_NS_PRIVATE_DEF_CONST(NS::NotificationName, BundleDidLoadNotification);\n_NS_PRIVATE_DEF_CONST(NS::NotificationName, BundleResourceRequestLowDiskSpaceNotification);\n\n_NS_INLINE NS::String* NS::LocalizedString(const String* pKey, const String*)\n{\n    return Bundle::mainBundle()->localizedString(pKey, nullptr, nullptr);\n}\n\n_NS_INLINE NS::String* NS::LocalizedStringFromTable(const String* pKey, const String* pTbl, const String*)\n{\n    return Bundle::mainBundle()->localizedString(pKey, nullptr, pTbl);\n}\n\n_NS_INLINE NS::String* NS::LocalizedStringFromTableInBundle(const String* pKey, const String* pTbl, const Bundle* pBdl, const String*)\n{\n    return pBdl->localizedString(pKey, nullptr, pTbl);\n}\n\n_NS_INLINE NS::String* NS::LocalizedStringWithDefaultValue(const String* pKey, const String* pTbl, const Bundle* pBdl, const String* pVal, const String*)\n{\n    return pBdl->localizedString(pKey, pVal, pTbl);\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::mainBundle()\n{\n    return Object::sendMessage<Bundle*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(mainBundle));\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::bundle(const class String* pPath)\n{\n    return 
Object::sendMessage<Bundle*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(bundleWithPath_), pPath);\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::bundle(const class URL* pURL)\n{\n    return Object::sendMessage<Bundle*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(bundleWithURL_), pURL);\n}\n\n_NS_INLINE NS::Array* NS::Bundle::allBundles()\n{\n    return Object::sendMessage<Array*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(allBundles));\n}\n\n_NS_INLINE NS::Array* NS::Bundle::allFrameworks()\n{\n    return Object::sendMessage<Array*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(allFrameworks));\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::alloc()\n{\n    return Object::sendMessage<Bundle*>(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(alloc));\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::init(const String* pPath)\n{\n    return Object::sendMessage<Bundle*>(this, _NS_PRIVATE_SEL(initWithPath_), pPath);\n}\n\n_NS_INLINE NS::Bundle* NS::Bundle::init(const URL* pURL)\n{\n    return Object::sendMessage<Bundle*>(this, _NS_PRIVATE_SEL(initWithURL_), pURL);\n}\n\n_NS_INLINE bool NS::Bundle::load()\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(load));\n}\n\n_NS_INLINE bool NS::Bundle::unload()\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(unload));\n}\n\n_NS_INLINE bool NS::Bundle::isLoaded() const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(isLoaded));\n}\n\n_NS_INLINE bool NS::Bundle::preflightAndReturnError(Error** pError) const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(preflightAndReturnError_), pError);\n}\n\n_NS_INLINE bool NS::Bundle::loadAndReturnError(Error** pError)\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(loadAndReturnError_), pError);\n}\n\n_NS_INLINE NS::URL* NS::Bundle::bundleURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(bundleURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::resourceURL() const\n{\n    return Object::sendMessage<URL*>(this, 
_NS_PRIVATE_SEL(resourceURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::executableURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(executableURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::URLForAuxiliaryExecutable(const String* pExecutableName) const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(URLForAuxiliaryExecutable_), pExecutableName);\n}\n\n_NS_INLINE NS::URL* NS::Bundle::privateFrameworksURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(privateFrameworksURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::sharedFrameworksURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(sharedFrameworksURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::sharedSupportURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(sharedSupportURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::builtInPlugInsURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(builtInPlugInsURL));\n}\n\n_NS_INLINE NS::URL* NS::Bundle::appStoreReceiptURL() const\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(appStoreReceiptURL));\n}\n\n_NS_INLINE NS::String* NS::Bundle::bundlePath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(bundlePath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::resourcePath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(resourcePath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::executablePath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(executablePath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::pathForAuxiliaryExecutable(const String* pExecutableName) const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(pathForAuxiliaryExecutable_), pExecutableName);\n}\n\n_NS_INLINE NS::String* NS::Bundle::privateFrameworksPath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(privateFrameworksPath));\n}\n\n_NS_INLINE NS::String* 
NS::Bundle::sharedFrameworksPath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(sharedFrameworksPath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::sharedSupportPath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(sharedSupportPath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::builtInPlugInsPath() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(builtInPlugInsPath));\n}\n\n_NS_INLINE NS::String* NS::Bundle::bundleIdentifier() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(bundleIdentifier));\n}\n\n_NS_INLINE NS::Dictionary* NS::Bundle::infoDictionary() const\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(infoDictionary));\n}\n\n_NS_INLINE NS::Dictionary* NS::Bundle::localizedInfoDictionary() const\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(localizedInfoDictionary));\n}\n\n_NS_INLINE NS::Object* NS::Bundle::objectForInfoDictionaryKey(const String* pKey)\n{\n    return Object::sendMessage<Object*>(this, _NS_PRIVATE_SEL(objectForInfoDictionaryKey_), pKey);\n}\n\n_NS_INLINE NS::String* NS::Bundle::localizedString(const String* pKey, const String* pValue /* = nullptr */, const String* pTableName /* = nullptr */) const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(localizedStringForKey_value_table_), pKey, pValue, pTableName);\n}\n\nnamespace NS\n{\nclass Data : public Copying<Data>\n{\npublic:\n    void*    mutableBytes() const;\n    UInteger length() const;\n};\n}\n\n_NS_INLINE void* NS::Data::mutableBytes() const\n{\n    return Object::sendMessage<void*>(this, _NS_PRIVATE_SEL(mutableBytes));\n}\n\n_NS_INLINE NS::UInteger NS::Data::length() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(length));\n}\n\nnamespace NS\n{\n\nusing TimeInterval = double;\n\nclass Date : public Copying<Date>\n{\npublic:\n    static Date* dateWithTimeIntervalSinceNow(TimeInterval secs);\n};\n\n} // 
NS\n\n_NS_INLINE NS::Date* NS::Date::dateWithTimeIntervalSinceNow(NS::TimeInterval secs)\n{\n    return NS::Object::sendMessage<NS::Date*>(_NS_PRIVATE_CLS(NSDate), _NS_PRIVATE_SEL(dateWithTimeIntervalSinceNow_), secs);\n}\n\n//-------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nnamespace NS\n{\nusing ErrorDomain = class String*;\n\n_NS_CONST(ErrorDomain, CocoaErrorDomain);\n_NS_CONST(ErrorDomain, POSIXErrorDomain);\n_NS_CONST(ErrorDomain, OSStatusErrorDomain);\n_NS_CONST(ErrorDomain, MachErrorDomain);\n\nusing ErrorUserInfoKey = class String*;\n\n_NS_CONST(ErrorUserInfoKey, UnderlyingErrorKey);\n_NS_CONST(ErrorUserInfoKey, LocalizedDescriptionKey);\n_NS_CONST(ErrorUserInfoKey, LocalizedFailureReasonErrorKey);\n_NS_CONST(ErrorUserInfoKey, LocalizedRecoverySuggestionErrorKey);\n_NS_CONST(ErrorUserInfoKey, LocalizedRecoveryOptionsErrorKey);\n_NS_CONST(ErrorUserInfoKey, RecoveryAttempterErrorKey);\n_NS_CONST(ErrorUserInfoKey, HelpAnchorErrorKey);\n_NS_CONST(ErrorUserInfoKey, DebugDescriptionErrorKey);\n_NS_CONST(ErrorUserInfoKey, LocalizedFailureErrorKey);\n_NS_CONST(ErrorUserInfoKey, StringEncodingErrorKey);\n_NS_CONST(ErrorUserInfoKey, URLErrorKey);\n_NS_CONST(ErrorUserInfoKey, FilePathErrorKey);\n\nclass Error : public Copying<Error>\n{\npublic:\n    static Error*     error(ErrorDomain domain, Integer code, class Dictionary* pDictionary);\n\n    static Error*     alloc();\n    Error*            init();\n    Error*            init(ErrorDomain domain, Integer code, class Dictionary* pDictionary);\n\n    Integer           code() const;\n    ErrorDomain       domain() const;\n    class Dictionary* userInfo() const;\n\n    class String*     localizedDescription() const;\n    class Array*      localizedRecoveryOptions() const;\n    class String*     localizedRecoverySuggestion() const;\n    class String*     localizedFailureReason() 
const;\n};\n}\n\n_NS_PRIVATE_DEF_CONST(NS::ErrorDomain, CocoaErrorDomain);\n_NS_PRIVATE_DEF_CONST(NS::ErrorDomain, POSIXErrorDomain);\n_NS_PRIVATE_DEF_CONST(NS::ErrorDomain, OSStatusErrorDomain);\n_NS_PRIVATE_DEF_CONST(NS::ErrorDomain, MachErrorDomain);\n\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, UnderlyingErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedDescriptionKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedFailureReasonErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedRecoverySuggestionErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedRecoveryOptionsErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, RecoveryAttempterErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, HelpAnchorErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, DebugDescriptionErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedFailureErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, StringEncodingErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, URLErrorKey);\n_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, FilePathErrorKey);\n\n_NS_INLINE NS::Error* NS::Error::error(ErrorDomain domain, Integer code, class Dictionary* pDictionary)\n{\n    return Object::sendMessage<Error*>(_NS_PRIVATE_CLS(NSError), _NS_PRIVATE_SEL(errorWithDomain_code_userInfo_), domain, code, pDictionary);\n}\n\n_NS_INLINE NS::Error* NS::Error::alloc()\n{\n    return Object::alloc<Error>(_NS_PRIVATE_CLS(NSError));\n}\n\n_NS_INLINE NS::Error* NS::Error::init()\n{\n    return Object::init<Error>();\n}\n\n_NS_INLINE NS::Error* NS::Error::init(ErrorDomain domain, Integer code, class Dictionary* pDictionary)\n{\n    return Object::sendMessage<Error*>(this, _NS_PRIVATE_SEL(initWithDomain_code_userInfo_), domain, code, pDictionary);\n}\n\n_NS_INLINE NS::Integer NS::Error::code() const\n{\n    return Object::sendMessage<Integer>(this, _NS_PRIVATE_SEL(code));\n}\n\n_NS_INLINE NS::ErrorDomain NS::Error::domain() const\n{\n    return 
Object::sendMessage<ErrorDomain>(this, _NS_PRIVATE_SEL(domain));\n}\n\n_NS_INLINE NS::Dictionary* NS::Error::userInfo() const\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(userInfo));\n}\n\n_NS_INLINE NS::String* NS::Error::localizedDescription() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(localizedDescription));\n}\n\n_NS_INLINE NS::Array* NS::Error::localizedRecoveryOptions() const\n{\n    return Object::sendMessage<Array*>(this, _NS_PRIVATE_SEL(localizedRecoveryOptions));\n}\n\n_NS_INLINE NS::String* NS::Error::localizedRecoverySuggestion() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(localizedRecoverySuggestion));\n}\n\n_NS_INLINE NS::String* NS::Error::localizedFailureReason() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(localizedFailureReason));\n}\n\nnamespace NS\n{\n\ntemplate <class _Class, class _Base = class Object>\nclass Locking : public _Base\n{\npublic:\n    void lock();\n    void unlock();\n};\n\nclass Condition : public Locking<Condition>\n{\npublic:\n    static Condition* alloc();\n\n    Condition*        init();\n\n    void              wait();\n    bool              waitUntilDate(Date* pLimit);\n    void              signal();\n    void              broadcast();\n};\n\n} // NS\n\ntemplate<class _Class, class _Base /* = NS::Object */>\n_NS_INLINE void NS::Locking<_Class, _Base>::lock()\n{\n    NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(lock));\n}\n\ntemplate<class _Class, class _Base /* = NS::Object */>\n_NS_INLINE void NS::Locking<_Class, _Base>::unlock()\n{\n    NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(unlock));\n}\n\n_NS_INLINE NS::Condition* NS::Condition::alloc()\n{\n    return NS::Object::alloc<NS::Condition>(_NS_PRIVATE_CLS(NSCondition));\n}\n\n_NS_INLINE NS::Condition* NS::Condition::init()\n{\n    return NS::Object::init<NS::Condition>();\n}\n\n_NS_INLINE void NS::Condition::wait()\n{\n    
NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(wait));\n}\n\n_NS_INLINE bool NS::Condition::waitUntilDate(NS::Date* pLimit)\n{\n    return NS::Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(waitUntilDate_), pLimit);\n}\n\n_NS_INLINE void NS::Condition::signal()\n{\n    NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(signal));\n}\n\n_NS_INLINE void NS::Condition::broadcast()\n{\n    NS::Object::sendMessage<void>(this, _NS_PRIVATE_SEL(broadcast));\n}\n\n//-------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nnamespace NS\n{\nclass Value : public Copying<Value>\n{\npublic:\n    static Value* value(const void* pValue, const char* pType);\n    static Value* value(const void* pPointer);\n\n    static Value* alloc();\n\n    Value*        init(const void* pValue, const char* pType);\n    Value*        init(const class Coder* pCoder);\n\n    void          getValue(void* pValue, UInteger size) const;\n    const char*   objCType() const;\n\n    bool          isEqualToValue(Value* pValue) const;\n    void*         pointerValue() const;\n};\n\nclass Number : public Copying<Number, Value>\n{\npublic:\n    static Number*     number(char value);\n    static Number*     number(unsigned char value);\n    static Number*     number(short value);\n    static Number*     number(unsigned short value);\n    static Number*     number(int value);\n    static Number*     number(unsigned int value);\n    static Number*     number(long value);\n    static Number*     number(unsigned long value);\n    static Number*     number(long long value);\n    static Number*     number(unsigned long long value);\n    static Number*     number(float value);\n    static Number*     number(double value);\n    static Number*     number(bool value);\n\n    static Number*     alloc();\n\n    Number*            init(const class Coder* pCoder);\n    Number*            init(char value);\n    Number* 
           init(unsigned char value);\n    Number*            init(short value);\n    Number*            init(unsigned short value);\n    Number*            init(int value);\n    Number*            init(unsigned int value);\n    Number*            init(long value);\n    Number*            init(unsigned long value);\n    Number*            init(long long value);\n    Number*            init(unsigned long long value);\n    Number*            init(float value);\n    Number*            init(double value);\n    Number*            init(bool value);\n\n    char               charValue() const;\n    unsigned char      unsignedCharValue() const;\n    short              shortValue() const;\n    unsigned short     unsignedShortValue() const;\n    int                intValue() const;\n    unsigned int       unsignedIntValue() const;\n    long               longValue() const;\n    unsigned long      unsignedLongValue() const;\n    long long          longLongValue() const;\n    unsigned long long unsignedLongLongValue() const;\n    float              floatValue() const;\n    double             doubleValue() const;\n    bool               boolValue() const;\n    Integer            integerValue() const;\n    UInteger           unsignedIntegerValue() const;\n    class String*      stringValue() const;\n\n    ComparisonResult   compare(const Number* pOtherNumber) const;\n    bool               isEqualToNumber(const Number* pNumber) const;\n\n    class String*      descriptionWithLocale(const Object* pLocale) const;\n};\n}\n\n_NS_INLINE NS::Value* NS::Value::value(const void* pValue, const char* pType)\n{\n    return Object::sendMessage<Value*>(_NS_PRIVATE_CLS(NSValue), _NS_PRIVATE_SEL(valueWithBytes_objCType_), pValue, pType);\n}\n\n_NS_INLINE NS::Value* NS::Value::value(const void* pPointer)\n{\n    return Object::sendMessage<Value*>(_NS_PRIVATE_CLS(NSValue), _NS_PRIVATE_SEL(valueWithPointer_), pPointer);\n}\n\n_NS_INLINE NS::Value* NS::Value::alloc()\n{\n    return 
NS::Object::alloc<Value>(_NS_PRIVATE_CLS(NSValue));\n}\n\n_NS_INLINE NS::Value* NS::Value::init(const void* pValue, const char* pType)\n{\n    return Object::sendMessage<Value*>(this, _NS_PRIVATE_SEL(initWithBytes_objCType_), pValue, pType);\n}\n\n_NS_INLINE NS::Value* NS::Value::init(const class Coder* pCoder)\n{\n    return Object::sendMessage<Value*>(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder);\n}\n\n_NS_INLINE void NS::Value::getValue(void* pValue, UInteger size) const\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(getValue_size_), pValue, size);\n}\n\n_NS_INLINE const char* NS::Value::objCType() const\n{\n    return Object::sendMessage<const char*>(this, _NS_PRIVATE_SEL(objCType));\n}\n\n_NS_INLINE bool NS::Value::isEqualToValue(Value* pValue) const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(isEqualToValue_), pValue);\n}\n\n_NS_INLINE void* NS::Value::pointerValue() const\n{\n    return Object::sendMessage<void*>(this, _NS_PRIVATE_SEL(pointerValue));\n}\n\n_NS_INLINE NS::Number* NS::Number::number(char value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithChar_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(unsigned char value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedChar_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(short value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithShort_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(unsigned short value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedShort_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(int value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithInt_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(unsigned int value)\n{\n    return 
Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedInt_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(long value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(unsigned long value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(long long value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithLongLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(unsigned long long value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedLongLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(float value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithFloat_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(double value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithDouble_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::number(bool value)\n{\n    return Object::sendMessage<Number*>(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithBool_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::alloc()\n{\n    return NS::Object::alloc<Number>(_NS_PRIVATE_CLS(NSNumber));\n}\n\n_NS_INLINE NS::Number* NS::Number::init(const Coder* pCoder)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(char value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithChar_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(unsigned char value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithUnsignedChar_), value);\n}\n\n_NS_INLINE NS::Number* 
NS::Number::init(short value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithShort_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(unsigned short value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithUnsignedShort_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(int value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithInt_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(unsigned int value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithUnsignedInt_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(long value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(unsigned long value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithUnsignedLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(long long value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithLongLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(unsigned long long value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithUnsignedLongLong_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(float value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithFloat_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(double value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithDouble_), value);\n}\n\n_NS_INLINE NS::Number* NS::Number::init(bool value)\n{\n    return Object::sendMessage<Number*>(this, _NS_PRIVATE_SEL(initWithBool_), value);\n}\n\n_NS_INLINE char NS::Number::charValue() const\n{\n    return Object::sendMessage<char>(this, _NS_PRIVATE_SEL(charValue));\n}\n\n_NS_INLINE unsigned char NS::Number::unsignedCharValue() const\n{\n    return Object::sendMessage<unsigned char>(this, _NS_PRIVATE_SEL(unsignedCharValue));\n}\n\n_NS_INLINE 
short NS::Number::shortValue() const\n{\n    return Object::sendMessage<short>(this, _NS_PRIVATE_SEL(shortValue));\n}\n\n_NS_INLINE unsigned short NS::Number::unsignedShortValue() const\n{\n    return Object::sendMessage<unsigned short>(this, _NS_PRIVATE_SEL(unsignedShortValue));\n}\n\n_NS_INLINE int NS::Number::intValue() const\n{\n    return Object::sendMessage<int>(this, _NS_PRIVATE_SEL(intValue));\n}\n\n_NS_INLINE unsigned int NS::Number::unsignedIntValue() const\n{\n    return Object::sendMessage<unsigned int>(this, _NS_PRIVATE_SEL(unsignedIntValue));\n}\n\n_NS_INLINE long NS::Number::longValue() const\n{\n    return Object::sendMessage<long>(this, _NS_PRIVATE_SEL(longValue));\n}\n\n_NS_INLINE unsigned long NS::Number::unsignedLongValue() const\n{\n    return Object::sendMessage<unsigned long>(this, _NS_PRIVATE_SEL(unsignedLongValue));\n}\n\n_NS_INLINE long long NS::Number::longLongValue() const\n{\n    return Object::sendMessage<long long>(this, _NS_PRIVATE_SEL(longLongValue));\n}\n\n_NS_INLINE unsigned long long NS::Number::unsignedLongLongValue() const\n{\n    return Object::sendMessage<unsigned long long>(this, _NS_PRIVATE_SEL(unsignedLongLongValue));\n}\n\n_NS_INLINE float NS::Number::floatValue() const\n{\n    return Object::sendMessage<float>(this, _NS_PRIVATE_SEL(floatValue));\n}\n\n_NS_INLINE double NS::Number::doubleValue() const\n{\n    return Object::sendMessage<double>(this, _NS_PRIVATE_SEL(doubleValue));\n}\n\n_NS_INLINE bool NS::Number::boolValue() const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(boolValue));\n}\n\n_NS_INLINE NS::Integer NS::Number::integerValue() const\n{\n    return Object::sendMessage<Integer>(this, _NS_PRIVATE_SEL(integerValue));\n}\n\n_NS_INLINE NS::UInteger NS::Number::unsignedIntegerValue() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(unsignedIntegerValue));\n}\n\n_NS_INLINE NS::String* NS::Number::stringValue() const\n{\n    return Object::sendMessage<String*>(this, 
_NS_PRIVATE_SEL(stringValue));\n}\n\n_NS_INLINE NS::ComparisonResult NS::Number::compare(const Number* pOtherNumber) const\n{\n    return Object::sendMessage<ComparisonResult>(this, _NS_PRIVATE_SEL(compare_), pOtherNumber);\n}\n\n_NS_INLINE bool NS::Number::isEqualToNumber(const Number* pNumber) const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(isEqualToNumber_), pNumber);\n}\n\n_NS_INLINE NS::String* NS::Number::descriptionWithLocale(const Object* pLocale) const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(descriptionWithLocale_), pLocale);\n}\n\n#include <functional>\n\nnamespace NS\n{\n_NS_CONST(NotificationName, ProcessInfoThermalStateDidChangeNotification);\n_NS_CONST(NotificationName, ProcessInfoPowerStateDidChangeNotification);\n_NS_CONST(NotificationName, ProcessInfoPerformanceProfileDidChangeNotification);\n\n_NS_ENUM(NS::Integer, ProcessInfoThermalState) {\n    ProcessInfoThermalStateNominal = 0,\n    ProcessInfoThermalStateFair = 1,\n    ProcessInfoThermalStateSerious = 2,\n    ProcessInfoThermalStateCritical = 3\n};\n\n_NS_OPTIONS(std::uint64_t, ActivityOptions) {\n    ActivityIdleDisplaySleepDisabled = (1ULL << 40),\n    ActivityIdleSystemSleepDisabled = (1ULL << 20),\n    ActivitySuddenTerminationDisabled = (1ULL << 14),\n    ActivityAutomaticTerminationDisabled = (1ULL << 15),\n    ActivityUserInitiated = (0x00FFFFFFULL | ActivityIdleSystemSleepDisabled),\n    ActivityUserInitiatedAllowingIdleSystemSleep = (ActivityUserInitiated & ~ActivityIdleSystemSleepDisabled),\n    ActivityBackground = 0x000000FFULL,\n    ActivityLatencyCritical = 0xFF00000000ULL,\n};\n\ntypedef NS::Integer DeviceCertification;\n_NS_CONST(DeviceCertification, DeviceCertificationiPhonePerformanceGaming);\n\ntypedef NS::Integer ProcessPerformanceProfile;\n_NS_CONST(ProcessPerformanceProfile, ProcessPerformanceProfileDefault);\n_NS_CONST(ProcessPerformanceProfile, ProcessPerformanceProfileSustained);\n\nclass ProcessInfo : public 
Referencing<ProcessInfo>\n{\npublic:\n    static ProcessInfo*     processInfo();\n\n    class Array*            arguments() const;\n    class Dictionary*       environment() const;\n    class String*           hostName() const;\n    class String*           processName() const;\n    void                    setProcessName(const String* pString);\n    int                     processIdentifier() const;\n    class String*           globallyUniqueString() const;\n\n    class String*           userName() const;\n    class String*           fullUserName() const;\n\n    UInteger                operatingSystem() const;\n    OperatingSystemVersion  operatingSystemVersion() const;\n    class String*           operatingSystemVersionString() const;\n    bool                    isOperatingSystemAtLeastVersion(OperatingSystemVersion version) const;\n\n    UInteger                processorCount() const;\n    UInteger                activeProcessorCount() const;\n    unsigned long long      physicalMemory() const;\n    TimeInterval            systemUptime() const;\n\n    void                    disableSuddenTermination();\n    void                    enableSuddenTermination();\n\n    void                    disableAutomaticTermination(const class String* pReason);\n    void                    enableAutomaticTermination(const class String* pReason);\n    bool                    automaticTerminationSupportEnabled() const;\n    void                    setAutomaticTerminationSupportEnabled(bool enabled);\n\n    class Object*           beginActivity(ActivityOptions options, const class String* pReason);\n    void                    endActivity(class Object* pActivity);\n    void                    performActivity(ActivityOptions options, const class String* pReason, void (^block)(void));\n    void                    performActivity(ActivityOptions options, const class String* pReason, const std::function<void()>& func);\n    void                    performExpiringActivity(const class 
String* pReason, void (^block)(bool expired));\n    void                    performExpiringActivity(const class String* pReason, const std::function<void(bool expired)>& func);\n\n    ProcessInfoThermalState thermalState() const;\n    bool                    isLowPowerModeEnabled() const;\n\n    bool                    isiOSAppOnMac() const;\n    bool                    isMacCatalystApp() const;\n\n    bool                    isDeviceCertified(DeviceCertification performanceTier) const;\n    bool                    hasPerformanceProfile(ProcessPerformanceProfile performanceProfile) const;\n\n};\n}\n\n_NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoThermalStateDidChangeNotification);\n_NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoPowerStateDidChangeNotification);\n\n_NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoPerformanceProfileDidChangeNotification);\n_NS_PRIVATE_DEF_CONST(NS::DeviceCertification, DeviceCertificationiPhonePerformanceGaming);\n_NS_PRIVATE_DEF_CONST(NS::ProcessPerformanceProfile, ProcessPerformanceProfileDefault);\n_NS_PRIVATE_DEF_CONST(NS::ProcessPerformanceProfile, ProcessPerformanceProfileSustained);\n\n_NS_INLINE NS::ProcessInfo* NS::ProcessInfo::processInfo()\n{\n    return Object::sendMessage<ProcessInfo*>(_NS_PRIVATE_CLS(NSProcessInfo), _NS_PRIVATE_SEL(processInfo));\n}\n\n_NS_INLINE NS::Array* NS::ProcessInfo::arguments() const\n{\n    return Object::sendMessage<Array*>(this, _NS_PRIVATE_SEL(arguments));\n}\n\n_NS_INLINE NS::Dictionary* NS::ProcessInfo::environment() const\n{\n    return Object::sendMessage<Dictionary*>(this, _NS_PRIVATE_SEL(environment));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::hostName() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(hostName));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::processName() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(processName));\n}\n\n_NS_INLINE void NS::ProcessInfo::setProcessName(const String* 
pString)\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(setProcessName_), pString);\n}\n\n_NS_INLINE int NS::ProcessInfo::processIdentifier() const\n{\n    return Object::sendMessage<int>(this, _NS_PRIVATE_SEL(processIdentifier));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::globallyUniqueString() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(globallyUniqueString));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::userName() const\n{\n    return Object::sendMessageSafe<String*>(this, _NS_PRIVATE_SEL(userName));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::fullUserName() const\n{\n    return Object::sendMessageSafe<String*>(this, _NS_PRIVATE_SEL(fullUserName));\n}\n\n_NS_INLINE NS::UInteger NS::ProcessInfo::operatingSystem() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(operatingSystem));\n}\n\n_NS_INLINE NS::OperatingSystemVersion NS::ProcessInfo::operatingSystemVersion() const\n{\n    return Object::sendMessage<OperatingSystemVersion>(this, _NS_PRIVATE_SEL(operatingSystemVersion));\n}\n\n_NS_INLINE NS::String* NS::ProcessInfo::operatingSystemVersionString() const\n{\n    return Object::sendMessage<String*>(this, _NS_PRIVATE_SEL(operatingSystemVersionString));\n}\n\n_NS_INLINE bool NS::ProcessInfo::isOperatingSystemAtLeastVersion(OperatingSystemVersion version) const\n{\n    return Object::sendMessage<bool>(this, _NS_PRIVATE_SEL(isOperatingSystemAtLeastVersion_), version);\n}\n\n_NS_INLINE NS::UInteger NS::ProcessInfo::processorCount() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(processorCount));\n}\n\n_NS_INLINE NS::UInteger NS::ProcessInfo::activeProcessorCount() const\n{\n    return Object::sendMessage<UInteger>(this, _NS_PRIVATE_SEL(activeProcessorCount));\n}\n\n_NS_INLINE unsigned long long NS::ProcessInfo::physicalMemory() const\n{\n    return Object::sendMessage<unsigned long long>(this, _NS_PRIVATE_SEL(physicalMemory));\n}\n\n_NS_INLINE NS::TimeInterval 
NS::ProcessInfo::systemUptime() const\n{\n    return Object::sendMessage<TimeInterval>(this, _NS_PRIVATE_SEL(systemUptime));\n}\n\n_NS_INLINE void NS::ProcessInfo::disableSuddenTermination()\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(disableSuddenTermination));\n}\n\n_NS_INLINE void NS::ProcessInfo::enableSuddenTermination()\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(enableSuddenTermination));\n}\n\n_NS_INLINE void NS::ProcessInfo::disableAutomaticTermination(const String* pReason)\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(disableAutomaticTermination_), pReason);\n}\n\n_NS_INLINE void NS::ProcessInfo::enableAutomaticTermination(const String* pReason)\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(enableAutomaticTermination_), pReason);\n}\n\n_NS_INLINE bool NS::ProcessInfo::automaticTerminationSupportEnabled() const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(automaticTerminationSupportEnabled));\n}\n\n_NS_INLINE void NS::ProcessInfo::setAutomaticTerminationSupportEnabled(bool enabled)\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(setAutomaticTerminationSupportEnabled_), enabled);\n}\n\n_NS_INLINE NS::Object* NS::ProcessInfo::beginActivity(ActivityOptions options, const String* pReason)\n{\n    return Object::sendMessage<Object*>(this, _NS_PRIVATE_SEL(beginActivityWithOptions_reason_), options, pReason);\n}\n\n_NS_INLINE void NS::ProcessInfo::endActivity(Object* pActivity)\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(endActivity_), pActivity);\n}\n\n_NS_INLINE void NS::ProcessInfo::performActivity(ActivityOptions options, const String* pReason, void (^block)(void))\n{\n    Object::sendMessage<void>(this, _NS_PRIVATE_SEL(performActivityWithOptions_reason_usingBlock_), options, pReason, block);\n}\n\n_NS_INLINE void NS::ProcessInfo::performActivity(ActivityOptions options, const String* pReason, const std::function<void()>& function)\n{\n    __block 
std::function<void()> blockFunction = function;\n\n    performActivity(options, pReason, ^() { blockFunction(); });\n}\n\n_NS_INLINE void NS::ProcessInfo::performExpiringActivity(const String* pReason, void (^block)(bool expired))\n{\n    Object::sendMessageSafe<void>(this, _NS_PRIVATE_SEL(performExpiringActivityWithReason_usingBlock_), pReason, block);\n}\n\n_NS_INLINE void NS::ProcessInfo::performExpiringActivity(const String* pReason, const std::function<void(bool expired)>& function)\n{\n    __block std::function<void(bool expired)> blockFunction = function;\n\n    performExpiringActivity(pReason, ^(bool expired) { blockFunction(expired); });\n}\n\n_NS_INLINE NS::ProcessInfoThermalState NS::ProcessInfo::thermalState() const\n{\n    return Object::sendMessage<ProcessInfoThermalState>(this, _NS_PRIVATE_SEL(thermalState));\n}\n\n_NS_INLINE bool NS::ProcessInfo::isLowPowerModeEnabled() const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(isLowPowerModeEnabled));\n}\n\n_NS_INLINE bool NS::ProcessInfo::isiOSAppOnMac() const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(isiOSAppOnMac));\n}\n\n_NS_INLINE bool NS::ProcessInfo::isMacCatalystApp() const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(isMacCatalystApp));\n}\n\n_NS_INLINE bool NS::ProcessInfo::isDeviceCertified(DeviceCertification performanceTier) const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(isDeviceCertified_), performanceTier);\n}\n\n_NS_INLINE bool NS::ProcessInfo::hasPerformanceProfile(ProcessPerformanceProfile performanceProfile) const\n{\n    return Object::sendMessageSafe<bool>(this, _NS_PRIVATE_SEL(hasPerformanceProfile_), performanceProfile);\n}\n\n/*****Immutable Set*******/\n\nnamespace NS\n{\n    class Set : public NS::Copying <Set>\n    {\n        public:\n            UInteger count() const;\n            Enumerator<Object>* objectEnumerator() const;\n\n            static Set* alloc();\n\n            Set* 
init();\n            Set* init(const Object* const* pObjects, UInteger count);\n            Set* init(const class Coder* pCoder);\n\n    };\n}\n\n_NS_INLINE NS::UInteger NS::Set::count() const\n{\n    return NS::Object::sendMessage<NS::UInteger>(this, _NS_PRIVATE_SEL(count));\n}\n\n_NS_INLINE NS::Enumerator<NS::Object>* NS::Set::objectEnumerator() const\n{\n    return NS::Object::sendMessage<Enumerator<NS::Object>*>(this, _NS_PRIVATE_SEL(objectEnumerator));\n}\n\n_NS_INLINE NS::Set* NS::Set::alloc()\n{\n    return NS::Object::alloc<Set>(_NS_PRIVATE_CLS(NSSet));\n}\n\n_NS_INLINE NS::Set* NS::Set::init()\n{\n    return NS::Object::init<Set>();\n}\n\n_NS_INLINE NS::Set* NS::Set::init(const Object* const* pObjects, NS::UInteger count)\n{\n    return NS::Object::sendMessage<Set*>(this, _NS_PRIVATE_SEL(initWithObjects_count_), pObjects, count);\n}\n\n_NS_INLINE NS::Set* NS::Set::init(const class Coder* pCoder)\n{\n    return Object::sendMessage<Set*>(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder);\n}\n\n#pragma once\n\n#include <cstddef>\n\nnamespace NS\n{\ntemplate <class _Class>\nclass SharedPtr\n{\npublic:\n    /**\n     * Create a new null pointer.\n     */\n    SharedPtr();\n\n    /**\n     * Destroy this SharedPtr, decreasing the reference count.\n     */\n    ~SharedPtr();\n\n    /**\n     * Create a new null pointer.\n     */\n    SharedPtr(std::nullptr_t) noexcept;\n\n    /**\n     * SharedPtr copy constructor.\n     */\n    SharedPtr(const SharedPtr<_Class>& other) noexcept;\n\n    /**\n     * Construction from another pointee type.\n     */\n    template <class _OtherClass>\n    SharedPtr(const SharedPtr<_OtherClass>& other, typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>> * = nullptr) noexcept;\n\n    /**\n     * SharedPtr move constructor.\n     */\n    SharedPtr(SharedPtr<_Class>&& other) noexcept;\n\n    /**\n     * Move from another pointee type.\n     */\n    template <class _OtherClass>\n    SharedPtr(SharedPtr<_OtherClass>&& 
other, typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>> * = nullptr) noexcept;\n\n    /**\n     * Copy assignment operator.\n     * Copying increases reference count. Only releases previous pointee if objects are different.\n     */\n    SharedPtr& operator=(const SharedPtr<_Class>& other);\n\n    /**\n     * Copy-assignment from different pointee.\n     * Copying increases reference count. Only releases previous pointee if objects are different.\n     */\n    template <class _OtherClass>\n    typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>, SharedPtr &>\n    operator=(const SharedPtr<_OtherClass>& other);\n\n    /**\n     * Move assignment operator.\n     * Move without affecting reference counts, unless pointees are equal. Moved-from object is reset to nullptr.\n     */\n    SharedPtr& operator=(SharedPtr<_Class>&& other);\n\n    /**\n     * Move-asignment from different pointee.\n     * Move without affecting reference counts, unless pointees are equal. 
Moved-from object is reset to nullptr.\n     */\n    template <class _OtherClass>\n    typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>, SharedPtr &>\n    operator=(SharedPtr<_OtherClass>&& other);\n\n    /**\n     * Access raw pointee.\n     * @warning Avoid wrapping the returned value again, as it may lead double frees unless this object becomes detached.\n     */\n    _Class* get() const;\n\n    /**\n     * Call operations directly on the pointee.\n     */\n    _Class* operator->() const;\n\n    /**\n     * Implicit cast to bool.\n     */\n    explicit operator bool() const;\n\n    /**\n     * Reset this SharedPtr to null, decreasing the reference count.\n     */\n    void reset();\n\n    /**\n     * Detach the SharedPtr from the pointee, without decreasing the reference count.\n     */\n    void detach();\n\n    template <class _OtherClass>\n    friend SharedPtr<_OtherClass> RetainPtr(_OtherClass* ptr);\n\n    template <class _OtherClass>\n    friend SharedPtr<_OtherClass> TransferPtr(_OtherClass* ptr);\n\nprivate:\n    _Class* m_pObject;\n};\n\n/**\n * Create a SharedPtr by retaining an existing raw pointer.\n * Increases the reference count of the passed-in object.\n * If the passed-in object was in an AutoreleasePool, it will be removed from it.\n */\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class> RetainPtr(_Class* pObject)\n{\n    NS::SharedPtr<_Class> ret;\n    ret.m_pObject = pObject->retain();\n    return ret;\n}\n\n/*\n * Create a SharedPtr by transfering the ownership of an existing raw pointer to SharedPtr.\n * Does not increase the reference count of the passed-in pointer, it is assumed to be >= 1.\n * This method does not remove objects from an AutoreleasePool.\n*/\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class> TransferPtr(_Class* pObject)\n{\n    NS::SharedPtr<_Class> ret;\n    ret.m_pObject = pObject;\n    return ret;\n}\n\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr()\n  
  : m_pObject(nullptr)\n{\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::~SharedPtr<_Class>() __attribute__((no_sanitize(\"undefined\")))\n{\n    m_pObject->release();\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr(std::nullptr_t) noexcept\n    : m_pObject(nullptr)\n{\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr(const SharedPtr<_Class>& other) noexcept\n    : m_pObject(other.m_pObject->retain())\n{\n}\n\ntemplate <class _Class>\ntemplate <class _OtherClass>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr(const SharedPtr<_OtherClass>& other, typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>> *) noexcept\n    : m_pObject(reinterpret_cast<_Class*>(other.get()->retain()))\n{\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr(SharedPtr<_Class>&& other) noexcept\n    : m_pObject(other.m_pObject)\n{\n    other.m_pObject = nullptr;\n}\n\ntemplate <class _Class>\ntemplate <class _OtherClass>\n_NS_INLINE NS::SharedPtr<_Class>::SharedPtr(SharedPtr<_OtherClass>&& other, typename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>> *) noexcept\n    : m_pObject(reinterpret_cast<_Class*>(other.get()))\n{\n    other.detach();\n}\n\ntemplate <class _Class>\n_NS_INLINE _Class* NS::SharedPtr<_Class>::get() const\n{\n    return m_pObject;\n}\n\ntemplate <class _Class>\n_NS_INLINE _Class* NS::SharedPtr<_Class>::operator->() const\n{\n    return m_pObject;\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>::operator bool() const\n{\n    return nullptr != m_pObject;\n}\n\ntemplate <class _Class>\n_NS_INLINE void NS::SharedPtr<_Class>::reset() __attribute__((no_sanitize(\"undefined\")))\n{\n    m_pObject->release();\n    m_pObject = nullptr;\n}\n\ntemplate <class _Class>\n_NS_INLINE void NS::SharedPtr<_Class>::detach()\n{\n    m_pObject = nullptr;\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>& 
NS::SharedPtr<_Class>::operator=(const SharedPtr<_Class>& other) __attribute__((no_sanitize(\"undefined\")))\n{\n    _Class* pOldObject = m_pObject;\n\n    m_pObject = other.m_pObject->retain();\n\n    pOldObject->release();\n\n    return *this;\n}\n\ntemplate <class _Class>\ntemplate <class _OtherClass>\ntypename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>, NS::SharedPtr<_Class> &>\n_NS_INLINE NS::SharedPtr<_Class>::operator=(const SharedPtr<_OtherClass>& other) __attribute__((no_sanitize(\"undefined\")))\n{\n    _Class* pOldObject = m_pObject;\n\n    m_pObject = reinterpret_cast<_Class*>(other.get()->retain());\n\n    pOldObject->release();\n\n    return *this;\n}\n\ntemplate <class _Class>\n_NS_INLINE NS::SharedPtr<_Class>& NS::SharedPtr<_Class>::operator=(SharedPtr<_Class>&& other) __attribute__((no_sanitize(\"undefined\")))\n{\n    if (m_pObject != other.m_pObject)\n    {\n        m_pObject->release();\n        m_pObject = other.m_pObject;\n    }\n    else\n    {\n        m_pObject = other.m_pObject;\n        other.m_pObject->release();\n    }\n    other.m_pObject = nullptr;\n    return *this;\n}\n\ntemplate <class _Class>\ntemplate <class _OtherClass>\ntypename std::enable_if_t<std::is_convertible_v<_OtherClass *, _Class *>, NS::SharedPtr<_Class> &>\n_NS_INLINE NS::SharedPtr<_Class>::operator=(SharedPtr<_OtherClass>&& other) __attribute__((no_sanitize(\"undefined\")))\n{\n    if (m_pObject != other.get())\n    {\n        m_pObject->release();\n        m_pObject = reinterpret_cast<_Class*>(other.get());\n        other.detach();\n    }\n    else\n    {\n        m_pObject = other.get();\n        other.reset();\n    }\n    return *this;\n}\n\ntemplate <class _ClassLhs, class _ClassRhs>\n_NS_INLINE bool operator==(const NS::SharedPtr<_ClassLhs>& lhs, const NS::SharedPtr<_ClassRhs>& rhs)\n{\n    return lhs.get() == rhs.get();\n}\n\ntemplate <class _ClassLhs, class _ClassRhs>\n_NS_INLINE bool operator!=(const NS::SharedPtr<_ClassLhs>& lhs, const 
NS::SharedPtr<_ClassRhs>& rhs)\n{\n    return lhs.get() != rhs.get();\n}\n\nnamespace NS\n{\nclass URL : public Copying<URL>\n{\npublic:\n    static URL* fileURLWithPath(const class String* pPath);\n\n    static URL* alloc();\n    URL*        init();\n    URL*        init(const class String* pString);\n    URL*        initFileURLWithPath(const class String* pPath);\n\n    const char* fileSystemRepresentation() const;\n};\n}\n\n_NS_INLINE NS::URL* NS::URL::fileURLWithPath(const String* pPath)\n{\n    return Object::sendMessage<URL*>(_NS_PRIVATE_CLS(NSURL), _NS_PRIVATE_SEL(fileURLWithPath_), pPath);\n}\n\n_NS_INLINE NS::URL* NS::URL::alloc()\n{\n    return Object::alloc<URL>(_NS_PRIVATE_CLS(NSURL));\n}\n\n_NS_INLINE NS::URL* NS::URL::init()\n{\n    return Object::init<URL>();\n}\n\n_NS_INLINE NS::URL* NS::URL::init(const String* pString)\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(initWithString_), pString);\n}\n\n_NS_INLINE NS::URL* NS::URL::initFileURLWithPath(const String* pPath)\n{\n    return Object::sendMessage<URL*>(this, _NS_PRIVATE_SEL(initFileURLWithPath_), pPath);\n}\n\n_NS_INLINE const char* NS::URL::fileSystemRepresentation() const\n{\n    return Object::sendMessage<const char*>(this, _NS_PRIVATE_SEL(fileSystemRepresentation));\n}\n\n#pragma once\n\n#define _MTL_EXPORT _NS_EXPORT\n#define _MTL_EXTERN _NS_EXTERN\n#define _MTL_INLINE _NS_INLINE\n#define _MTL_PACKED _NS_PACKED\n\n#define _MTL_CONST(type, name) _NS_CONST(type, name)\n#define _MTL_ENUM(type, name) _NS_ENUM(type, name)\n#define _MTL_OPTIONS(type, name) _NS_OPTIONS(type, name)\n\n#define _MTL_VALIDATE_SIZE(ns, name) _NS_VALIDATE_SIZE(ns, name)\n#define _MTL_VALIDATE_ENUM(ns, name) _NS_VALIDATE_ENUM(ns, name)\n\n#pragma once\n\n#include <objc/runtime.h>\n\n#define _MTL_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol)\n#define _MTL_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor)\n\n#if defined(MTL_PRIVATE_IMPLEMENTATION)\n\n#ifdef 
METALCPP_SYMBOL_VISIBILITY_HIDDEN\n#define _MTL_PRIVATE_VISIBILITY __attribute__((visibility(\"hidden\")))\n#else\n#define _MTL_PRIVATE_VISIBILITY __attribute__((visibility(\"default\")))\n#endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN\n\n#define _MTL_PRIVATE_IMPORT __attribute__((weak_import))\n\n#ifdef __OBJC__\n#define _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol))\n#define _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge void*)objc_getProtocol(#symbol))\n#else\n#define _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol)\n#define _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol)\n#endif // __OBJC__\n\n#define _MTL_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _MTL_PRIVATE_VISIBILITY = _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol)\n#define _MTL_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _MTL_PRIVATE_VISIBILITY = _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol)\n#define _MTL_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _MTL_PRIVATE_VISIBILITY = sel_registerName(symbol)\n\n#include <dlfcn.h>\n#define MTL_DEF_FUNC( name, signature ) \\\n    using Fn##name = signature; \\\n    Fn##name name = reinterpret_cast< Fn##name >( dlsym( RTLD_DEFAULT, #name ) )\n\nnamespace MTL::Private\n{\n    template <typename _Type>\n    inline _Type const LoadSymbol(const char* pSymbol)\n    {\n        const _Type* pAddress = static_cast<_Type*>(dlsym(RTLD_DEFAULT, pSymbol));\n\n        return pAddress ? *pAddress : nullptr;\n    }\n} // MTL::Private\n\n#if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0)\n\n#define _MTL_PRIVATE_DEF_STR(type, symbol)                  \\\n    _MTL_EXTERN type const MTL##symbol _MTL_PRIVATE_IMPORT; \\\n    type const                         MTL::symbol = (nullptr != &MTL##symbol) ? 
MTL##symbol : nullptr\n\n#define _MTL_PRIVATE_DEF_CONST(type, symbol)              \\\n    _MTL_EXTERN type const MTL##symbol _MTL_PRIVATE_IMPORT; \\\n    type const                         MTL::symbol = (nullptr != &MTL##symbol) ? MTL##symbol : nullptr\n\n#define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) \\\n    _MTL_EXTERN type const MTL##symbol;    \\\n    type const             MTL::symbol = Private::LoadSymbol<type>(\"MTL\" #symbol)\n\n#else\n\n#define _MTL_PRIVATE_DEF_STR(type, symbol) \\\n    _MTL_EXTERN type const MTL##symbol;    \\\n    type const             MTL::symbol = Private::LoadSymbol<type>(\"MTL\" #symbol)\n\n#define _MTL_PRIVATE_DEF_CONST(type, symbol) \\\n    _MTL_EXTERN type const MTL##symbol;    \\\n    type const             MTL::symbol = Private::LoadSymbol<type>(\"MTL\" #symbol)\n\n#define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) _MTL_PRIVATE_DEF_CONST(type, symbol)\n\n#endif // defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0)\n\n#else\n\n#define _MTL_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol\n#define _MTL_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol\n#define _MTL_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor\n#define _MTL_PRIVATE_DEF_STR(type, symbol) extern type const MTL::symbol\n#define _MTL_PRIVATE_DEF_CONST(type, symbol) extern type const MTL::symbol\n#define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) extern type const MTL::symbol\n\n#endif // MTL_PRIVATE_IMPLEMENTATION\n\nnamespace MTL\n{\nnamespace Private\n{\n    namespace Class\n    {\n\n    } // Class\n} // Private\n} // MTL\n\nnamespace MTL\n{\nnamespace Private\n{\n    namespace Protocol\n    {\n\n    } // Protocol\n} // Private\n} // MTL\n\nnamespace MTL\n{\nnamespace Private\n{\n    namespace Selector\n    {\n\n        _MTL_PRIVATE_DEF_SEL(beginScope,\n            \"beginScope\");\n        _MTL_PRIVATE_DEF_SEL(endScope,\n            \"endScope\");\n    } // Class\n} // Private\n} // MTL\n\nnamespace 
MTL::Private::Class\n{\n\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureCurveGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureTriangleGeometryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLArchitecture);\n_MTL_PRIVATE_DEF_CLS(MTLArgument);\n_MTL_PRIVATE_DEF_CLS(MTLArgumentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLArrayType);\n_MTL_PRIVATE_DEF_CLS(MTLAttribute);\n_MTL_PRIVATE_DEF_CLS(MTLAttributeDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLAttributeDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLBinaryArchiveDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLBlitPassDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLBlitPassSampleBufferAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLBlitPassSampleBufferAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLBufferLayoutDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLBufferLayoutDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLCaptureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLCaptureManager);\n_MTL_PRIVATE_DEF_CLS(MTLCommandBufferDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLCommandQueueDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLCompileOptions);\n_MTL_PRIVATE_DEF_CLS(MTLComputePassDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLComputePassSampleBufferAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLComputePassSampleBufferAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLComputePipelineDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLC
omputePipelineReflection);\n_MTL_PRIVATE_DEF_CLS(MTLCounterSampleBufferDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLDepthStencilDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionConstant);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionConstantValues);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingAttributeAlwaysInline);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingFunctionNode);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingGraph);\n_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingInputNode);\n_MTL_PRIVATE_DEF_CLS(MTLHeapDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLIOCommandQueueDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLIndirectCommandBufferDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLIndirectInstanceAccelerationStructureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLInstanceAccelerationStructureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLIntersectionFunctionDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLIntersectionFunctionTableDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLLinkedFunctions);\n_MTL_PRIVATE_DEF_CLS(MTLLogStateDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLMeshRenderPipelineDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLMotionKeyframeData);\n_MTL_PRIVATE_DEF_CLS(MTLPipelineBufferDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLPipelineBufferDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLPointerType);\n_MTL_PRIVATE_DEF_CLS(MTLPrimitiveAccelerationStructureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateLayerArray);\n_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateLayerDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateMapDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateSampleArray);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassColorAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassColorAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassDepthAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassSampleBufferAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPassSampleBufferAttachmentDescriptorArray);\n_
MTL_PRIVATE_DEF_CLS(MTLRenderPassStencilAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineColorAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineColorAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineFunctionsDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineReflection);\n_MTL_PRIVATE_DEF_CLS(MTLResidencySetDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLSamplerDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLSharedEventHandle);\n_MTL_PRIVATE_DEF_CLS(MTLSharedEventListener);\n_MTL_PRIVATE_DEF_CLS(MTLSharedTextureHandle);\n_MTL_PRIVATE_DEF_CLS(MTLStageInputOutputDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLStencilDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLStitchedLibraryDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLStructMember);\n_MTL_PRIVATE_DEF_CLS(MTLStructType);\n_MTL_PRIVATE_DEF_CLS(MTLTextureDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLTextureReferenceType);\n_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineColorAttachmentDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineColorAttachmentDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLType);\n_MTL_PRIVATE_DEF_CLS(MTLVertexAttribute);\n_MTL_PRIVATE_DEF_CLS(MTLVertexAttributeDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLVertexAttributeDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLVertexBufferLayoutDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLVertexBufferLayoutDescriptorArray);\n_MTL_PRIVATE_DEF_CLS(MTLVertexDescriptor);\n_MTL_PRIVATE_DEF_CLS(MTLVisibleFunctionTableDescriptor);\n\n}\n\nnamespace 
MTL::Private::Protocol\n{\n\n_MTL_PRIVATE_DEF_PRO(MTLAccelerationStructure);\n_MTL_PRIVATE_DEF_PRO(MTLAccelerationStructureCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLAllocation);\n_MTL_PRIVATE_DEF_PRO(MTLArgumentEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLBinaryArchive);\n_MTL_PRIVATE_DEF_PRO(MTLBinding);\n_MTL_PRIVATE_DEF_PRO(MTLBlitCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLBufferBinding);\n_MTL_PRIVATE_DEF_PRO(MTLCommandBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLCommandBufferEncoderInfo);\n_MTL_PRIVATE_DEF_PRO(MTLCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLCommandQueue);\n_MTL_PRIVATE_DEF_PRO(MTLComputeCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLComputePipelineState);\n_MTL_PRIVATE_DEF_PRO(MTLCounter);\n_MTL_PRIVATE_DEF_PRO(MTLCounterSampleBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLCounterSet);\n_MTL_PRIVATE_DEF_PRO(MTLDepthStencilState);\n_MTL_PRIVATE_DEF_PRO(MTLDevice);\n_MTL_PRIVATE_DEF_PRO(MTLDrawable);\n_MTL_PRIVATE_DEF_PRO(MTLDynamicLibrary);\n_MTL_PRIVATE_DEF_PRO(MTLEvent);\n_MTL_PRIVATE_DEF_PRO(MTLFence);\n_MTL_PRIVATE_DEF_PRO(MTLFunction);\n_MTL_PRIVATE_DEF_PRO(MTLFunctionHandle);\n_MTL_PRIVATE_DEF_PRO(MTLFunctionLog);\n_MTL_PRIVATE_DEF_PRO(MTLFunctionLogDebugLocation);\n_MTL_PRIVATE_DEF_PRO(MTLFunctionStitchingAttribute);\n_MTL_PRIVATE_DEF_PRO(MTLFunctionStitchingNode);\n_MTL_PRIVATE_DEF_PRO(MTLHeap);\n_MTL_PRIVATE_DEF_PRO(MTLIOCommandBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLIOCommandQueue);\n_MTL_PRIVATE_DEF_PRO(MTLIOFileHandle);\n_MTL_PRIVATE_DEF_PRO(MTLIOScratchBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLIOScratchBufferAllocator);\n_MTL_PRIVATE_DEF_PRO(MTLIndirectCommandBuffer);\n_MTL_PRIVATE_DEF_PRO(MTLIndirectComputeCommand);\n_MTL_PRIVATE_DEF_PRO(MTLIndirectRenderCommand);\n_MTL_PRIVATE_DEF_PRO(MTLIntersectionFunctionTable);\n_MTL_PRIVATE_DEF_PRO(MTLLibrary);\n_MTL_PRIVATE_DEF_PRO(MTLLogContainer);\n_MTL_PRIVATE_DEF_PRO(MTLLogState);\n_MTL_PRIVATE_DEF_PRO(MTLObjectPayloadBinding);\n_MTL_PRIVATE_DEF_PRO(MTLParallelRenderCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLRa
sterizationRateMap);\n_MTL_PRIVATE_DEF_PRO(MTLRenderCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLRenderPipelineState);\n_MTL_PRIVATE_DEF_PRO(MTLResidencySet);\n_MTL_PRIVATE_DEF_PRO(MTLResource);\n_MTL_PRIVATE_DEF_PRO(MTLResourceStateCommandEncoder);\n_MTL_PRIVATE_DEF_PRO(MTLSamplerState);\n_MTL_PRIVATE_DEF_PRO(MTLSharedEvent);\n_MTL_PRIVATE_DEF_PRO(MTLTexture);\n_MTL_PRIVATE_DEF_PRO(MTLTextureBinding);\n_MTL_PRIVATE_DEF_PRO(MTLThreadgroupBinding);\n_MTL_PRIVATE_DEF_PRO(MTLVisibleFunctionTable);\n\n}\n\nnamespace MTL::Private::Selector\n{\n\n_MTL_PRIVATE_DEF_SEL(GPUEndTime,\n    \"GPUEndTime\");\n_MTL_PRIVATE_DEF_SEL(GPUStartTime,\n    \"GPUStartTime\");\n_MTL_PRIVATE_DEF_SEL(URL,\n    \"URL\");\n_MTL_PRIVATE_DEF_SEL(accelerationStructureCommandEncoder,\n    \"accelerationStructureCommandEncoder\");\n_MTL_PRIVATE_DEF_SEL(accelerationStructureCommandEncoderWithDescriptor_,\n    \"accelerationStructureCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(accelerationStructurePassDescriptor,\n    \"accelerationStructurePassDescriptor\");\n_MTL_PRIVATE_DEF_SEL(accelerationStructureSizesWithDescriptor_,\n    \"accelerationStructureSizesWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(access,\n    \"access\");\n_MTL_PRIVATE_DEF_SEL(addAllocation_,\n    \"addAllocation:\");\n_MTL_PRIVATE_DEF_SEL(addAllocations_count_,\n    \"addAllocations:count:\");\n_MTL_PRIVATE_DEF_SEL(addBarrier,\n    \"addBarrier\");\n_MTL_PRIVATE_DEF_SEL(addCompletedHandler_,\n    \"addCompletedHandler:\");\n_MTL_PRIVATE_DEF_SEL(addComputePipelineFunctionsWithDescriptor_error_,\n    \"addComputePipelineFunctionsWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(addDebugMarker_range_,\n    \"addDebugMarker:range:\");\n_MTL_PRIVATE_DEF_SEL(addFunctionWithDescriptor_library_error_,\n    \"addFunctionWithDescriptor:library:error:\");\n_MTL_PRIVATE_DEF_SEL(addLibraryWithDescriptor_error_,\n    \"addLibraryWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(addLogHandler_,\n    
\"addLogHandler:\");\n_MTL_PRIVATE_DEF_SEL(addMeshRenderPipelineFunctionsWithDescriptor_error_,\n    \"addMeshRenderPipelineFunctionsWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(addPresentedHandler_,\n    \"addPresentedHandler:\");\n_MTL_PRIVATE_DEF_SEL(addRenderPipelineFunctionsWithDescriptor_error_,\n    \"addRenderPipelineFunctionsWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(addResidencySet_,\n    \"addResidencySet:\");\n_MTL_PRIVATE_DEF_SEL(addResidencySets_count_,\n    \"addResidencySets:count:\");\n_MTL_PRIVATE_DEF_SEL(addScheduledHandler_,\n    \"addScheduledHandler:\");\n_MTL_PRIVATE_DEF_SEL(addTileRenderPipelineFunctionsWithDescriptor_error_,\n    \"addTileRenderPipelineFunctionsWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(alignment,\n    \"alignment\");\n_MTL_PRIVATE_DEF_SEL(allAllocations,\n    \"allAllocations\");\n_MTL_PRIVATE_DEF_SEL(allocatedSize,\n    \"allocatedSize\");\n_MTL_PRIVATE_DEF_SEL(allocationCount,\n    \"allocationCount\");\n_MTL_PRIVATE_DEF_SEL(allowDuplicateIntersectionFunctionInvocation,\n    \"allowDuplicateIntersectionFunctionInvocation\");\n_MTL_PRIVATE_DEF_SEL(allowGPUOptimizedContents,\n    \"allowGPUOptimizedContents\");\n_MTL_PRIVATE_DEF_SEL(allowReferencingUndefinedSymbols,\n    \"allowReferencingUndefinedSymbols\");\n_MTL_PRIVATE_DEF_SEL(alphaBlendOperation,\n    \"alphaBlendOperation\");\n_MTL_PRIVATE_DEF_SEL(architecture,\n    \"architecture\");\n_MTL_PRIVATE_DEF_SEL(areBarycentricCoordsSupported,\n    \"areBarycentricCoordsSupported\");\n_MTL_PRIVATE_DEF_SEL(areProgrammableSamplePositionsSupported,\n    \"areProgrammableSamplePositionsSupported\");\n_MTL_PRIVATE_DEF_SEL(areRasterOrderGroupsSupported,\n    \"areRasterOrderGroupsSupported\");\n_MTL_PRIVATE_DEF_SEL(argumentBuffersSupport,\n    \"argumentBuffersSupport\");\n_MTL_PRIVATE_DEF_SEL(argumentDescriptor,\n    \"argumentDescriptor\");\n_MTL_PRIVATE_DEF_SEL(argumentIndex,\n    \"argumentIndex\");\n_MTL_PRIVATE_DEF_SEL(argumentIndexStride,\n    
\"argumentIndexStride\");\n_MTL_PRIVATE_DEF_SEL(arguments,\n    \"arguments\");\n_MTL_PRIVATE_DEF_SEL(arrayLength,\n    \"arrayLength\");\n_MTL_PRIVATE_DEF_SEL(arrayType,\n    \"arrayType\");\n_MTL_PRIVATE_DEF_SEL(attributeIndex,\n    \"attributeIndex\");\n_MTL_PRIVATE_DEF_SEL(attributeType,\n    \"attributeType\");\n_MTL_PRIVATE_DEF_SEL(attributes,\n    \"attributes\");\n_MTL_PRIVATE_DEF_SEL(backFaceStencil,\n    \"backFaceStencil\");\n_MTL_PRIVATE_DEF_SEL(binaryArchives,\n    \"binaryArchives\");\n_MTL_PRIVATE_DEF_SEL(binaryFunctions,\n    \"binaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(bindings,\n    \"bindings\");\n_MTL_PRIVATE_DEF_SEL(blitCommandEncoder,\n    \"blitCommandEncoder\");\n_MTL_PRIVATE_DEF_SEL(blitCommandEncoderWithDescriptor_,\n    \"blitCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(blitPassDescriptor,\n    \"blitPassDescriptor\");\n_MTL_PRIVATE_DEF_SEL(borderColor,\n    \"borderColor\");\n_MTL_PRIVATE_DEF_SEL(boundingBoxBuffer,\n    \"boundingBoxBuffer\");\n_MTL_PRIVATE_DEF_SEL(boundingBoxBufferOffset,\n    \"boundingBoxBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(boundingBoxBuffers,\n    \"boundingBoxBuffers\");\n_MTL_PRIVATE_DEF_SEL(boundingBoxCount,\n    \"boundingBoxCount\");\n_MTL_PRIVATE_DEF_SEL(boundingBoxStride,\n    \"boundingBoxStride\");\n_MTL_PRIVATE_DEF_SEL(buffer,\n    \"buffer\");\n_MTL_PRIVATE_DEF_SEL(bufferAlignment,\n    \"bufferAlignment\");\n_MTL_PRIVATE_DEF_SEL(bufferBytesPerRow,\n    \"bufferBytesPerRow\");\n_MTL_PRIVATE_DEF_SEL(bufferDataSize,\n    \"bufferDataSize\");\n_MTL_PRIVATE_DEF_SEL(bufferDataType,\n    \"bufferDataType\");\n_MTL_PRIVATE_DEF_SEL(bufferIndex,\n    \"bufferIndex\");\n_MTL_PRIVATE_DEF_SEL(bufferOffset,\n    \"bufferOffset\");\n_MTL_PRIVATE_DEF_SEL(bufferPointerType,\n    \"bufferPointerType\");\n_MTL_PRIVATE_DEF_SEL(bufferSize,\n    \"bufferSize\");\n_MTL_PRIVATE_DEF_SEL(bufferStructType,\n    \"bufferStructType\");\n_MTL_PRIVATE_DEF_SEL(buffers,\n    
\"buffers\");\n_MTL_PRIVATE_DEF_SEL(buildAccelerationStructure_descriptor_scratchBuffer_scratchBufferOffset_,\n    \"buildAccelerationStructure:descriptor:scratchBuffer:scratchBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(captureObject,\n    \"captureObject\");\n_MTL_PRIVATE_DEF_SEL(clearBarrier,\n    \"clearBarrier\");\n_MTL_PRIVATE_DEF_SEL(clearColor,\n    \"clearColor\");\n_MTL_PRIVATE_DEF_SEL(clearDepth,\n    \"clearDepth\");\n_MTL_PRIVATE_DEF_SEL(clearStencil,\n    \"clearStencil\");\n_MTL_PRIVATE_DEF_SEL(colorAttachments,\n    \"colorAttachments\");\n_MTL_PRIVATE_DEF_SEL(column,\n    \"column\");\n_MTL_PRIVATE_DEF_SEL(commandBuffer,\n    \"commandBuffer\");\n_MTL_PRIVATE_DEF_SEL(commandBufferWithDescriptor_,\n    \"commandBufferWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(commandBufferWithUnretainedReferences,\n    \"commandBufferWithUnretainedReferences\");\n_MTL_PRIVATE_DEF_SEL(commandQueue,\n    \"commandQueue\");\n_MTL_PRIVATE_DEF_SEL(commandTypes,\n    \"commandTypes\");\n_MTL_PRIVATE_DEF_SEL(commit,\n    \"commit\");\n_MTL_PRIVATE_DEF_SEL(compareFunction,\n    \"compareFunction\");\n_MTL_PRIVATE_DEF_SEL(compileSymbolVisibility,\n    \"compileSymbolVisibility\");\n_MTL_PRIVATE_DEF_SEL(compressionType,\n    \"compressionType\");\n_MTL_PRIVATE_DEF_SEL(computeCommandEncoder,\n    \"computeCommandEncoder\");\n_MTL_PRIVATE_DEF_SEL(computeCommandEncoderWithDescriptor_,\n    \"computeCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(computeCommandEncoderWithDispatchType_,\n    \"computeCommandEncoderWithDispatchType:\");\n_MTL_PRIVATE_DEF_SEL(computeFunction,\n    \"computeFunction\");\n_MTL_PRIVATE_DEF_SEL(computePassDescriptor,\n    \"computePassDescriptor\");\n_MTL_PRIVATE_DEF_SEL(concurrentDispatchThreadgroups_threadsPerThreadgroup_,\n    \"concurrentDispatchThreadgroups:threadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(concurrentDispatchThreads_threadsPerThreadgroup_,\n    
\"concurrentDispatchThreads:threadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(constantBlockAlignment,\n    \"constantBlockAlignment\");\n_MTL_PRIVATE_DEF_SEL(constantDataAtIndex_,\n    \"constantDataAtIndex:\");\n_MTL_PRIVATE_DEF_SEL(constantValues,\n    \"constantValues\");\n_MTL_PRIVATE_DEF_SEL(containsAllocation_,\n    \"containsAllocation:\");\n_MTL_PRIVATE_DEF_SEL(contents,\n    \"contents\");\n_MTL_PRIVATE_DEF_SEL(controlDependencies,\n    \"controlDependencies\");\n_MTL_PRIVATE_DEF_SEL(controlPointBuffer,\n    \"controlPointBuffer\");\n_MTL_PRIVATE_DEF_SEL(controlPointBufferOffset,\n    \"controlPointBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(controlPointBuffers,\n    \"controlPointBuffers\");\n_MTL_PRIVATE_DEF_SEL(controlPointCount,\n    \"controlPointCount\");\n_MTL_PRIVATE_DEF_SEL(controlPointFormat,\n    \"controlPointFormat\");\n_MTL_PRIVATE_DEF_SEL(controlPointStride,\n    \"controlPointStride\");\n_MTL_PRIVATE_DEF_SEL(convertSparsePixelRegions_toTileRegions_withTileSize_alignmentMode_numRegions_,\n    \"convertSparsePixelRegions:toTileRegions:withTileSize:alignmentMode:numRegions:\");\n_MTL_PRIVATE_DEF_SEL(convertSparseTileRegions_toPixelRegions_withTileSize_numRegions_,\n    \"convertSparseTileRegions:toPixelRegions:withTileSize:numRegions:\");\n_MTL_PRIVATE_DEF_SEL(copyAccelerationStructure_toAccelerationStructure_,\n    \"copyAccelerationStructure:toAccelerationStructure:\");\n_MTL_PRIVATE_DEF_SEL(copyAndCompactAccelerationStructure_toAccelerationStructure_,\n    \"copyAndCompactAccelerationStructure:toAccelerationStructure:\");\n_MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_,\n    
\"copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:\");\n_MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_options_,\n    \"copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:options:\");\n_MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size_,\n    \"copyFromBuffer:sourceOffset:toBuffer:destinationOffset:size:\");\n_MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_,\n    \"copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:\");\n_MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_options_,\n    \"copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:options:\");\n_MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_,\n    \"copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:\");\n_MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_toTexture_destinationSlice_destinationLevel_sliceCount_levelCount_,\n    \"copyFromTexture:sourceSlice:sourceLevel:toTexture:destinationSlice:destinationLevel:sliceCount:levelCount:\");\n_MTL_PRIVATE_DEF_SEL(copyFromTexture_toTexture_,\n    \"copyFromTexture:toTexture:\");\n_MTL_PRIVATE_DEF_SEL(copyIndirectCommandBuffer_sourceRange_destination_destinationIndex_,\n    
\"copyIndirectCommandBuffer:sourceRange:destination:destinationIndex:\");\n_MTL_PRIVATE_DEF_SEL(copyParameterDataToBuffer_offset_,\n    \"copyParameterDataToBuffer:offset:\");\n_MTL_PRIVATE_DEF_SEL(copyStatusToBuffer_offset_,\n    \"copyStatusToBuffer:offset:\");\n_MTL_PRIVATE_DEF_SEL(counterSet,\n    \"counterSet\");\n_MTL_PRIVATE_DEF_SEL(counterSets,\n    \"counterSets\");\n_MTL_PRIVATE_DEF_SEL(counters,\n    \"counters\");\n_MTL_PRIVATE_DEF_SEL(cpuCacheMode,\n    \"cpuCacheMode\");\n_MTL_PRIVATE_DEF_SEL(currentAllocatedSize,\n    \"currentAllocatedSize\");\n_MTL_PRIVATE_DEF_SEL(curveBasis,\n    \"curveBasis\");\n_MTL_PRIVATE_DEF_SEL(curveEndCaps,\n    \"curveEndCaps\");\n_MTL_PRIVATE_DEF_SEL(curveType,\n    \"curveType\");\n_MTL_PRIVATE_DEF_SEL(data,\n    \"data\");\n_MTL_PRIVATE_DEF_SEL(dataSize,\n    \"dataSize\");\n_MTL_PRIVATE_DEF_SEL(dataType,\n    \"dataType\");\n_MTL_PRIVATE_DEF_SEL(dealloc,\n    \"dealloc\");\n_MTL_PRIVATE_DEF_SEL(debugLocation,\n    \"debugLocation\");\n_MTL_PRIVATE_DEF_SEL(debugSignposts,\n    \"debugSignposts\");\n_MTL_PRIVATE_DEF_SEL(defaultCaptureScope,\n    \"defaultCaptureScope\");\n_MTL_PRIVATE_DEF_SEL(defaultRasterSampleCount,\n    \"defaultRasterSampleCount\");\n_MTL_PRIVATE_DEF_SEL(depth,\n    \"depth\");\n_MTL_PRIVATE_DEF_SEL(depthAttachment,\n    \"depthAttachment\");\n_MTL_PRIVATE_DEF_SEL(depthAttachmentPixelFormat,\n    \"depthAttachmentPixelFormat\");\n_MTL_PRIVATE_DEF_SEL(depthCompareFunction,\n    \"depthCompareFunction\");\n_MTL_PRIVATE_DEF_SEL(depthFailureOperation,\n    \"depthFailureOperation\");\n_MTL_PRIVATE_DEF_SEL(depthPlane,\n    \"depthPlane\");\n_MTL_PRIVATE_DEF_SEL(depthResolveFilter,\n    \"depthResolveFilter\");\n_MTL_PRIVATE_DEF_SEL(depthStencilPassOperation,\n    \"depthStencilPassOperation\");\n_MTL_PRIVATE_DEF_SEL(descriptor,\n    \"descriptor\");\n_MTL_PRIVATE_DEF_SEL(destination,\n    \"destination\");\n_MTL_PRIVATE_DEF_SEL(destinationAlphaBlendFactor,\n    
\"destinationAlphaBlendFactor\");\n_MTL_PRIVATE_DEF_SEL(destinationRGBBlendFactor,\n    \"destinationRGBBlendFactor\");\n_MTL_PRIVATE_DEF_SEL(device,\n    \"device\");\n_MTL_PRIVATE_DEF_SEL(didModifyRange_,\n    \"didModifyRange:\");\n_MTL_PRIVATE_DEF_SEL(dispatchQueue,\n    \"dispatchQueue\");\n_MTL_PRIVATE_DEF_SEL(dispatchThreadgroups_threadsPerThreadgroup_,\n    \"dispatchThreadgroups:threadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(dispatchThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerThreadgroup_,\n    \"dispatchThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(dispatchThreads_threadsPerThreadgroup_,\n    \"dispatchThreads:threadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(dispatchThreadsPerTile_,\n    \"dispatchThreadsPerTile:\");\n_MTL_PRIVATE_DEF_SEL(dispatchType,\n    \"dispatchType\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_indirectBuffer_indirectBufferOffset_,\n    \"drawIndexedPatches:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_,\n    \"drawIndexedPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:instanceCount:baseInstance:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_,\n    
\"drawIndexedPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_,\n    \"drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_,\n    \"drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_,\n    \"drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:baseVertex:baseInstance:\");\n_MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexType_indexBuffer_indexBufferOffset_indirectBuffer_indirectBufferOffset_,\n    \"drawIndexedPrimitives:indexType:indexBuffer:indexBufferOffset:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_,\n    \"drawMeshThreadgroups:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(drawMeshThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_,\n    \"drawMeshThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_,\n    \"drawMeshThreads:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(drawPatches_patchIndexBuffer_patchIndexBufferOffset_indirectBuffer_indirectBufferOffset_,\n    
\"drawPatches:patchIndexBuffer:patchIndexBufferOffset:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_,\n    \"drawPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:instanceCount:baseInstance:\");\n_MTL_PRIVATE_DEF_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_,\n    \"drawPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:\");\n_MTL_PRIVATE_DEF_SEL(drawPrimitives_indirectBuffer_indirectBufferOffset_,\n    \"drawPrimitives:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_,\n    \"drawPrimitives:vertexStart:vertexCount:\");\n_MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_,\n    \"drawPrimitives:vertexStart:vertexCount:instanceCount:\");\n_MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_,\n    \"drawPrimitives:vertexStart:vertexCount:instanceCount:baseInstance:\");\n_MTL_PRIVATE_DEF_SEL(drawableID,\n    \"drawableID\");\n_MTL_PRIVATE_DEF_SEL(elementArrayType,\n    \"elementArrayType\");\n_MTL_PRIVATE_DEF_SEL(elementIsArgumentBuffer,\n    \"elementIsArgumentBuffer\");\n_MTL_PRIVATE_DEF_SEL(elementPointerType,\n    \"elementPointerType\");\n_MTL_PRIVATE_DEF_SEL(elementStructType,\n    \"elementStructType\");\n_MTL_PRIVATE_DEF_SEL(elementTextureReferenceType,\n    \"elementTextureReferenceType\");\n_MTL_PRIVATE_DEF_SEL(elementType,\n    \"elementType\");\n_MTL_PRIVATE_DEF_SEL(enableLogging,\n    \"enableLogging\");\n_MTL_PRIVATE_DEF_SEL(encodeSignalEvent_value_,\n    
\"encodeSignalEvent:value:\");\n_MTL_PRIVATE_DEF_SEL(encodeWaitForEvent_value_,\n    \"encodeWaitForEvent:value:\");\n_MTL_PRIVATE_DEF_SEL(encodedLength,\n    \"encodedLength\");\n_MTL_PRIVATE_DEF_SEL(encoderLabel,\n    \"encoderLabel\");\n_MTL_PRIVATE_DEF_SEL(endEncoding,\n    \"endEncoding\");\n_MTL_PRIVATE_DEF_SEL(endOfEncoderSampleIndex,\n    \"endOfEncoderSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(endOfFragmentSampleIndex,\n    \"endOfFragmentSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(endOfVertexSampleIndex,\n    \"endOfVertexSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(endResidency,\n    \"endResidency\");\n_MTL_PRIVATE_DEF_SEL(enqueue,\n    \"enqueue\");\n_MTL_PRIVATE_DEF_SEL(enqueueBarrier,\n    \"enqueueBarrier\");\n_MTL_PRIVATE_DEF_SEL(error,\n    \"error\");\n_MTL_PRIVATE_DEF_SEL(errorOptions,\n    \"errorOptions\");\n_MTL_PRIVATE_DEF_SEL(errorState,\n    \"errorState\");\n_MTL_PRIVATE_DEF_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_,\n    \"executeCommandsInBuffer:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(executeCommandsInBuffer_withRange_,\n    \"executeCommandsInBuffer:withRange:\");\n_MTL_PRIVATE_DEF_SEL(fastMathEnabled,\n    \"fastMathEnabled\");\n_MTL_PRIVATE_DEF_SEL(fillBuffer_range_value_,\n    \"fillBuffer:range:value:\");\n_MTL_PRIVATE_DEF_SEL(firstMipmapInTail,\n    \"firstMipmapInTail\");\n_MTL_PRIVATE_DEF_SEL(format,\n    \"format\");\n_MTL_PRIVATE_DEF_SEL(fragmentAdditionalBinaryFunctions,\n    \"fragmentAdditionalBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(fragmentArguments,\n    \"fragmentArguments\");\n_MTL_PRIVATE_DEF_SEL(fragmentBindings,\n    \"fragmentBindings\");\n_MTL_PRIVATE_DEF_SEL(fragmentBuffers,\n    \"fragmentBuffers\");\n_MTL_PRIVATE_DEF_SEL(fragmentFunction,\n    \"fragmentFunction\");\n_MTL_PRIVATE_DEF_SEL(fragmentLinkedFunctions,\n    \"fragmentLinkedFunctions\");\n_MTL_PRIVATE_DEF_SEL(fragmentPreloadedLibraries,\n    \"fragmentPreloadedLibraries\");\n_MTL_PRIVATE_DEF_SEL(frontFaceStencil,\n    
\"frontFaceStencil\");\n_MTL_PRIVATE_DEF_SEL(function,\n    \"function\");\n_MTL_PRIVATE_DEF_SEL(functionConstantsDictionary,\n    \"functionConstantsDictionary\");\n_MTL_PRIVATE_DEF_SEL(functionCount,\n    \"functionCount\");\n_MTL_PRIVATE_DEF_SEL(functionDescriptor,\n    \"functionDescriptor\");\n_MTL_PRIVATE_DEF_SEL(functionGraphs,\n    \"functionGraphs\");\n_MTL_PRIVATE_DEF_SEL(functionHandleWithFunction_,\n    \"functionHandleWithFunction:\");\n_MTL_PRIVATE_DEF_SEL(functionHandleWithFunction_stage_,\n    \"functionHandleWithFunction:stage:\");\n_MTL_PRIVATE_DEF_SEL(functionName,\n    \"functionName\");\n_MTL_PRIVATE_DEF_SEL(functionNames,\n    \"functionNames\");\n_MTL_PRIVATE_DEF_SEL(functionType,\n    \"functionType\");\n_MTL_PRIVATE_DEF_SEL(functions,\n    \"functions\");\n_MTL_PRIVATE_DEF_SEL(generateMipmapsForTexture_,\n    \"generateMipmapsForTexture:\");\n_MTL_PRIVATE_DEF_SEL(geometryDescriptors,\n    \"geometryDescriptors\");\n_MTL_PRIVATE_DEF_SEL(getBytes_bytesPerRow_bytesPerImage_fromRegion_mipmapLevel_slice_,\n    \"getBytes:bytesPerRow:bytesPerImage:fromRegion:mipmapLevel:slice:\");\n_MTL_PRIVATE_DEF_SEL(getBytes_bytesPerRow_fromRegion_mipmapLevel_,\n    \"getBytes:bytesPerRow:fromRegion:mipmapLevel:\");\n_MTL_PRIVATE_DEF_SEL(getDefaultSamplePositions_count_,\n    \"getDefaultSamplePositions:count:\");\n_MTL_PRIVATE_DEF_SEL(getSamplePositions_count_,\n    \"getSamplePositions:count:\");\n_MTL_PRIVATE_DEF_SEL(getTextureAccessCounters_region_mipLevel_slice_resetCounters_countersBuffer_countersBufferOffset_,\n    \"getTextureAccessCounters:region:mipLevel:slice:resetCounters:countersBuffer:countersBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(gpuAddress,\n    \"gpuAddress\");\n_MTL_PRIVATE_DEF_SEL(gpuResourceID,\n    \"gpuResourceID\");\n_MTL_PRIVATE_DEF_SEL(groups,\n    \"groups\");\n_MTL_PRIVATE_DEF_SEL(hasUnifiedMemory,\n    \"hasUnifiedMemory\");\n_MTL_PRIVATE_DEF_SEL(hazardTrackingMode,\n    \"hazardTrackingMode\");\n_MTL_PRIVATE_DEF_SEL(heap,\n    
\"heap\");\n_MTL_PRIVATE_DEF_SEL(heapAccelerationStructureSizeAndAlignWithDescriptor_,\n    \"heapAccelerationStructureSizeAndAlignWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(heapAccelerationStructureSizeAndAlignWithSize_,\n    \"heapAccelerationStructureSizeAndAlignWithSize:\");\n_MTL_PRIVATE_DEF_SEL(heapBufferSizeAndAlignWithLength_options_,\n    \"heapBufferSizeAndAlignWithLength:options:\");\n_MTL_PRIVATE_DEF_SEL(heapOffset,\n    \"heapOffset\");\n_MTL_PRIVATE_DEF_SEL(heapTextureSizeAndAlignWithDescriptor_,\n    \"heapTextureSizeAndAlignWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(height,\n    \"height\");\n_MTL_PRIVATE_DEF_SEL(horizontal,\n    \"horizontal\");\n_MTL_PRIVATE_DEF_SEL(horizontalSampleStorage,\n    \"horizontalSampleStorage\");\n_MTL_PRIVATE_DEF_SEL(imageblockMemoryLengthForDimensions_,\n    \"imageblockMemoryLengthForDimensions:\");\n_MTL_PRIVATE_DEF_SEL(imageblockSampleLength,\n    \"imageblockSampleLength\");\n_MTL_PRIVATE_DEF_SEL(index,\n    \"index\");\n_MTL_PRIVATE_DEF_SEL(indexBuffer,\n    \"indexBuffer\");\n_MTL_PRIVATE_DEF_SEL(indexBufferIndex,\n    \"indexBufferIndex\");\n_MTL_PRIVATE_DEF_SEL(indexBufferOffset,\n    \"indexBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(indexType,\n    \"indexType\");\n_MTL_PRIVATE_DEF_SEL(indirectComputeCommandAtIndex_,\n    \"indirectComputeCommandAtIndex:\");\n_MTL_PRIVATE_DEF_SEL(indirectRenderCommandAtIndex_,\n    \"indirectRenderCommandAtIndex:\");\n_MTL_PRIVATE_DEF_SEL(inheritBuffers,\n    \"inheritBuffers\");\n_MTL_PRIVATE_DEF_SEL(inheritPipelineState,\n    \"inheritPipelineState\");\n_MTL_PRIVATE_DEF_SEL(init,\n    \"init\");\n_MTL_PRIVATE_DEF_SEL(initWithArgumentIndex_,\n    \"initWithArgumentIndex:\");\n_MTL_PRIVATE_DEF_SEL(initWithDispatchQueue_,\n    \"initWithDispatchQueue:\");\n_MTL_PRIVATE_DEF_SEL(initWithFunctionName_nodes_outputNode_attributes_,\n    \"initWithFunctionName:nodes:outputNode:attributes:\");\n_MTL_PRIVATE_DEF_SEL(initWithName_arguments_controlDependencies_,\n    
\"initWithName:arguments:controlDependencies:\");\n_MTL_PRIVATE_DEF_SEL(initWithSampleCount_,\n    \"initWithSampleCount:\");\n_MTL_PRIVATE_DEF_SEL(initWithSampleCount_horizontal_vertical_,\n    \"initWithSampleCount:horizontal:vertical:\");\n_MTL_PRIVATE_DEF_SEL(initialCapacity,\n    \"initialCapacity\");\n_MTL_PRIVATE_DEF_SEL(inputPrimitiveTopology,\n    \"inputPrimitiveTopology\");\n_MTL_PRIVATE_DEF_SEL(insertDebugCaptureBoundary,\n    \"insertDebugCaptureBoundary\");\n_MTL_PRIVATE_DEF_SEL(insertDebugSignpost_,\n    \"insertDebugSignpost:\");\n_MTL_PRIVATE_DEF_SEL(insertLibraries,\n    \"insertLibraries\");\n_MTL_PRIVATE_DEF_SEL(installName,\n    \"installName\");\n_MTL_PRIVATE_DEF_SEL(instanceCount,\n    \"instanceCount\");\n_MTL_PRIVATE_DEF_SEL(instanceCountBuffer,\n    \"instanceCountBuffer\");\n_MTL_PRIVATE_DEF_SEL(instanceCountBufferOffset,\n    \"instanceCountBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(instanceDescriptorBuffer,\n    \"instanceDescriptorBuffer\");\n_MTL_PRIVATE_DEF_SEL(instanceDescriptorBufferOffset,\n    \"instanceDescriptorBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(instanceDescriptorStride,\n    \"instanceDescriptorStride\");\n_MTL_PRIVATE_DEF_SEL(instanceDescriptorType,\n    \"instanceDescriptorType\");\n_MTL_PRIVATE_DEF_SEL(instanceTransformationMatrixLayout,\n    \"instanceTransformationMatrixLayout\");\n_MTL_PRIVATE_DEF_SEL(instancedAccelerationStructures,\n    \"instancedAccelerationStructures\");\n_MTL_PRIVATE_DEF_SEL(intersectionFunctionTableDescriptor,\n    \"intersectionFunctionTableDescriptor\");\n_MTL_PRIVATE_DEF_SEL(intersectionFunctionTableOffset,\n    \"intersectionFunctionTableOffset\");\n_MTL_PRIVATE_DEF_SEL(iosurface,\n    \"iosurface\");\n_MTL_PRIVATE_DEF_SEL(iosurfacePlane,\n    \"iosurfacePlane\");\n_MTL_PRIVATE_DEF_SEL(isActive,\n    \"isActive\");\n_MTL_PRIVATE_DEF_SEL(isAliasable,\n    \"isAliasable\");\n_MTL_PRIVATE_DEF_SEL(isAlphaToCoverageEnabled,\n    
\"isAlphaToCoverageEnabled\");\n_MTL_PRIVATE_DEF_SEL(isAlphaToOneEnabled,\n    \"isAlphaToOneEnabled\");\n_MTL_PRIVATE_DEF_SEL(isArgument,\n    \"isArgument\");\n_MTL_PRIVATE_DEF_SEL(isBlendingEnabled,\n    \"isBlendingEnabled\");\n_MTL_PRIVATE_DEF_SEL(isCapturing,\n    \"isCapturing\");\n_MTL_PRIVATE_DEF_SEL(isDepth24Stencil8PixelFormatSupported,\n    \"isDepth24Stencil8PixelFormatSupported\");\n_MTL_PRIVATE_DEF_SEL(isDepthTexture,\n    \"isDepthTexture\");\n_MTL_PRIVATE_DEF_SEL(isDepthWriteEnabled,\n    \"isDepthWriteEnabled\");\n_MTL_PRIVATE_DEF_SEL(isFramebufferOnly,\n    \"isFramebufferOnly\");\n_MTL_PRIVATE_DEF_SEL(isHeadless,\n    \"isHeadless\");\n_MTL_PRIVATE_DEF_SEL(isLowPower,\n    \"isLowPower\");\n_MTL_PRIVATE_DEF_SEL(isPatchControlPointData,\n    \"isPatchControlPointData\");\n_MTL_PRIVATE_DEF_SEL(isPatchData,\n    \"isPatchData\");\n_MTL_PRIVATE_DEF_SEL(isRasterizationEnabled,\n    \"isRasterizationEnabled\");\n_MTL_PRIVATE_DEF_SEL(isRemovable,\n    \"isRemovable\");\n_MTL_PRIVATE_DEF_SEL(isShareable,\n    \"isShareable\");\n_MTL_PRIVATE_DEF_SEL(isSparse,\n    \"isSparse\");\n_MTL_PRIVATE_DEF_SEL(isTessellationFactorScaleEnabled,\n    \"isTessellationFactorScaleEnabled\");\n_MTL_PRIVATE_DEF_SEL(isUsed,\n    \"isUsed\");\n_MTL_PRIVATE_DEF_SEL(kernelEndTime,\n    \"kernelEndTime\");\n_MTL_PRIVATE_DEF_SEL(kernelStartTime,\n    \"kernelStartTime\");\n_MTL_PRIVATE_DEF_SEL(label,\n    \"label\");\n_MTL_PRIVATE_DEF_SEL(languageVersion,\n    \"languageVersion\");\n_MTL_PRIVATE_DEF_SEL(layerAtIndex_,\n    \"layerAtIndex:\");\n_MTL_PRIVATE_DEF_SEL(layerCount,\n    \"layerCount\");\n_MTL_PRIVATE_DEF_SEL(layers,\n    \"layers\");\n_MTL_PRIVATE_DEF_SEL(layouts,\n    \"layouts\");\n_MTL_PRIVATE_DEF_SEL(length,\n    \"length\");\n_MTL_PRIVATE_DEF_SEL(level,\n    \"level\");\n_MTL_PRIVATE_DEF_SEL(libraries,\n    \"libraries\");\n_MTL_PRIVATE_DEF_SEL(libraryType,\n    \"libraryType\");\n_MTL_PRIVATE_DEF_SEL(line,\n    
\"line\");\n_MTL_PRIVATE_DEF_SEL(linkedFunctions,\n    \"linkedFunctions\");\n_MTL_PRIVATE_DEF_SEL(loadAction,\n    \"loadAction\");\n_MTL_PRIVATE_DEF_SEL(loadBuffer_offset_size_sourceHandle_sourceHandleOffset_,\n    \"loadBuffer:offset:size:sourceHandle:sourceHandleOffset:\");\n_MTL_PRIVATE_DEF_SEL(loadBytes_size_sourceHandle_sourceHandleOffset_,\n    \"loadBytes:size:sourceHandle:sourceHandleOffset:\");\n_MTL_PRIVATE_DEF_SEL(loadTexture_slice_level_size_sourceBytesPerRow_sourceBytesPerImage_destinationOrigin_sourceHandle_sourceHandleOffset_,\n    \"loadTexture:slice:level:size:sourceBytesPerRow:sourceBytesPerImage:destinationOrigin:sourceHandle:sourceHandleOffset:\");\n_MTL_PRIVATE_DEF_SEL(location,\n    \"location\");\n_MTL_PRIVATE_DEF_SEL(locationNumber,\n    \"locationNumber\");\n_MTL_PRIVATE_DEF_SEL(lodAverage,\n    \"lodAverage\");\n_MTL_PRIVATE_DEF_SEL(lodMaxClamp,\n    \"lodMaxClamp\");\n_MTL_PRIVATE_DEF_SEL(lodMinClamp,\n    \"lodMinClamp\");\n_MTL_PRIVATE_DEF_SEL(logState,\n    \"logState\");\n_MTL_PRIVATE_DEF_SEL(logs,\n    \"logs\");\n_MTL_PRIVATE_DEF_SEL(magFilter,\n    \"magFilter\");\n_MTL_PRIVATE_DEF_SEL(makeAliasable,\n    \"makeAliasable\");\n_MTL_PRIVATE_DEF_SEL(mapPhysicalToScreenCoordinates_forLayer_,\n    \"mapPhysicalToScreenCoordinates:forLayer:\");\n_MTL_PRIVATE_DEF_SEL(mapScreenToPhysicalCoordinates_forLayer_,\n    \"mapScreenToPhysicalCoordinates:forLayer:\");\n_MTL_PRIVATE_DEF_SEL(mathFloatingPointFunctions,\n    \"mathFloatingPointFunctions\");\n_MTL_PRIVATE_DEF_SEL(mathMode,\n    \"mathMode\");\n_MTL_PRIVATE_DEF_SEL(maxAnisotropy,\n    \"maxAnisotropy\");\n_MTL_PRIVATE_DEF_SEL(maxArgumentBufferSamplerCount,\n    \"maxArgumentBufferSamplerCount\");\n_MTL_PRIVATE_DEF_SEL(maxAvailableSizeWithAlignment_,\n    \"maxAvailableSizeWithAlignment:\");\n_MTL_PRIVATE_DEF_SEL(maxBufferLength,\n    \"maxBufferLength\");\n_MTL_PRIVATE_DEF_SEL(maxCallStackDepth,\n    \"maxCallStackDepth\");\n_MTL_PRIVATE_DEF_SEL(maxCommandBufferCount,\n    
\"maxCommandBufferCount\");\n_MTL_PRIVATE_DEF_SEL(maxCommandsInFlight,\n    \"maxCommandsInFlight\");\n_MTL_PRIVATE_DEF_SEL(maxFragmentBufferBindCount,\n    \"maxFragmentBufferBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxFragmentCallStackDepth,\n    \"maxFragmentCallStackDepth\");\n_MTL_PRIVATE_DEF_SEL(maxInstanceCount,\n    \"maxInstanceCount\");\n_MTL_PRIVATE_DEF_SEL(maxKernelBufferBindCount,\n    \"maxKernelBufferBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxKernelThreadgroupMemoryBindCount,\n    \"maxKernelThreadgroupMemoryBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxMeshBufferBindCount,\n    \"maxMeshBufferBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxMotionTransformCount,\n    \"maxMotionTransformCount\");\n_MTL_PRIVATE_DEF_SEL(maxObjectBufferBindCount,\n    \"maxObjectBufferBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxObjectThreadgroupMemoryBindCount,\n    \"maxObjectThreadgroupMemoryBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxSampleCount,\n    \"maxSampleCount\");\n_MTL_PRIVATE_DEF_SEL(maxTessellationFactor,\n    \"maxTessellationFactor\");\n_MTL_PRIVATE_DEF_SEL(maxThreadgroupMemoryLength,\n    \"maxThreadgroupMemoryLength\");\n_MTL_PRIVATE_DEF_SEL(maxThreadsPerThreadgroup,\n    \"maxThreadsPerThreadgroup\");\n_MTL_PRIVATE_DEF_SEL(maxTotalThreadgroupsPerMeshGrid,\n    \"maxTotalThreadgroupsPerMeshGrid\");\n_MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerMeshThreadgroup,\n    \"maxTotalThreadsPerMeshThreadgroup\");\n_MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerObjectThreadgroup,\n    \"maxTotalThreadsPerObjectThreadgroup\");\n_MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerThreadgroup,\n    \"maxTotalThreadsPerThreadgroup\");\n_MTL_PRIVATE_DEF_SEL(maxTransferRate,\n    \"maxTransferRate\");\n_MTL_PRIVATE_DEF_SEL(maxVertexAmplificationCount,\n    \"maxVertexAmplificationCount\");\n_MTL_PRIVATE_DEF_SEL(maxVertexBufferBindCount,\n    \"maxVertexBufferBindCount\");\n_MTL_PRIVATE_DEF_SEL(maxVertexCallStackDepth,\n    \"maxVertexCallStackDepth\");\n_MTL_PRIVATE_DEF_SEL(maximumConcurrentCompilationTaskCount,\n    
\"maximumConcurrentCompilationTaskCount\");\n_MTL_PRIVATE_DEF_SEL(memberByName_,\n    \"memberByName:\");\n_MTL_PRIVATE_DEF_SEL(members,\n    \"members\");\n_MTL_PRIVATE_DEF_SEL(memoryBarrierWithResources_count_,\n    \"memoryBarrierWithResources:count:\");\n_MTL_PRIVATE_DEF_SEL(memoryBarrierWithResources_count_afterStages_beforeStages_,\n    \"memoryBarrierWithResources:count:afterStages:beforeStages:\");\n_MTL_PRIVATE_DEF_SEL(memoryBarrierWithScope_,\n    \"memoryBarrierWithScope:\");\n_MTL_PRIVATE_DEF_SEL(memoryBarrierWithScope_afterStages_beforeStages_,\n    \"memoryBarrierWithScope:afterStages:beforeStages:\");\n_MTL_PRIVATE_DEF_SEL(meshBindings,\n    \"meshBindings\");\n_MTL_PRIVATE_DEF_SEL(meshBuffers,\n    \"meshBuffers\");\n_MTL_PRIVATE_DEF_SEL(meshFunction,\n    \"meshFunction\");\n_MTL_PRIVATE_DEF_SEL(meshLinkedFunctions,\n    \"meshLinkedFunctions\");\n_MTL_PRIVATE_DEF_SEL(meshThreadExecutionWidth,\n    \"meshThreadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(meshThreadgroupSizeIsMultipleOfThreadExecutionWidth,\n    \"meshThreadgroupSizeIsMultipleOfThreadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(minFilter,\n    \"minFilter\");\n_MTL_PRIVATE_DEF_SEL(minimumLinearTextureAlignmentForPixelFormat_,\n    \"minimumLinearTextureAlignmentForPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(minimumTextureBufferAlignmentForPixelFormat_,\n    \"minimumTextureBufferAlignmentForPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(mipFilter,\n    \"mipFilter\");\n_MTL_PRIVATE_DEF_SEL(mipmapLevelCount,\n    \"mipmapLevelCount\");\n_MTL_PRIVATE_DEF_SEL(motionEndBorderMode,\n    \"motionEndBorderMode\");\n_MTL_PRIVATE_DEF_SEL(motionEndTime,\n    \"motionEndTime\");\n_MTL_PRIVATE_DEF_SEL(motionKeyframeCount,\n    \"motionKeyframeCount\");\n_MTL_PRIVATE_DEF_SEL(motionStartBorderMode,\n    \"motionStartBorderMode\");\n_MTL_PRIVATE_DEF_SEL(motionStartTime,\n    \"motionStartTime\");\n_MTL_PRIVATE_DEF_SEL(motionTransformBuffer,\n    
\"motionTransformBuffer\");\n_MTL_PRIVATE_DEF_SEL(motionTransformBufferOffset,\n    \"motionTransformBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(motionTransformCount,\n    \"motionTransformCount\");\n_MTL_PRIVATE_DEF_SEL(motionTransformCountBuffer,\n    \"motionTransformCountBuffer\");\n_MTL_PRIVATE_DEF_SEL(motionTransformCountBufferOffset,\n    \"motionTransformCountBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(motionTransformStride,\n    \"motionTransformStride\");\n_MTL_PRIVATE_DEF_SEL(motionTransformType,\n    \"motionTransformType\");\n_MTL_PRIVATE_DEF_SEL(moveTextureMappingsFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_,\n    \"moveTextureMappingsFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:\");\n_MTL_PRIVATE_DEF_SEL(mutability,\n    \"mutability\");\n_MTL_PRIVATE_DEF_SEL(name,\n    \"name\");\n_MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithDescriptor_,\n    \"newAccelerationStructureWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithDescriptor_offset_,\n    \"newAccelerationStructureWithDescriptor:offset:\");\n_MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithSize_,\n    \"newAccelerationStructureWithSize:\");\n_MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithSize_offset_,\n    \"newAccelerationStructureWithSize:offset:\");\n_MTL_PRIVATE_DEF_SEL(newArgumentEncoderForBufferAtIndex_,\n    \"newArgumentEncoderForBufferAtIndex:\");\n_MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithArguments_,\n    \"newArgumentEncoderWithArguments:\");\n_MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferBinding_,\n    \"newArgumentEncoderWithBufferBinding:\");\n_MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferIndex_,\n    \"newArgumentEncoderWithBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferIndex_reflection_,\n    
\"newArgumentEncoderWithBufferIndex:reflection:\");\n_MTL_PRIVATE_DEF_SEL(newBinaryArchiveWithDescriptor_error_,\n    \"newBinaryArchiveWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newBufferWithBytes_length_options_,\n    \"newBufferWithBytes:length:options:\");\n_MTL_PRIVATE_DEF_SEL(newBufferWithBytesNoCopy_length_options_deallocator_,\n    \"newBufferWithBytesNoCopy:length:options:deallocator:\");\n_MTL_PRIVATE_DEF_SEL(newBufferWithLength_options_,\n    \"newBufferWithLength:options:\");\n_MTL_PRIVATE_DEF_SEL(newBufferWithLength_options_offset_,\n    \"newBufferWithLength:options:offset:\");\n_MTL_PRIVATE_DEF_SEL(newCaptureScopeWithCommandQueue_,\n    \"newCaptureScopeWithCommandQueue:\");\n_MTL_PRIVATE_DEF_SEL(newCaptureScopeWithDevice_,\n    \"newCaptureScopeWithDevice:\");\n_MTL_PRIVATE_DEF_SEL(newCommandQueue,\n    \"newCommandQueue\");\n_MTL_PRIVATE_DEF_SEL(newCommandQueueWithDescriptor_,\n    \"newCommandQueueWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newCommandQueueWithMaxCommandBufferCount_,\n    \"newCommandQueueWithMaxCommandBufferCount:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithAdditionalBinaryFunctions_error_,\n    \"newComputePipelineStateWithAdditionalBinaryFunctions:error:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithDescriptor_options_completionHandler_,\n    \"newComputePipelineStateWithDescriptor:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithDescriptor_options_reflection_error_,\n    \"newComputePipelineStateWithDescriptor:options:reflection:error:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_completionHandler_,\n    \"newComputePipelineStateWithFunction:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_error_,\n    \"newComputePipelineStateWithFunction:error:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_options_completionHandler_,\n    
\"newComputePipelineStateWithFunction:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_options_reflection_error_,\n    \"newComputePipelineStateWithFunction:options:reflection:error:\");\n_MTL_PRIVATE_DEF_SEL(newCounterSampleBufferWithDescriptor_error_,\n    \"newCounterSampleBufferWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newDefaultLibrary,\n    \"newDefaultLibrary\");\n_MTL_PRIVATE_DEF_SEL(newDefaultLibraryWithBundle_error_,\n    \"newDefaultLibraryWithBundle:error:\");\n_MTL_PRIVATE_DEF_SEL(newDepthStencilStateWithDescriptor_,\n    \"newDepthStencilStateWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newDynamicLibrary_error_,\n    \"newDynamicLibrary:error:\");\n_MTL_PRIVATE_DEF_SEL(newDynamicLibraryWithURL_error_,\n    \"newDynamicLibraryWithURL:error:\");\n_MTL_PRIVATE_DEF_SEL(newEvent,\n    \"newEvent\");\n_MTL_PRIVATE_DEF_SEL(newFence,\n    \"newFence\");\n_MTL_PRIVATE_DEF_SEL(newFunctionWithDescriptor_completionHandler_,\n    \"newFunctionWithDescriptor:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newFunctionWithDescriptor_error_,\n    \"newFunctionWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newFunctionWithName_,\n    \"newFunctionWithName:\");\n_MTL_PRIVATE_DEF_SEL(newFunctionWithName_constantValues_completionHandler_,\n    \"newFunctionWithName:constantValues:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newFunctionWithName_constantValues_error_,\n    \"newFunctionWithName:constantValues:error:\");\n_MTL_PRIVATE_DEF_SEL(newHeapWithDescriptor_,\n    \"newHeapWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newIOCommandQueueWithDescriptor_error_,\n    \"newIOCommandQueueWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newIOFileHandleWithURL_compressionMethod_error_,\n    \"newIOFileHandleWithURL:compressionMethod:error:\");\n_MTL_PRIVATE_DEF_SEL(newIOFileHandleWithURL_error_,\n    \"newIOFileHandleWithURL:error:\");\n_MTL_PRIVATE_DEF_SEL(newIOHandleWithURL_compressionMethod_error_,\n    
\"newIOHandleWithURL:compressionMethod:error:\");\n_MTL_PRIVATE_DEF_SEL(newIOHandleWithURL_error_,\n    \"newIOHandleWithURL:error:\");\n_MTL_PRIVATE_DEF_SEL(newIndirectCommandBufferWithDescriptor_maxCommandCount_options_,\n    \"newIndirectCommandBufferWithDescriptor:maxCommandCount:options:\");\n_MTL_PRIVATE_DEF_SEL(newIntersectionFunctionTableWithDescriptor_,\n    \"newIntersectionFunctionTableWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newIntersectionFunctionTableWithDescriptor_stage_,\n    \"newIntersectionFunctionTableWithDescriptor:stage:\");\n_MTL_PRIVATE_DEF_SEL(newIntersectionFunctionWithDescriptor_completionHandler_,\n    \"newIntersectionFunctionWithDescriptor:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newIntersectionFunctionWithDescriptor_error_,\n    \"newIntersectionFunctionWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithData_error_,\n    \"newLibraryWithData:error:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithFile_error_,\n    \"newLibraryWithFile:error:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithSource_options_completionHandler_,\n    \"newLibraryWithSource:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithSource_options_error_,\n    \"newLibraryWithSource:options:error:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithStitchedDescriptor_completionHandler_,\n    \"newLibraryWithStitchedDescriptor:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithStitchedDescriptor_error_,\n    \"newLibraryWithStitchedDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newLibraryWithURL_error_,\n    \"newLibraryWithURL:error:\");\n_MTL_PRIVATE_DEF_SEL(newLogStateWithDescriptor_error_,\n    \"newLogStateWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newRasterizationRateMapWithDescriptor_,\n    \"newRasterizationRateMapWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newRemoteBufferViewForDevice_,\n    \"newRemoteBufferViewForDevice:\");\n_MTL_PRIVATE_DEF_SEL(newRemoteTextureViewForDevice_,\n    
\"newRemoteTextureViewForDevice:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithAdditionalBinaryFunctions_error_,\n    \"newRenderPipelineStateWithAdditionalBinaryFunctions:error:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_completionHandler_,\n    \"newRenderPipelineStateWithDescriptor:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_error_,\n    \"newRenderPipelineStateWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_options_completionHandler_,\n    \"newRenderPipelineStateWithDescriptor:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_options_reflection_error_,\n    \"newRenderPipelineStateWithDescriptor:options:reflection:error:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithMeshDescriptor_options_completionHandler_,\n    \"newRenderPipelineStateWithMeshDescriptor:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithMeshDescriptor_options_reflection_error_,\n    \"newRenderPipelineStateWithMeshDescriptor:options:reflection:error:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithTileDescriptor_options_completionHandler_,\n    \"newRenderPipelineStateWithTileDescriptor:options:completionHandler:\");\n_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithTileDescriptor_options_reflection_error_,\n    \"newRenderPipelineStateWithTileDescriptor:options:reflection:error:\");\n_MTL_PRIVATE_DEF_SEL(newResidencySetWithDescriptor_error_,\n    \"newResidencySetWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(newSamplerStateWithDescriptor_,\n    \"newSamplerStateWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newScratchBufferWithMinimumSize_,\n    \"newScratchBufferWithMinimumSize:\");\n_MTL_PRIVATE_DEF_SEL(newSharedEvent,\n    \"newSharedEvent\");\n_MTL_PRIVATE_DEF_SEL(newSharedEventHandle,\n    \"newSharedEventHandle\");\n_MTL_PRIVATE_DEF_SEL(newSharedEventWithHandle_,\n    
\"newSharedEventWithHandle:\");\n_MTL_PRIVATE_DEF_SEL(newSharedTextureHandle,\n    \"newSharedTextureHandle\");\n_MTL_PRIVATE_DEF_SEL(newSharedTextureWithDescriptor_,\n    \"newSharedTextureWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newSharedTextureWithHandle_,\n    \"newSharedTextureWithHandle:\");\n_MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_,\n    \"newTextureViewWithPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_,\n    \"newTextureViewWithPixelFormat:textureType:levels:slices:\");\n_MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_swizzle_,\n    \"newTextureViewWithPixelFormat:textureType:levels:slices:swizzle:\");\n_MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_,\n    \"newTextureWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_iosurface_plane_,\n    \"newTextureWithDescriptor:iosurface:plane:\");\n_MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_offset_,\n    \"newTextureWithDescriptor:offset:\");\n_MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_offset_bytesPerRow_,\n    \"newTextureWithDescriptor:offset:bytesPerRow:\");\n_MTL_PRIVATE_DEF_SEL(newVisibleFunctionTableWithDescriptor_,\n    \"newVisibleFunctionTableWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(newVisibleFunctionTableWithDescriptor_stage_,\n    \"newVisibleFunctionTableWithDescriptor:stage:\");\n_MTL_PRIVATE_DEF_SEL(nodes,\n    \"nodes\");\n_MTL_PRIVATE_DEF_SEL(normalizedCoordinates,\n    \"normalizedCoordinates\");\n_MTL_PRIVATE_DEF_SEL(notifyListener_atValue_block_,\n    \"notifyListener:atValue:block:\");\n_MTL_PRIVATE_DEF_SEL(objectAtIndexedSubscript_,\n    \"objectAtIndexedSubscript:\");\n_MTL_PRIVATE_DEF_SEL(objectBindings,\n    \"objectBindings\");\n_MTL_PRIVATE_DEF_SEL(objectBuffers,\n    \"objectBuffers\");\n_MTL_PRIVATE_DEF_SEL(objectFunction,\n    \"objectFunction\");\n_MTL_PRIVATE_DEF_SEL(objectLinkedFunctions,\n    \"objectLinkedFunctions\");\n_MTL_PRIVATE_DEF_SEL(objectPayloadAlignment,\n    
\"objectPayloadAlignment\");\n_MTL_PRIVATE_DEF_SEL(objectPayloadDataSize,\n    \"objectPayloadDataSize\");\n_MTL_PRIVATE_DEF_SEL(objectThreadExecutionWidth,\n    \"objectThreadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(objectThreadgroupSizeIsMultipleOfThreadExecutionWidth,\n    \"objectThreadgroupSizeIsMultipleOfThreadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(offset,\n    \"offset\");\n_MTL_PRIVATE_DEF_SEL(opaque,\n    \"opaque\");\n_MTL_PRIVATE_DEF_SEL(optimizationLevel,\n    \"optimizationLevel\");\n_MTL_PRIVATE_DEF_SEL(optimizeContentsForCPUAccess_,\n    \"optimizeContentsForCPUAccess:\");\n_MTL_PRIVATE_DEF_SEL(optimizeContentsForCPUAccess_slice_level_,\n    \"optimizeContentsForCPUAccess:slice:level:\");\n_MTL_PRIVATE_DEF_SEL(optimizeContentsForGPUAccess_,\n    \"optimizeContentsForGPUAccess:\");\n_MTL_PRIVATE_DEF_SEL(optimizeContentsForGPUAccess_slice_level_,\n    \"optimizeContentsForGPUAccess:slice:level:\");\n_MTL_PRIVATE_DEF_SEL(optimizeIndirectCommandBuffer_withRange_,\n    \"optimizeIndirectCommandBuffer:withRange:\");\n_MTL_PRIVATE_DEF_SEL(options,\n    \"options\");\n_MTL_PRIVATE_DEF_SEL(outputNode,\n    \"outputNode\");\n_MTL_PRIVATE_DEF_SEL(outputURL,\n    \"outputURL\");\n_MTL_PRIVATE_DEF_SEL(parallelRenderCommandEncoderWithDescriptor_,\n    \"parallelRenderCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(parameterBufferSizeAndAlign,\n    \"parameterBufferSizeAndAlign\");\n_MTL_PRIVATE_DEF_SEL(parentRelativeLevel,\n    \"parentRelativeLevel\");\n_MTL_PRIVATE_DEF_SEL(parentRelativeSlice,\n    \"parentRelativeSlice\");\n_MTL_PRIVATE_DEF_SEL(parentTexture,\n    \"parentTexture\");\n_MTL_PRIVATE_DEF_SEL(patchControlPointCount,\n    \"patchControlPointCount\");\n_MTL_PRIVATE_DEF_SEL(patchType,\n    \"patchType\");\n_MTL_PRIVATE_DEF_SEL(payloadMemoryLength,\n    \"payloadMemoryLength\");\n_MTL_PRIVATE_DEF_SEL(peerCount,\n    \"peerCount\");\n_MTL_PRIVATE_DEF_SEL(peerGroupID,\n    \"peerGroupID\");\n_MTL_PRIVATE_DEF_SEL(peerIndex,\n    
\"peerIndex\");\n_MTL_PRIVATE_DEF_SEL(physicalGranularity,\n    \"physicalGranularity\");\n_MTL_PRIVATE_DEF_SEL(physicalSizeForLayer_,\n    \"physicalSizeForLayer:\");\n_MTL_PRIVATE_DEF_SEL(pixelFormat,\n    \"pixelFormat\");\n_MTL_PRIVATE_DEF_SEL(pointerType,\n    \"pointerType\");\n_MTL_PRIVATE_DEF_SEL(popDebugGroup,\n    \"popDebugGroup\");\n_MTL_PRIVATE_DEF_SEL(preloadedLibraries,\n    \"preloadedLibraries\");\n_MTL_PRIVATE_DEF_SEL(preprocessorMacros,\n    \"preprocessorMacros\");\n_MTL_PRIVATE_DEF_SEL(present,\n    \"present\");\n_MTL_PRIVATE_DEF_SEL(presentAfterMinimumDuration_,\n    \"presentAfterMinimumDuration:\");\n_MTL_PRIVATE_DEF_SEL(presentAtTime_,\n    \"presentAtTime:\");\n_MTL_PRIVATE_DEF_SEL(presentDrawable_,\n    \"presentDrawable:\");\n_MTL_PRIVATE_DEF_SEL(presentDrawable_afterMinimumDuration_,\n    \"presentDrawable:afterMinimumDuration:\");\n_MTL_PRIVATE_DEF_SEL(presentDrawable_atTime_,\n    \"presentDrawable:atTime:\");\n_MTL_PRIVATE_DEF_SEL(presentedTime,\n    \"presentedTime\");\n_MTL_PRIVATE_DEF_SEL(preserveInvariance,\n    \"preserveInvariance\");\n_MTL_PRIVATE_DEF_SEL(primitiveDataBuffer,\n    \"primitiveDataBuffer\");\n_MTL_PRIVATE_DEF_SEL(primitiveDataBufferOffset,\n    \"primitiveDataBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(primitiveDataElementSize,\n    \"primitiveDataElementSize\");\n_MTL_PRIVATE_DEF_SEL(primitiveDataStride,\n    \"primitiveDataStride\");\n_MTL_PRIVATE_DEF_SEL(priority,\n    \"priority\");\n_MTL_PRIVATE_DEF_SEL(privateFunctions,\n    \"privateFunctions\");\n_MTL_PRIVATE_DEF_SEL(pushDebugGroup_,\n    \"pushDebugGroup:\");\n_MTL_PRIVATE_DEF_SEL(rAddressMode,\n    \"rAddressMode\");\n_MTL_PRIVATE_DEF_SEL(radiusBuffer,\n    \"radiusBuffer\");\n_MTL_PRIVATE_DEF_SEL(radiusBufferOffset,\n    \"radiusBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(radiusBuffers,\n    \"radiusBuffers\");\n_MTL_PRIVATE_DEF_SEL(radiusFormat,\n    \"radiusFormat\");\n_MTL_PRIVATE_DEF_SEL(radiusStride,\n    
\"radiusStride\");\n_MTL_PRIVATE_DEF_SEL(rasterSampleCount,\n    \"rasterSampleCount\");\n_MTL_PRIVATE_DEF_SEL(rasterizationRateMap,\n    \"rasterizationRateMap\");\n_MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_,\n    \"rasterizationRateMapDescriptorWithScreenSize:\");\n_MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_layer_,\n    \"rasterizationRateMapDescriptorWithScreenSize:layer:\");\n_MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_layerCount_layers_,\n    \"rasterizationRateMapDescriptorWithScreenSize:layerCount:layers:\");\n_MTL_PRIVATE_DEF_SEL(readMask,\n    \"readMask\");\n_MTL_PRIVATE_DEF_SEL(readWriteTextureSupport,\n    \"readWriteTextureSupport\");\n_MTL_PRIVATE_DEF_SEL(recommendedMaxWorkingSetSize,\n    \"recommendedMaxWorkingSetSize\");\n_MTL_PRIVATE_DEF_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_,\n    \"refitAccelerationStructure:descriptor:destination:scratchBuffer:scratchBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_options_,\n    \"refitAccelerationStructure:descriptor:destination:scratchBuffer:scratchBufferOffset:options:\");\n_MTL_PRIVATE_DEF_SEL(registryID,\n    \"registryID\");\n_MTL_PRIVATE_DEF_SEL(remoteStorageBuffer,\n    \"remoteStorageBuffer\");\n_MTL_PRIVATE_DEF_SEL(remoteStorageTexture,\n    \"remoteStorageTexture\");\n_MTL_PRIVATE_DEF_SEL(removeAllAllocations,\n    \"removeAllAllocations\");\n_MTL_PRIVATE_DEF_SEL(removeAllDebugMarkers,\n    \"removeAllDebugMarkers\");\n_MTL_PRIVATE_DEF_SEL(removeAllocation_,\n    \"removeAllocation:\");\n_MTL_PRIVATE_DEF_SEL(removeAllocations_count_,\n    \"removeAllocations:count:\");\n_MTL_PRIVATE_DEF_SEL(removeResidencySet_,\n    \"removeResidencySet:\");\n_MTL_PRIVATE_DEF_SEL(removeResidencySets_count_,\n    \"removeResidencySets:count:\");\n_MTL_PRIVATE_DEF_SEL(renderCommandEncoder,\n    
\"renderCommandEncoder\");\n_MTL_PRIVATE_DEF_SEL(renderCommandEncoderWithDescriptor_,\n    \"renderCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(renderPassDescriptor,\n    \"renderPassDescriptor\");\n_MTL_PRIVATE_DEF_SEL(renderTargetArrayLength,\n    \"renderTargetArrayLength\");\n_MTL_PRIVATE_DEF_SEL(renderTargetHeight,\n    \"renderTargetHeight\");\n_MTL_PRIVATE_DEF_SEL(renderTargetWidth,\n    \"renderTargetWidth\");\n_MTL_PRIVATE_DEF_SEL(replaceRegion_mipmapLevel_slice_withBytes_bytesPerRow_bytesPerImage_,\n    \"replaceRegion:mipmapLevel:slice:withBytes:bytesPerRow:bytesPerImage:\");\n_MTL_PRIVATE_DEF_SEL(replaceRegion_mipmapLevel_withBytes_bytesPerRow_,\n    \"replaceRegion:mipmapLevel:withBytes:bytesPerRow:\");\n_MTL_PRIVATE_DEF_SEL(requestResidency,\n    \"requestResidency\");\n_MTL_PRIVATE_DEF_SEL(required,\n    \"required\");\n_MTL_PRIVATE_DEF_SEL(reset,\n    \"reset\");\n_MTL_PRIVATE_DEF_SEL(resetCommandsInBuffer_withRange_,\n    \"resetCommandsInBuffer:withRange:\");\n_MTL_PRIVATE_DEF_SEL(resetTextureAccessCounters_region_mipLevel_slice_,\n    \"resetTextureAccessCounters:region:mipLevel:slice:\");\n_MTL_PRIVATE_DEF_SEL(resetWithRange_,\n    \"resetWithRange:\");\n_MTL_PRIVATE_DEF_SEL(resolveCounterRange_,\n    \"resolveCounterRange:\");\n_MTL_PRIVATE_DEF_SEL(resolveCounters_inRange_destinationBuffer_destinationOffset_,\n    \"resolveCounters:inRange:destinationBuffer:destinationOffset:\");\n_MTL_PRIVATE_DEF_SEL(resolveDepthPlane,\n    \"resolveDepthPlane\");\n_MTL_PRIVATE_DEF_SEL(resolveLevel,\n    \"resolveLevel\");\n_MTL_PRIVATE_DEF_SEL(resolveSlice,\n    \"resolveSlice\");\n_MTL_PRIVATE_DEF_SEL(resolveTexture,\n    \"resolveTexture\");\n_MTL_PRIVATE_DEF_SEL(resourceOptions,\n    \"resourceOptions\");\n_MTL_PRIVATE_DEF_SEL(resourceStateCommandEncoder,\n    \"resourceStateCommandEncoder\");\n_MTL_PRIVATE_DEF_SEL(resourceStateCommandEncoderWithDescriptor_,\n    
\"resourceStateCommandEncoderWithDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(resourceStatePassDescriptor,\n    \"resourceStatePassDescriptor\");\n_MTL_PRIVATE_DEF_SEL(retainedReferences,\n    \"retainedReferences\");\n_MTL_PRIVATE_DEF_SEL(rgbBlendOperation,\n    \"rgbBlendOperation\");\n_MTL_PRIVATE_DEF_SEL(rootResource,\n    \"rootResource\");\n_MTL_PRIVATE_DEF_SEL(sAddressMode,\n    \"sAddressMode\");\n_MTL_PRIVATE_DEF_SEL(sampleBuffer,\n    \"sampleBuffer\");\n_MTL_PRIVATE_DEF_SEL(sampleBufferAttachments,\n    \"sampleBufferAttachments\");\n_MTL_PRIVATE_DEF_SEL(sampleCount,\n    \"sampleCount\");\n_MTL_PRIVATE_DEF_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_,\n    \"sampleCountersInBuffer:atSampleIndex:withBarrier:\");\n_MTL_PRIVATE_DEF_SEL(sampleTimestamps_gpuTimestamp_,\n    \"sampleTimestamps:gpuTimestamp:\");\n_MTL_PRIVATE_DEF_SEL(scratchBufferAllocator,\n    \"scratchBufferAllocator\");\n_MTL_PRIVATE_DEF_SEL(screenSize,\n    \"screenSize\");\n_MTL_PRIVATE_DEF_SEL(segmentControlPointCount,\n    \"segmentControlPointCount\");\n_MTL_PRIVATE_DEF_SEL(segmentCount,\n    \"segmentCount\");\n_MTL_PRIVATE_DEF_SEL(serializeToURL_error_,\n    \"serializeToURL:error:\");\n_MTL_PRIVATE_DEF_SEL(setAccelerationStructure_atBufferIndex_,\n    \"setAccelerationStructure:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setAccelerationStructure_atIndex_,\n    \"setAccelerationStructure:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setAccess_,\n    \"setAccess:\");\n_MTL_PRIVATE_DEF_SEL(setAllowDuplicateIntersectionFunctionInvocation_,\n    \"setAllowDuplicateIntersectionFunctionInvocation:\");\n_MTL_PRIVATE_DEF_SEL(setAllowGPUOptimizedContents_,\n    \"setAllowGPUOptimizedContents:\");\n_MTL_PRIVATE_DEF_SEL(setAllowReferencingUndefinedSymbols_,\n    \"setAllowReferencingUndefinedSymbols:\");\n_MTL_PRIVATE_DEF_SEL(setAlphaBlendOperation_,\n    \"setAlphaBlendOperation:\");\n_MTL_PRIVATE_DEF_SEL(setAlphaToCoverageEnabled_,\n    
\"setAlphaToCoverageEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setAlphaToOneEnabled_,\n    \"setAlphaToOneEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setArgumentBuffer_offset_,\n    \"setArgumentBuffer:offset:\");\n_MTL_PRIVATE_DEF_SEL(setArgumentBuffer_startOffset_arrayElement_,\n    \"setArgumentBuffer:startOffset:arrayElement:\");\n_MTL_PRIVATE_DEF_SEL(setArgumentIndex_,\n    \"setArgumentIndex:\");\n_MTL_PRIVATE_DEF_SEL(setArguments_,\n    \"setArguments:\");\n_MTL_PRIVATE_DEF_SEL(setArrayLength_,\n    \"setArrayLength:\");\n_MTL_PRIVATE_DEF_SEL(setAttributes_,\n    \"setAttributes:\");\n_MTL_PRIVATE_DEF_SEL(setBackFaceStencil_,\n    \"setBackFaceStencil:\");\n_MTL_PRIVATE_DEF_SEL(setBarrier,\n    \"setBarrier\");\n_MTL_PRIVATE_DEF_SEL(setBinaryArchives_,\n    \"setBinaryArchives:\");\n_MTL_PRIVATE_DEF_SEL(setBinaryFunctions_,\n    \"setBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setBlendColorRed_green_blue_alpha_,\n    \"setBlendColorRed:green:blue:alpha:\");\n_MTL_PRIVATE_DEF_SEL(setBlendingEnabled_,\n    \"setBlendingEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setBorderColor_,\n    \"setBorderColor:\");\n_MTL_PRIVATE_DEF_SEL(setBoundingBoxBuffer_,\n    \"setBoundingBoxBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setBoundingBoxBufferOffset_,\n    \"setBoundingBoxBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setBoundingBoxBuffers_,\n    \"setBoundingBoxBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setBoundingBoxCount_,\n    \"setBoundingBoxCount:\");\n_MTL_PRIVATE_DEF_SEL(setBoundingBoxStride_,\n    \"setBoundingBoxStride:\");\n_MTL_PRIVATE_DEF_SEL(setBuffer_,\n    \"setBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setBuffer_offset_atIndex_,\n    \"setBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBuffer_offset_attributeStride_atIndex_,\n    \"setBuffer:offset:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBufferIndex_,\n    \"setBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBufferOffset_atIndex_,\n    \"setBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBufferOffset_attributeStride_atIndex_,\n    
\"setBufferOffset:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBufferSize_,\n    \"setBufferSize:\");\n_MTL_PRIVATE_DEF_SEL(setBuffers_offsets_attributeStrides_withRange_,\n    \"setBuffers:offsets:attributeStrides:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setBuffers_offsets_withRange_,\n    \"setBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setBytes_length_atIndex_,\n    \"setBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setBytes_length_attributeStride_atIndex_,\n    \"setBytes:length:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setCaptureObject_,\n    \"setCaptureObject:\");\n_MTL_PRIVATE_DEF_SEL(setClearColor_,\n    \"setClearColor:\");\n_MTL_PRIVATE_DEF_SEL(setClearDepth_,\n    \"setClearDepth:\");\n_MTL_PRIVATE_DEF_SEL(setClearStencil_,\n    \"setClearStencil:\");\n_MTL_PRIVATE_DEF_SEL(setColorStoreAction_atIndex_,\n    \"setColorStoreAction:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setColorStoreActionOptions_atIndex_,\n    \"setColorStoreActionOptions:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setCommandTypes_,\n    \"setCommandTypes:\");\n_MTL_PRIVATE_DEF_SEL(setCompareFunction_,\n    \"setCompareFunction:\");\n_MTL_PRIVATE_DEF_SEL(setCompileSymbolVisibility_,\n    \"setCompileSymbolVisibility:\");\n_MTL_PRIVATE_DEF_SEL(setCompressionType_,\n    \"setCompressionType:\");\n_MTL_PRIVATE_DEF_SEL(setComputeFunction_,\n    \"setComputeFunction:\");\n_MTL_PRIVATE_DEF_SEL(setComputePipelineState_,\n    \"setComputePipelineState:\");\n_MTL_PRIVATE_DEF_SEL(setComputePipelineState_atIndex_,\n    \"setComputePipelineState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setComputePipelineStates_withRange_,\n    \"setComputePipelineStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setConstantBlockAlignment_,\n    \"setConstantBlockAlignment:\");\n_MTL_PRIVATE_DEF_SEL(setConstantValue_type_atIndex_,\n    \"setConstantValue:type:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setConstantValue_type_withName_,\n    \"setConstantValue:type:withName:\");\n_MTL_PRIVATE_DEF_SEL(setConstantValues_,\n    
\"setConstantValues:\");\n_MTL_PRIVATE_DEF_SEL(setConstantValues_type_withRange_,\n    \"setConstantValues:type:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setControlDependencies_,\n    \"setControlDependencies:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointBuffer_,\n    \"setControlPointBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointBufferOffset_,\n    \"setControlPointBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointBuffers_,\n    \"setControlPointBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointCount_,\n    \"setControlPointCount:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointFormat_,\n    \"setControlPointFormat:\");\n_MTL_PRIVATE_DEF_SEL(setControlPointStride_,\n    \"setControlPointStride:\");\n_MTL_PRIVATE_DEF_SEL(setCounterSet_,\n    \"setCounterSet:\");\n_MTL_PRIVATE_DEF_SEL(setCpuCacheMode_,\n    \"setCpuCacheMode:\");\n_MTL_PRIVATE_DEF_SEL(setCullMode_,\n    \"setCullMode:\");\n_MTL_PRIVATE_DEF_SEL(setCurveBasis_,\n    \"setCurveBasis:\");\n_MTL_PRIVATE_DEF_SEL(setCurveEndCaps_,\n    \"setCurveEndCaps:\");\n_MTL_PRIVATE_DEF_SEL(setCurveType_,\n    \"setCurveType:\");\n_MTL_PRIVATE_DEF_SEL(setDataType_,\n    \"setDataType:\");\n_MTL_PRIVATE_DEF_SEL(setDefaultCaptureScope_,\n    \"setDefaultCaptureScope:\");\n_MTL_PRIVATE_DEF_SEL(setDefaultRasterSampleCount_,\n    \"setDefaultRasterSampleCount:\");\n_MTL_PRIVATE_DEF_SEL(setDepth_,\n    \"setDepth:\");\n_MTL_PRIVATE_DEF_SEL(setDepthAttachment_,\n    \"setDepthAttachment:\");\n_MTL_PRIVATE_DEF_SEL(setDepthAttachmentPixelFormat_,\n    \"setDepthAttachmentPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(setDepthBias_slopeScale_clamp_,\n    \"setDepthBias:slopeScale:clamp:\");\n_MTL_PRIVATE_DEF_SEL(setDepthClipMode_,\n    \"setDepthClipMode:\");\n_MTL_PRIVATE_DEF_SEL(setDepthCompareFunction_,\n    \"setDepthCompareFunction:\");\n_MTL_PRIVATE_DEF_SEL(setDepthFailureOperation_,\n    \"setDepthFailureOperation:\");\n_MTL_PRIVATE_DEF_SEL(setDepthPlane_,\n    \"setDepthPlane:\");\n_MTL_PRIVATE_DEF_SEL(setDepthResolveFilter_,\n    
\"setDepthResolveFilter:\");\n_MTL_PRIVATE_DEF_SEL(setDepthStencilPassOperation_,\n    \"setDepthStencilPassOperation:\");\n_MTL_PRIVATE_DEF_SEL(setDepthStencilState_,\n    \"setDepthStencilState:\");\n_MTL_PRIVATE_DEF_SEL(setDepthStoreAction_,\n    \"setDepthStoreAction:\");\n_MTL_PRIVATE_DEF_SEL(setDepthStoreActionOptions_,\n    \"setDepthStoreActionOptions:\");\n_MTL_PRIVATE_DEF_SEL(setDepthWriteEnabled_,\n    \"setDepthWriteEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setDestination_,\n    \"setDestination:\");\n_MTL_PRIVATE_DEF_SEL(setDestinationAlphaBlendFactor_,\n    \"setDestinationAlphaBlendFactor:\");\n_MTL_PRIVATE_DEF_SEL(setDestinationRGBBlendFactor_,\n    \"setDestinationRGBBlendFactor:\");\n_MTL_PRIVATE_DEF_SEL(setDispatchType_,\n    \"setDispatchType:\");\n_MTL_PRIVATE_DEF_SEL(setEnableLogging_,\n    \"setEnableLogging:\");\n_MTL_PRIVATE_DEF_SEL(setEndOfEncoderSampleIndex_,\n    \"setEndOfEncoderSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setEndOfFragmentSampleIndex_,\n    \"setEndOfFragmentSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setEndOfVertexSampleIndex_,\n    \"setEndOfVertexSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setErrorOptions_,\n    \"setErrorOptions:\");\n_MTL_PRIVATE_DEF_SEL(setFastMathEnabled_,\n    \"setFastMathEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setFormat_,\n    \"setFormat:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentAccelerationStructure_atBufferIndex_,\n    \"setFragmentAccelerationStructure:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentAdditionalBinaryFunctions_,\n    \"setFragmentAdditionalBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentBuffer_offset_atIndex_,\n    \"setFragmentBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentBufferOffset_atIndex_,\n    \"setFragmentBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentBuffers_offsets_withRange_,\n    \"setFragmentBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentBytes_length_atIndex_,\n    
\"setFragmentBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentFunction_,\n    \"setFragmentFunction:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentIntersectionFunctionTable_atBufferIndex_,\n    \"setFragmentIntersectionFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentIntersectionFunctionTables_withBufferRange_,\n    \"setFragmentIntersectionFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentLinkedFunctions_,\n    \"setFragmentLinkedFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentPreloadedLibraries_,\n    \"setFragmentPreloadedLibraries:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentSamplerState_atIndex_,\n    \"setFragmentSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setFragmentSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setFragmentSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentSamplerStates_withRange_,\n    \"setFragmentSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentTexture_atIndex_,\n    \"setFragmentTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentTextures_withRange_,\n    \"setFragmentTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentVisibleFunctionTable_atBufferIndex_,\n    \"setFragmentVisibleFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFragmentVisibleFunctionTables_withBufferRange_,\n    \"setFragmentVisibleFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setFrontFaceStencil_,\n    \"setFrontFaceStencil:\");\n_MTL_PRIVATE_DEF_SEL(setFrontFacingWinding_,\n    \"setFrontFacingWinding:\");\n_MTL_PRIVATE_DEF_SEL(setFunction_atIndex_,\n    \"setFunction:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setFunctionCount_,\n    \"setFunctionCount:\");\n_MTL_PRIVATE_DEF_SEL(setFunctionGraphs_,\n    \"setFunctionGraphs:\");\n_MTL_PRIVATE_DEF_SEL(setFunctionName_,\n    
\"setFunctionName:\");\n_MTL_PRIVATE_DEF_SEL(setFunctions_,\n    \"setFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setFunctions_withRange_,\n    \"setFunctions:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setGeometryDescriptors_,\n    \"setGeometryDescriptors:\");\n_MTL_PRIVATE_DEF_SEL(setGroups_,\n    \"setGroups:\");\n_MTL_PRIVATE_DEF_SEL(setHazardTrackingMode_,\n    \"setHazardTrackingMode:\");\n_MTL_PRIVATE_DEF_SEL(setHeight_,\n    \"setHeight:\");\n_MTL_PRIVATE_DEF_SEL(setImageblockSampleLength_,\n    \"setImageblockSampleLength:\");\n_MTL_PRIVATE_DEF_SEL(setImageblockWidth_height_,\n    \"setImageblockWidth:height:\");\n_MTL_PRIVATE_DEF_SEL(setIndex_,\n    \"setIndex:\");\n_MTL_PRIVATE_DEF_SEL(setIndexBuffer_,\n    \"setIndexBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setIndexBufferIndex_,\n    \"setIndexBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setIndexBufferOffset_,\n    \"setIndexBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setIndexType_,\n    \"setIndexType:\");\n_MTL_PRIVATE_DEF_SEL(setIndirectCommandBuffer_atIndex_,\n    \"setIndirectCommandBuffer:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setIndirectCommandBuffers_withRange_,\n    \"setIndirectCommandBuffers:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setInheritBuffers_,\n    \"setInheritBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setInheritPipelineState_,\n    \"setInheritPipelineState:\");\n_MTL_PRIVATE_DEF_SEL(setInitialCapacity_,\n    \"setInitialCapacity:\");\n_MTL_PRIVATE_DEF_SEL(setInputPrimitiveTopology_,\n    \"setInputPrimitiveTopology:\");\n_MTL_PRIVATE_DEF_SEL(setInsertLibraries_,\n    \"setInsertLibraries:\");\n_MTL_PRIVATE_DEF_SEL(setInstallName_,\n    \"setInstallName:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceCount_,\n    \"setInstanceCount:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceCountBuffer_,\n    \"setInstanceCountBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceCountBufferOffset_,\n    \"setInstanceCountBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceDescriptorBuffer_,\n    
\"setInstanceDescriptorBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceDescriptorBufferOffset_,\n    \"setInstanceDescriptorBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceDescriptorStride_,\n    \"setInstanceDescriptorStride:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceDescriptorType_,\n    \"setInstanceDescriptorType:\");\n_MTL_PRIVATE_DEF_SEL(setInstanceTransformationMatrixLayout_,\n    \"setInstanceTransformationMatrixLayout:\");\n_MTL_PRIVATE_DEF_SEL(setInstancedAccelerationStructures_,\n    \"setInstancedAccelerationStructures:\");\n_MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTable_atBufferIndex_,\n    \"setIntersectionFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTable_atIndex_,\n    \"setIntersectionFunctionTable:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTableOffset_,\n    \"setIntersectionFunctionTableOffset:\");\n_MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTables_withBufferRange_,\n    \"setIntersectionFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTables_withRange_,\n    \"setIntersectionFunctionTables:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setKernelBuffer_offset_atIndex_,\n    \"setKernelBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setKernelBuffer_offset_attributeStride_atIndex_,\n    \"setKernelBuffer:offset:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setLabel_,\n    \"setLabel:\");\n_MTL_PRIVATE_DEF_SEL(setLanguageVersion_,\n    \"setLanguageVersion:\");\n_MTL_PRIVATE_DEF_SEL(setLayer_atIndex_,\n    \"setLayer:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setLevel_,\n    \"setLevel:\");\n_MTL_PRIVATE_DEF_SEL(setLibraries_,\n    \"setLibraries:\");\n_MTL_PRIVATE_DEF_SEL(setLibraryType_,\n    \"setLibraryType:\");\n_MTL_PRIVATE_DEF_SEL(setLinkedFunctions_,\n    \"setLinkedFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setLoadAction_,\n    \"setLoadAction:\");\n_MTL_PRIVATE_DEF_SEL(setLodAverage_,\n    \"setLodAverage:\");\n_MTL_PRIVATE_DEF_SEL(setLodMaxClamp_,\n    
\"setLodMaxClamp:\");\n_MTL_PRIVATE_DEF_SEL(setLodMinClamp_,\n    \"setLodMinClamp:\");\n_MTL_PRIVATE_DEF_SEL(setLogState_,\n    \"setLogState:\");\n_MTL_PRIVATE_DEF_SEL(setMagFilter_,\n    \"setMagFilter:\");\n_MTL_PRIVATE_DEF_SEL(setMathFloatingPointFunctions_,\n    \"setMathFloatingPointFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setMathMode_,\n    \"setMathMode:\");\n_MTL_PRIVATE_DEF_SEL(setMaxAnisotropy_,\n    \"setMaxAnisotropy:\");\n_MTL_PRIVATE_DEF_SEL(setMaxCallStackDepth_,\n    \"setMaxCallStackDepth:\");\n_MTL_PRIVATE_DEF_SEL(setMaxCommandBufferCount_,\n    \"setMaxCommandBufferCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxCommandsInFlight_,\n    \"setMaxCommandsInFlight:\");\n_MTL_PRIVATE_DEF_SEL(setMaxFragmentBufferBindCount_,\n    \"setMaxFragmentBufferBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxFragmentCallStackDepth_,\n    \"setMaxFragmentCallStackDepth:\");\n_MTL_PRIVATE_DEF_SEL(setMaxInstanceCount_,\n    \"setMaxInstanceCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxKernelBufferBindCount_,\n    \"setMaxKernelBufferBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxKernelThreadgroupMemoryBindCount_,\n    \"setMaxKernelThreadgroupMemoryBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxMeshBufferBindCount_,\n    \"setMaxMeshBufferBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxMotionTransformCount_,\n    \"setMaxMotionTransformCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxObjectBufferBindCount_,\n    \"setMaxObjectBufferBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxObjectThreadgroupMemoryBindCount_,\n    \"setMaxObjectThreadgroupMemoryBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxTessellationFactor_,\n    \"setMaxTessellationFactor:\");\n_MTL_PRIVATE_DEF_SEL(setMaxTotalThreadgroupsPerMeshGrid_,\n    \"setMaxTotalThreadgroupsPerMeshGrid:\");\n_MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerMeshThreadgroup_,\n    \"setMaxTotalThreadsPerMeshThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerObjectThreadgroup_,\n    
\"setMaxTotalThreadsPerObjectThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerThreadgroup_,\n    \"setMaxTotalThreadsPerThreadgroup:\");\n_MTL_PRIVATE_DEF_SEL(setMaxVertexAmplificationCount_,\n    \"setMaxVertexAmplificationCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxVertexBufferBindCount_,\n    \"setMaxVertexBufferBindCount:\");\n_MTL_PRIVATE_DEF_SEL(setMaxVertexCallStackDepth_,\n    \"setMaxVertexCallStackDepth:\");\n_MTL_PRIVATE_DEF_SEL(setMeshBuffer_offset_atIndex_,\n    \"setMeshBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshBufferOffset_atIndex_,\n    \"setMeshBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshBuffers_offsets_withRange_,\n    \"setMeshBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setMeshBytes_length_atIndex_,\n    \"setMeshBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshFunction_,\n    \"setMeshFunction:\");\n_MTL_PRIVATE_DEF_SEL(setMeshLinkedFunctions_,\n    \"setMeshLinkedFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setMeshSamplerState_atIndex_,\n    \"setMeshSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setMeshSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setMeshSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setMeshSamplerStates_withRange_,\n    \"setMeshSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setMeshTexture_atIndex_,\n    \"setMeshTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setMeshTextures_withRange_,\n    \"setMeshTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth_,\n    \"setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth:\");\n_MTL_PRIVATE_DEF_SEL(setMinFilter_,\n    \"setMinFilter:\");\n_MTL_PRIVATE_DEF_SEL(setMipFilter_,\n    \"setMipFilter:\");\n_MTL_PRIVATE_DEF_SEL(setMipmapLevelCount_,\n    
\"setMipmapLevelCount:\");\n_MTL_PRIVATE_DEF_SEL(setMotionEndBorderMode_,\n    \"setMotionEndBorderMode:\");\n_MTL_PRIVATE_DEF_SEL(setMotionEndTime_,\n    \"setMotionEndTime:\");\n_MTL_PRIVATE_DEF_SEL(setMotionKeyframeCount_,\n    \"setMotionKeyframeCount:\");\n_MTL_PRIVATE_DEF_SEL(setMotionStartBorderMode_,\n    \"setMotionStartBorderMode:\");\n_MTL_PRIVATE_DEF_SEL(setMotionStartTime_,\n    \"setMotionStartTime:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformBuffer_,\n    \"setMotionTransformBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformBufferOffset_,\n    \"setMotionTransformBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformCount_,\n    \"setMotionTransformCount:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformCountBuffer_,\n    \"setMotionTransformCountBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformCountBufferOffset_,\n    \"setMotionTransformCountBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformStride_,\n    \"setMotionTransformStride:\");\n_MTL_PRIVATE_DEF_SEL(setMotionTransformType_,\n    \"setMotionTransformType:\");\n_MTL_PRIVATE_DEF_SEL(setMutability_,\n    \"setMutability:\");\n_MTL_PRIVATE_DEF_SEL(setName_,\n    \"setName:\");\n_MTL_PRIVATE_DEF_SEL(setNodes_,\n    \"setNodes:\");\n_MTL_PRIVATE_DEF_SEL(setNormalizedCoordinates_,\n    \"setNormalizedCoordinates:\");\n_MTL_PRIVATE_DEF_SEL(setObject_atIndexedSubscript_,\n    \"setObject:atIndexedSubscript:\");\n_MTL_PRIVATE_DEF_SEL(setObjectBuffer_offset_atIndex_,\n    \"setObjectBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectBufferOffset_atIndex_,\n    \"setObjectBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectBuffers_offsets_withRange_,\n    \"setObjectBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setObjectBytes_length_atIndex_,\n    \"setObjectBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectFunction_,\n    \"setObjectFunction:\");\n_MTL_PRIVATE_DEF_SEL(setObjectLinkedFunctions_,\n    
\"setObjectLinkedFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setObjectSamplerState_atIndex_,\n    \"setObjectSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setObjectSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setObjectSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setObjectSamplerStates_withRange_,\n    \"setObjectSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setObjectTexture_atIndex_,\n    \"setObjectTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectTextures_withRange_,\n    \"setObjectTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setObjectThreadgroupMemoryLength_atIndex_,\n    \"setObjectThreadgroupMemoryLength:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth_,\n    \"setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth:\");\n_MTL_PRIVATE_DEF_SEL(setOffset_,\n    \"setOffset:\");\n_MTL_PRIVATE_DEF_SEL(setOpaque_,\n    \"setOpaque:\");\n_MTL_PRIVATE_DEF_SEL(setOpaqueCurveIntersectionFunctionWithSignature_atIndex_,\n    \"setOpaqueCurveIntersectionFunctionWithSignature:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setOpaqueCurveIntersectionFunctionWithSignature_withRange_,\n    \"setOpaqueCurveIntersectionFunctionWithSignature:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_atIndex_,\n    \"setOpaqueTriangleIntersectionFunctionWithSignature:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_withRange_,\n    \"setOpaqueTriangleIntersectionFunctionWithSignature:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setOptimizationLevel_,\n    \"setOptimizationLevel:\");\n_MTL_PRIVATE_DEF_SEL(setOptions_,\n    \"setOptions:\");\n_MTL_PRIVATE_DEF_SEL(setOutputNode_,\n    \"setOutputNode:\");\n_MTL_PRIVATE_DEF_SEL(setOutputURL_,\n    
\"setOutputURL:\");\n_MTL_PRIVATE_DEF_SEL(setOwnerWithIdentity_,\n    \"setOwnerWithIdentity:\");\n_MTL_PRIVATE_DEF_SEL(setPayloadMemoryLength_,\n    \"setPayloadMemoryLength:\");\n_MTL_PRIVATE_DEF_SEL(setPixelFormat_,\n    \"setPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(setPreloadedLibraries_,\n    \"setPreloadedLibraries:\");\n_MTL_PRIVATE_DEF_SEL(setPreprocessorMacros_,\n    \"setPreprocessorMacros:\");\n_MTL_PRIVATE_DEF_SEL(setPreserveInvariance_,\n    \"setPreserveInvariance:\");\n_MTL_PRIVATE_DEF_SEL(setPrimitiveDataBuffer_,\n    \"setPrimitiveDataBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setPrimitiveDataBufferOffset_,\n    \"setPrimitiveDataBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setPrimitiveDataElementSize_,\n    \"setPrimitiveDataElementSize:\");\n_MTL_PRIVATE_DEF_SEL(setPrimitiveDataStride_,\n    \"setPrimitiveDataStride:\");\n_MTL_PRIVATE_DEF_SEL(setPriority_,\n    \"setPriority:\");\n_MTL_PRIVATE_DEF_SEL(setPrivateFunctions_,\n    \"setPrivateFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setPurgeableState_,\n    \"setPurgeableState:\");\n_MTL_PRIVATE_DEF_SEL(setRAddressMode_,\n    \"setRAddressMode:\");\n_MTL_PRIVATE_DEF_SEL(setRadiusBuffer_,\n    \"setRadiusBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setRadiusBufferOffset_,\n    \"setRadiusBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setRadiusBuffers_,\n    \"setRadiusBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setRadiusFormat_,\n    \"setRadiusFormat:\");\n_MTL_PRIVATE_DEF_SEL(setRadiusStride_,\n    \"setRadiusStride:\");\n_MTL_PRIVATE_DEF_SEL(setRasterSampleCount_,\n    \"setRasterSampleCount:\");\n_MTL_PRIVATE_DEF_SEL(setRasterizationEnabled_,\n    \"setRasterizationEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setRasterizationRateMap_,\n    \"setRasterizationRateMap:\");\n_MTL_PRIVATE_DEF_SEL(setReadMask_,\n    \"setReadMask:\");\n_MTL_PRIVATE_DEF_SEL(setRenderPipelineState_,\n    \"setRenderPipelineState:\");\n_MTL_PRIVATE_DEF_SEL(setRenderPipelineState_atIndex_,\n    
\"setRenderPipelineState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setRenderPipelineStates_withRange_,\n    \"setRenderPipelineStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setRenderTargetArrayLength_,\n    \"setRenderTargetArrayLength:\");\n_MTL_PRIVATE_DEF_SEL(setRenderTargetHeight_,\n    \"setRenderTargetHeight:\");\n_MTL_PRIVATE_DEF_SEL(setRenderTargetWidth_,\n    \"setRenderTargetWidth:\");\n_MTL_PRIVATE_DEF_SEL(setResolveDepthPlane_,\n    \"setResolveDepthPlane:\");\n_MTL_PRIVATE_DEF_SEL(setResolveLevel_,\n    \"setResolveLevel:\");\n_MTL_PRIVATE_DEF_SEL(setResolveSlice_,\n    \"setResolveSlice:\");\n_MTL_PRIVATE_DEF_SEL(setResolveTexture_,\n    \"setResolveTexture:\");\n_MTL_PRIVATE_DEF_SEL(setResourceOptions_,\n    \"setResourceOptions:\");\n_MTL_PRIVATE_DEF_SEL(setRetainedReferences_,\n    \"setRetainedReferences:\");\n_MTL_PRIVATE_DEF_SEL(setRgbBlendOperation_,\n    \"setRgbBlendOperation:\");\n_MTL_PRIVATE_DEF_SEL(setSAddressMode_,\n    \"setSAddressMode:\");\n_MTL_PRIVATE_DEF_SEL(setSampleBuffer_,\n    \"setSampleBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setSampleCount_,\n    \"setSampleCount:\");\n_MTL_PRIVATE_DEF_SEL(setSamplePositions_count_,\n    \"setSamplePositions:count:\");\n_MTL_PRIVATE_DEF_SEL(setSamplerState_atIndex_,\n    \"setSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setSamplerStates_withRange_,\n    \"setSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setScissorRect_,\n    \"setScissorRect:\");\n_MTL_PRIVATE_DEF_SEL(setScissorRects_count_,\n    \"setScissorRects:count:\");\n_MTL_PRIVATE_DEF_SEL(setScratchBufferAllocator_,\n    \"setScratchBufferAllocator:\");\n_MTL_PRIVATE_DEF_SEL(setScreenSize_,\n    \"setScreenSize:\");\n_MTL_PRIVATE_DEF_SEL(setSegmentControlPointCount_,\n    
\"setSegmentControlPointCount:\");\n_MTL_PRIVATE_DEF_SEL(setSegmentCount_,\n    \"setSegmentCount:\");\n_MTL_PRIVATE_DEF_SEL(setShaderValidation_,\n    \"setShaderValidation:\");\n_MTL_PRIVATE_DEF_SEL(setShouldMaximizeConcurrentCompilation_,\n    \"setShouldMaximizeConcurrentCompilation:\");\n_MTL_PRIVATE_DEF_SEL(setSignaledValue_,\n    \"setSignaledValue:\");\n_MTL_PRIVATE_DEF_SEL(setSize_,\n    \"setSize:\");\n_MTL_PRIVATE_DEF_SEL(setSlice_,\n    \"setSlice:\");\n_MTL_PRIVATE_DEF_SEL(setSourceAlphaBlendFactor_,\n    \"setSourceAlphaBlendFactor:\");\n_MTL_PRIVATE_DEF_SEL(setSourceRGBBlendFactor_,\n    \"setSourceRGBBlendFactor:\");\n_MTL_PRIVATE_DEF_SEL(setSparsePageSize_,\n    \"setSparsePageSize:\");\n_MTL_PRIVATE_DEF_SEL(setSpecializedName_,\n    \"setSpecializedName:\");\n_MTL_PRIVATE_DEF_SEL(setStageInRegion_,\n    \"setStageInRegion:\");\n_MTL_PRIVATE_DEF_SEL(setStageInRegionWithIndirectBuffer_indirectBufferOffset_,\n    \"setStageInRegionWithIndirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setStageInputDescriptor_,\n    \"setStageInputDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(setStartOfEncoderSampleIndex_,\n    \"setStartOfEncoderSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setStartOfFragmentSampleIndex_,\n    \"setStartOfFragmentSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setStartOfVertexSampleIndex_,\n    \"setStartOfVertexSampleIndex:\");\n_MTL_PRIVATE_DEF_SEL(setStencilAttachment_,\n    \"setStencilAttachment:\");\n_MTL_PRIVATE_DEF_SEL(setStencilAttachmentPixelFormat_,\n    \"setStencilAttachmentPixelFormat:\");\n_MTL_PRIVATE_DEF_SEL(setStencilCompareFunction_,\n    \"setStencilCompareFunction:\");\n_MTL_PRIVATE_DEF_SEL(setStencilFailureOperation_,\n    \"setStencilFailureOperation:\");\n_MTL_PRIVATE_DEF_SEL(setStencilFrontReferenceValue_backReferenceValue_,\n    \"setStencilFrontReferenceValue:backReferenceValue:\");\n_MTL_PRIVATE_DEF_SEL(setStencilReferenceValue_,\n    \"setStencilReferenceValue:\");\n_MTL_PRIVATE_DEF_SEL(setStencilResolveFilter_,\n    
\"setStencilResolveFilter:\");\n_MTL_PRIVATE_DEF_SEL(setStencilStoreAction_,\n    \"setStencilStoreAction:\");\n_MTL_PRIVATE_DEF_SEL(setStencilStoreActionOptions_,\n    \"setStencilStoreActionOptions:\");\n_MTL_PRIVATE_DEF_SEL(setStepFunction_,\n    \"setStepFunction:\");\n_MTL_PRIVATE_DEF_SEL(setStepRate_,\n    \"setStepRate:\");\n_MTL_PRIVATE_DEF_SEL(setStorageMode_,\n    \"setStorageMode:\");\n_MTL_PRIVATE_DEF_SEL(setStoreAction_,\n    \"setStoreAction:\");\n_MTL_PRIVATE_DEF_SEL(setStoreActionOptions_,\n    \"setStoreActionOptions:\");\n_MTL_PRIVATE_DEF_SEL(setStride_,\n    \"setStride:\");\n_MTL_PRIVATE_DEF_SEL(setSupportAddingBinaryFunctions_,\n    \"setSupportAddingBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setSupportAddingFragmentBinaryFunctions_,\n    \"setSupportAddingFragmentBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setSupportAddingVertexBinaryFunctions_,\n    \"setSupportAddingVertexBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setSupportArgumentBuffers_,\n    \"setSupportArgumentBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setSupportDynamicAttributeStride_,\n    \"setSupportDynamicAttributeStride:\");\n_MTL_PRIVATE_DEF_SEL(setSupportIndirectCommandBuffers_,\n    \"setSupportIndirectCommandBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setSupportRayTracing_,\n    \"setSupportRayTracing:\");\n_MTL_PRIVATE_DEF_SEL(setSwizzle_,\n    \"setSwizzle:\");\n_MTL_PRIVATE_DEF_SEL(setTAddressMode_,\n    \"setTAddressMode:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationControlPointIndexType_,\n    \"setTessellationControlPointIndexType:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationFactorBuffer_offset_instanceStride_,\n    \"setTessellationFactorBuffer:offset:instanceStride:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationFactorFormat_,\n    \"setTessellationFactorFormat:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationFactorScale_,\n    \"setTessellationFactorScale:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationFactorScaleEnabled_,\n    
\"setTessellationFactorScaleEnabled:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationFactorStepFunction_,\n    \"setTessellationFactorStepFunction:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationOutputWindingOrder_,\n    \"setTessellationOutputWindingOrder:\");\n_MTL_PRIVATE_DEF_SEL(setTessellationPartitionMode_,\n    \"setTessellationPartitionMode:\");\n_MTL_PRIVATE_DEF_SEL(setTexture_,\n    \"setTexture:\");\n_MTL_PRIVATE_DEF_SEL(setTexture_atIndex_,\n    \"setTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTextureType_,\n    \"setTextureType:\");\n_MTL_PRIVATE_DEF_SEL(setTextures_withRange_,\n    \"setTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setThreadGroupSizeIsMultipleOfThreadExecutionWidth_,\n    \"setThreadGroupSizeIsMultipleOfThreadExecutionWidth:\");\n_MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_,\n    \"setThreadgroupMemoryLength:\");\n_MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_atIndex_,\n    \"setThreadgroupMemoryLength:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_offset_atIndex_,\n    \"setThreadgroupMemoryLength:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setThreadgroupSizeMatchesTileSize_,\n    \"setThreadgroupSizeMatchesTileSize:\");\n_MTL_PRIVATE_DEF_SEL(setTileAccelerationStructure_atBufferIndex_,\n    \"setTileAccelerationStructure:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileAdditionalBinaryFunctions_,\n    \"setTileAdditionalBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setTileBuffer_offset_atIndex_,\n    \"setTileBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileBufferOffset_atIndex_,\n    \"setTileBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileBuffers_offsets_withRange_,\n    \"setTileBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileBytes_length_atIndex_,\n    \"setTileBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileFunction_,\n    \"setTileFunction:\");\n_MTL_PRIVATE_DEF_SEL(setTileHeight_,\n    \"setTileHeight:\");\n_MTL_PRIVATE_DEF_SEL(setTileIntersectionFunctionTable_atBufferIndex_,\n    
\"setTileIntersectionFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileIntersectionFunctionTables_withBufferRange_,\n    \"setTileIntersectionFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileSamplerState_atIndex_,\n    \"setTileSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setTileSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setTileSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileSamplerStates_withRange_,\n    \"setTileSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileTexture_atIndex_,\n    \"setTileTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileTextures_withRange_,\n    \"setTileTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileVisibleFunctionTable_atBufferIndex_,\n    \"setTileVisibleFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setTileVisibleFunctionTables_withBufferRange_,\n    \"setTileVisibleFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setTileWidth_,\n    \"setTileWidth:\");\n_MTL_PRIVATE_DEF_SEL(setTransformationMatrixBuffer_,\n    \"setTransformationMatrixBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setTransformationMatrixBufferOffset_,\n    \"setTransformationMatrixBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setTransformationMatrixLayout_,\n    \"setTransformationMatrixLayout:\");\n_MTL_PRIVATE_DEF_SEL(setTriangleCount_,\n    \"setTriangleCount:\");\n_MTL_PRIVATE_DEF_SEL(setTriangleFillMode_,\n    \"setTriangleFillMode:\");\n_MTL_PRIVATE_DEF_SEL(setType_,\n    \"setType:\");\n_MTL_PRIVATE_DEF_SEL(setUrl_,\n    \"setUrl:\");\n_MTL_PRIVATE_DEF_SEL(setUsage_,\n    \"setUsage:\");\n_MTL_PRIVATE_DEF_SEL(setVertexAccelerationStructure_atBufferIndex_,\n    \"setVertexAccelerationStructure:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexAdditionalBinaryFunctions_,\n    
\"setVertexAdditionalBinaryFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setVertexAmplificationCount_viewMappings_,\n    \"setVertexAmplificationCount:viewMappings:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffer_,\n    \"setVertexBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffer_offset_atIndex_,\n    \"setVertexBuffer:offset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffer_offset_attributeStride_atIndex_,\n    \"setVertexBuffer:offset:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_,\n    \"setVertexBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_atIndex_,\n    \"setVertexBufferOffset:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_attributeStride_atIndex_,\n    \"setVertexBufferOffset:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffers_,\n    \"setVertexBuffers:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffers_offsets_attributeStrides_withRange_,\n    \"setVertexBuffers:offsets:attributeStrides:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBuffers_offsets_withRange_,\n    \"setVertexBuffers:offsets:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBytes_length_atIndex_,\n    \"setVertexBytes:length:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexBytes_length_attributeStride_atIndex_,\n    \"setVertexBytes:length:attributeStride:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexDescriptor_,\n    \"setVertexDescriptor:\");\n_MTL_PRIVATE_DEF_SEL(setVertexFormat_,\n    \"setVertexFormat:\");\n_MTL_PRIVATE_DEF_SEL(setVertexFunction_,\n    \"setVertexFunction:\");\n_MTL_PRIVATE_DEF_SEL(setVertexIntersectionFunctionTable_atBufferIndex_,\n    \"setVertexIntersectionFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexIntersectionFunctionTables_withBufferRange_,\n    \"setVertexIntersectionFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexLinkedFunctions_,\n    \"setVertexLinkedFunctions:\");\n_MTL_PRIVATE_DEF_SEL(setVertexPreloadedLibraries_,\n    
\"setVertexPreloadedLibraries:\");\n_MTL_PRIVATE_DEF_SEL(setVertexSamplerState_atIndex_,\n    \"setVertexSamplerState:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexSamplerState_lodMinClamp_lodMaxClamp_atIndex_,\n    \"setVertexSamplerState:lodMinClamp:lodMaxClamp:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexSamplerStates_lodMinClamps_lodMaxClamps_withRange_,\n    \"setVertexSamplerStates:lodMinClamps:lodMaxClamps:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexSamplerStates_withRange_,\n    \"setVertexSamplerStates:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexStride_,\n    \"setVertexStride:\");\n_MTL_PRIVATE_DEF_SEL(setVertexTexture_atIndex_,\n    \"setVertexTexture:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexTextures_withRange_,\n    \"setVertexTextures:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setVertexVisibleFunctionTable_atBufferIndex_,\n    \"setVertexVisibleFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVertexVisibleFunctionTables_withBufferRange_,\n    \"setVertexVisibleFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setViewport_,\n    \"setViewport:\");\n_MTL_PRIVATE_DEF_SEL(setViewports_count_,\n    \"setViewports:count:\");\n_MTL_PRIVATE_DEF_SEL(setVisibilityResultBuffer_,\n    \"setVisibilityResultBuffer:\");\n_MTL_PRIVATE_DEF_SEL(setVisibilityResultMode_offset_,\n    \"setVisibilityResultMode:offset:\");\n_MTL_PRIVATE_DEF_SEL(setVisibleFunctionTable_atBufferIndex_,\n    \"setVisibleFunctionTable:atBufferIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVisibleFunctionTable_atIndex_,\n    \"setVisibleFunctionTable:atIndex:\");\n_MTL_PRIVATE_DEF_SEL(setVisibleFunctionTables_withBufferRange_,\n    \"setVisibleFunctionTables:withBufferRange:\");\n_MTL_PRIVATE_DEF_SEL(setVisibleFunctionTables_withRange_,\n    \"setVisibleFunctionTables:withRange:\");\n_MTL_PRIVATE_DEF_SEL(setWidth_,\n    \"setWidth:\");\n_MTL_PRIVATE_DEF_SEL(setWriteMask_,\n    \"setWriteMask:\");\n_MTL_PRIVATE_DEF_SEL(shaderValidation,\n    
\"shaderValidation\");\n_MTL_PRIVATE_DEF_SEL(sharedCaptureManager,\n    \"sharedCaptureManager\");\n_MTL_PRIVATE_DEF_SEL(shouldMaximizeConcurrentCompilation,\n    \"shouldMaximizeConcurrentCompilation\");\n_MTL_PRIVATE_DEF_SEL(signalEvent_value_,\n    \"signalEvent:value:\");\n_MTL_PRIVATE_DEF_SEL(signaledValue,\n    \"signaledValue\");\n_MTL_PRIVATE_DEF_SEL(size,\n    \"size\");\n_MTL_PRIVATE_DEF_SEL(slice,\n    \"slice\");\n_MTL_PRIVATE_DEF_SEL(sourceAlphaBlendFactor,\n    \"sourceAlphaBlendFactor\");\n_MTL_PRIVATE_DEF_SEL(sourceRGBBlendFactor,\n    \"sourceRGBBlendFactor\");\n_MTL_PRIVATE_DEF_SEL(sparsePageSize,\n    \"sparsePageSize\");\n_MTL_PRIVATE_DEF_SEL(sparseTileSizeInBytes,\n    \"sparseTileSizeInBytes\");\n_MTL_PRIVATE_DEF_SEL(sparseTileSizeInBytesForSparsePageSize_,\n    \"sparseTileSizeInBytesForSparsePageSize:\");\n_MTL_PRIVATE_DEF_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_,\n    \"sparseTileSizeWithTextureType:pixelFormat:sampleCount:\");\n_MTL_PRIVATE_DEF_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_sparsePageSize_,\n    \"sparseTileSizeWithTextureType:pixelFormat:sampleCount:sparsePageSize:\");\n_MTL_PRIVATE_DEF_SEL(specializedName,\n    \"specializedName\");\n_MTL_PRIVATE_DEF_SEL(stageInputAttributes,\n    \"stageInputAttributes\");\n_MTL_PRIVATE_DEF_SEL(stageInputDescriptor,\n    \"stageInputDescriptor\");\n_MTL_PRIVATE_DEF_SEL(stageInputOutputDescriptor,\n    \"stageInputOutputDescriptor\");\n_MTL_PRIVATE_DEF_SEL(startCaptureWithCommandQueue_,\n    \"startCaptureWithCommandQueue:\");\n_MTL_PRIVATE_DEF_SEL(startCaptureWithDescriptor_error_,\n    \"startCaptureWithDescriptor:error:\");\n_MTL_PRIVATE_DEF_SEL(startCaptureWithDevice_,\n    \"startCaptureWithDevice:\");\n_MTL_PRIVATE_DEF_SEL(startCaptureWithScope_,\n    \"startCaptureWithScope:\");\n_MTL_PRIVATE_DEF_SEL(startOfEncoderSampleIndex,\n    \"startOfEncoderSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(startOfFragmentSampleIndex,\n    
\"startOfFragmentSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(startOfVertexSampleIndex,\n    \"startOfVertexSampleIndex\");\n_MTL_PRIVATE_DEF_SEL(staticThreadgroupMemoryLength,\n    \"staticThreadgroupMemoryLength\");\n_MTL_PRIVATE_DEF_SEL(status,\n    \"status\");\n_MTL_PRIVATE_DEF_SEL(stencilAttachment,\n    \"stencilAttachment\");\n_MTL_PRIVATE_DEF_SEL(stencilAttachmentPixelFormat,\n    \"stencilAttachmentPixelFormat\");\n_MTL_PRIVATE_DEF_SEL(stencilCompareFunction,\n    \"stencilCompareFunction\");\n_MTL_PRIVATE_DEF_SEL(stencilFailureOperation,\n    \"stencilFailureOperation\");\n_MTL_PRIVATE_DEF_SEL(stencilResolveFilter,\n    \"stencilResolveFilter\");\n_MTL_PRIVATE_DEF_SEL(stepFunction,\n    \"stepFunction\");\n_MTL_PRIVATE_DEF_SEL(stepRate,\n    \"stepRate\");\n_MTL_PRIVATE_DEF_SEL(stopCapture,\n    \"stopCapture\");\n_MTL_PRIVATE_DEF_SEL(storageMode,\n    \"storageMode\");\n_MTL_PRIVATE_DEF_SEL(storeAction,\n    \"storeAction\");\n_MTL_PRIVATE_DEF_SEL(storeActionOptions,\n    \"storeActionOptions\");\n_MTL_PRIVATE_DEF_SEL(stride,\n    \"stride\");\n_MTL_PRIVATE_DEF_SEL(structType,\n    \"structType\");\n_MTL_PRIVATE_DEF_SEL(supportAddingBinaryFunctions,\n    \"supportAddingBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(supportAddingFragmentBinaryFunctions,\n    \"supportAddingFragmentBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(supportAddingVertexBinaryFunctions,\n    \"supportAddingVertexBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(supportArgumentBuffers,\n    \"supportArgumentBuffers\");\n_MTL_PRIVATE_DEF_SEL(supportDynamicAttributeStride,\n    \"supportDynamicAttributeStride\");\n_MTL_PRIVATE_DEF_SEL(supportIndirectCommandBuffers,\n    \"supportIndirectCommandBuffers\");\n_MTL_PRIVATE_DEF_SEL(supportRayTracing,\n    \"supportRayTracing\");\n_MTL_PRIVATE_DEF_SEL(supports32BitFloatFiltering,\n    \"supports32BitFloatFiltering\");\n_MTL_PRIVATE_DEF_SEL(supports32BitMSAA,\n    \"supports32BitMSAA\");\n_MTL_PRIVATE_DEF_SEL(supportsBCTextureCompression,\n    
\"supportsBCTextureCompression\");\n_MTL_PRIVATE_DEF_SEL(supportsCounterSampling_,\n    \"supportsCounterSampling:\");\n_MTL_PRIVATE_DEF_SEL(supportsDestination_,\n    \"supportsDestination:\");\n_MTL_PRIVATE_DEF_SEL(supportsDynamicLibraries,\n    \"supportsDynamicLibraries\");\n_MTL_PRIVATE_DEF_SEL(supportsFamily_,\n    \"supportsFamily:\");\n_MTL_PRIVATE_DEF_SEL(supportsFeatureSet_,\n    \"supportsFeatureSet:\");\n_MTL_PRIVATE_DEF_SEL(supportsFunctionPointers,\n    \"supportsFunctionPointers\");\n_MTL_PRIVATE_DEF_SEL(supportsFunctionPointersFromRender,\n    \"supportsFunctionPointersFromRender\");\n_MTL_PRIVATE_DEF_SEL(supportsPrimitiveMotionBlur,\n    \"supportsPrimitiveMotionBlur\");\n_MTL_PRIVATE_DEF_SEL(supportsPullModelInterpolation,\n    \"supportsPullModelInterpolation\");\n_MTL_PRIVATE_DEF_SEL(supportsQueryTextureLOD,\n    \"supportsQueryTextureLOD\");\n_MTL_PRIVATE_DEF_SEL(supportsRasterizationRateMapWithLayerCount_,\n    \"supportsRasterizationRateMapWithLayerCount:\");\n_MTL_PRIVATE_DEF_SEL(supportsRaytracing,\n    \"supportsRaytracing\");\n_MTL_PRIVATE_DEF_SEL(supportsRaytracingFromRender,\n    \"supportsRaytracingFromRender\");\n_MTL_PRIVATE_DEF_SEL(supportsRenderDynamicLibraries,\n    \"supportsRenderDynamicLibraries\");\n_MTL_PRIVATE_DEF_SEL(supportsShaderBarycentricCoordinates,\n    \"supportsShaderBarycentricCoordinates\");\n_MTL_PRIVATE_DEF_SEL(supportsTextureSampleCount_,\n    \"supportsTextureSampleCount:\");\n_MTL_PRIVATE_DEF_SEL(supportsVertexAmplificationCount_,\n    \"supportsVertexAmplificationCount:\");\n_MTL_PRIVATE_DEF_SEL(swizzle,\n    \"swizzle\");\n_MTL_PRIVATE_DEF_SEL(synchronizeResource_,\n    \"synchronizeResource:\");\n_MTL_PRIVATE_DEF_SEL(synchronizeTexture_slice_level_,\n    \"synchronizeTexture:slice:level:\");\n_MTL_PRIVATE_DEF_SEL(tAddressMode,\n    \"tAddressMode\");\n_MTL_PRIVATE_DEF_SEL(tailSizeInBytes,\n    \"tailSizeInBytes\");\n_MTL_PRIVATE_DEF_SEL(tessellationControlPointIndexType,\n    
\"tessellationControlPointIndexType\");\n_MTL_PRIVATE_DEF_SEL(tessellationFactorFormat,\n    \"tessellationFactorFormat\");\n_MTL_PRIVATE_DEF_SEL(tessellationFactorStepFunction,\n    \"tessellationFactorStepFunction\");\n_MTL_PRIVATE_DEF_SEL(tessellationOutputWindingOrder,\n    \"tessellationOutputWindingOrder\");\n_MTL_PRIVATE_DEF_SEL(tessellationPartitionMode,\n    \"tessellationPartitionMode\");\n_MTL_PRIVATE_DEF_SEL(texture,\n    \"texture\");\n_MTL_PRIVATE_DEF_SEL(texture2DDescriptorWithPixelFormat_width_height_mipmapped_,\n    \"texture2DDescriptorWithPixelFormat:width:height:mipmapped:\");\n_MTL_PRIVATE_DEF_SEL(textureBarrier,\n    \"textureBarrier\");\n_MTL_PRIVATE_DEF_SEL(textureBufferDescriptorWithPixelFormat_width_resourceOptions_usage_,\n    \"textureBufferDescriptorWithPixelFormat:width:resourceOptions:usage:\");\n_MTL_PRIVATE_DEF_SEL(textureCubeDescriptorWithPixelFormat_size_mipmapped_,\n    \"textureCubeDescriptorWithPixelFormat:size:mipmapped:\");\n_MTL_PRIVATE_DEF_SEL(textureDataType,\n    \"textureDataType\");\n_MTL_PRIVATE_DEF_SEL(textureReferenceType,\n    \"textureReferenceType\");\n_MTL_PRIVATE_DEF_SEL(textureType,\n    \"textureType\");\n_MTL_PRIVATE_DEF_SEL(threadExecutionWidth,\n    \"threadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(threadGroupSizeIsMultipleOfThreadExecutionWidth,\n    \"threadGroupSizeIsMultipleOfThreadExecutionWidth\");\n_MTL_PRIVATE_DEF_SEL(threadgroupMemoryAlignment,\n    \"threadgroupMemoryAlignment\");\n_MTL_PRIVATE_DEF_SEL(threadgroupMemoryDataSize,\n    \"threadgroupMemoryDataSize\");\n_MTL_PRIVATE_DEF_SEL(threadgroupMemoryLength,\n    \"threadgroupMemoryLength\");\n_MTL_PRIVATE_DEF_SEL(threadgroupSizeMatchesTileSize,\n    \"threadgroupSizeMatchesTileSize\");\n_MTL_PRIVATE_DEF_SEL(tileAdditionalBinaryFunctions,\n    \"tileAdditionalBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(tileArguments,\n    \"tileArguments\");\n_MTL_PRIVATE_DEF_SEL(tileBindings,\n    \"tileBindings\");\n_MTL_PRIVATE_DEF_SEL(tileBuffers,\n    
\"tileBuffers\");\n_MTL_PRIVATE_DEF_SEL(tileFunction,\n    \"tileFunction\");\n_MTL_PRIVATE_DEF_SEL(tileHeight,\n    \"tileHeight\");\n_MTL_PRIVATE_DEF_SEL(tileWidth,\n    \"tileWidth\");\n_MTL_PRIVATE_DEF_SEL(transformationMatrixBuffer,\n    \"transformationMatrixBuffer\");\n_MTL_PRIVATE_DEF_SEL(transformationMatrixBufferOffset,\n    \"transformationMatrixBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(transformationMatrixLayout,\n    \"transformationMatrixLayout\");\n_MTL_PRIVATE_DEF_SEL(triangleCount,\n    \"triangleCount\");\n_MTL_PRIVATE_DEF_SEL(tryCancel,\n    \"tryCancel\");\n_MTL_PRIVATE_DEF_SEL(type,\n    \"type\");\n_MTL_PRIVATE_DEF_SEL(updateFence_,\n    \"updateFence:\");\n_MTL_PRIVATE_DEF_SEL(updateFence_afterStages_,\n    \"updateFence:afterStages:\");\n_MTL_PRIVATE_DEF_SEL(updateTextureMapping_mode_indirectBuffer_indirectBufferOffset_,\n    \"updateTextureMapping:mode:indirectBuffer:indirectBufferOffset:\");\n_MTL_PRIVATE_DEF_SEL(updateTextureMapping_mode_region_mipLevel_slice_,\n    \"updateTextureMapping:mode:region:mipLevel:slice:\");\n_MTL_PRIVATE_DEF_SEL(updateTextureMappings_mode_regions_mipLevels_slices_numRegions_,\n    \"updateTextureMappings:mode:regions:mipLevels:slices:numRegions:\");\n_MTL_PRIVATE_DEF_SEL(url,\n    \"url\");\n_MTL_PRIVATE_DEF_SEL(usage,\n    \"usage\");\n_MTL_PRIVATE_DEF_SEL(useHeap_,\n    \"useHeap:\");\n_MTL_PRIVATE_DEF_SEL(useHeap_stages_,\n    \"useHeap:stages:\");\n_MTL_PRIVATE_DEF_SEL(useHeaps_count_,\n    \"useHeaps:count:\");\n_MTL_PRIVATE_DEF_SEL(useHeaps_count_stages_,\n    \"useHeaps:count:stages:\");\n_MTL_PRIVATE_DEF_SEL(useResidencySet_,\n    \"useResidencySet:\");\n_MTL_PRIVATE_DEF_SEL(useResidencySets_count_,\n    \"useResidencySets:count:\");\n_MTL_PRIVATE_DEF_SEL(useResource_usage_,\n    \"useResource:usage:\");\n_MTL_PRIVATE_DEF_SEL(useResource_usage_stages_,\n    \"useResource:usage:stages:\");\n_MTL_PRIVATE_DEF_SEL(useResources_count_usage_,\n    
\"useResources:count:usage:\");\n_MTL_PRIVATE_DEF_SEL(useResources_count_usage_stages_,\n    \"useResources:count:usage:stages:\");\n_MTL_PRIVATE_DEF_SEL(usedSize,\n    \"usedSize\");\n_MTL_PRIVATE_DEF_SEL(vertexAdditionalBinaryFunctions,\n    \"vertexAdditionalBinaryFunctions\");\n_MTL_PRIVATE_DEF_SEL(vertexArguments,\n    \"vertexArguments\");\n_MTL_PRIVATE_DEF_SEL(vertexAttributes,\n    \"vertexAttributes\");\n_MTL_PRIVATE_DEF_SEL(vertexBindings,\n    \"vertexBindings\");\n_MTL_PRIVATE_DEF_SEL(vertexBuffer,\n    \"vertexBuffer\");\n_MTL_PRIVATE_DEF_SEL(vertexBufferOffset,\n    \"vertexBufferOffset\");\n_MTL_PRIVATE_DEF_SEL(vertexBuffers,\n    \"vertexBuffers\");\n_MTL_PRIVATE_DEF_SEL(vertexDescriptor,\n    \"vertexDescriptor\");\n_MTL_PRIVATE_DEF_SEL(vertexFormat,\n    \"vertexFormat\");\n_MTL_PRIVATE_DEF_SEL(vertexFunction,\n    \"vertexFunction\");\n_MTL_PRIVATE_DEF_SEL(vertexLinkedFunctions,\n    \"vertexLinkedFunctions\");\n_MTL_PRIVATE_DEF_SEL(vertexPreloadedLibraries,\n    \"vertexPreloadedLibraries\");\n_MTL_PRIVATE_DEF_SEL(vertexStride,\n    \"vertexStride\");\n_MTL_PRIVATE_DEF_SEL(vertical,\n    \"vertical\");\n_MTL_PRIVATE_DEF_SEL(verticalSampleStorage,\n    \"verticalSampleStorage\");\n_MTL_PRIVATE_DEF_SEL(visibilityResultBuffer,\n    \"visibilityResultBuffer\");\n_MTL_PRIVATE_DEF_SEL(visibleFunctionTableDescriptor,\n    \"visibleFunctionTableDescriptor\");\n_MTL_PRIVATE_DEF_SEL(waitForEvent_value_,\n    \"waitForEvent:value:\");\n_MTL_PRIVATE_DEF_SEL(waitForFence_,\n    \"waitForFence:\");\n_MTL_PRIVATE_DEF_SEL(waitForFence_beforeStages_,\n    \"waitForFence:beforeStages:\");\n_MTL_PRIVATE_DEF_SEL(waitUntilCompleted,\n    \"waitUntilCompleted\");\n_MTL_PRIVATE_DEF_SEL(waitUntilScheduled,\n    \"waitUntilScheduled\");\n_MTL_PRIVATE_DEF_SEL(waitUntilSignaledValue_timeoutMS_,\n    \"waitUntilSignaledValue:timeoutMS:\");\n_MTL_PRIVATE_DEF_SEL(width,\n    \"width\");\n_MTL_PRIVATE_DEF_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_,\n    
\"writeCompactedAccelerationStructureSize:toBuffer:offset:\");\n_MTL_PRIVATE_DEF_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_sizeDataType_,\n    \"writeCompactedAccelerationStructureSize:toBuffer:offset:sizeDataType:\");\n_MTL_PRIVATE_DEF_SEL(writeMask,\n    \"writeMask\");\n\n}\n\n#include <CoreFoundation/CoreFoundation.h>\n#include <functional>\n\nnamespace MTL\n{\nusing DrawablePresentedHandler = void (^)(class Drawable*);\n\nusing DrawablePresentedHandlerFunction = std::function<void(class Drawable*)>;\n\nclass Drawable : public NS::Referencing<Drawable>\n{\npublic:\n    void           addPresentedHandler(const MTL::DrawablePresentedHandlerFunction& function);\n\n    void           present();\n\n    void           presentAtTime(CFTimeInterval presentationTime);\n\n    void           presentAfterMinimumDuration(CFTimeInterval duration);\n\n    void           addPresentedHandler(const MTL::DrawablePresentedHandler block);\n\n    CFTimeInterval presentedTime() const;\n\n    NS::UInteger   drawableID() const;\n};\n\n}\n\n_MTL_INLINE void MTL::Drawable::addPresentedHandler(const MTL::DrawablePresentedHandlerFunction& function)\n{\n    __block DrawablePresentedHandlerFunction blockFunction = function;\n\n    addPresentedHandler(^(Drawable* pDrawable) { blockFunction(pDrawable); });\n}\n\n_MTL_INLINE void MTL::Drawable::present()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(present));\n}\n\n_MTL_INLINE void MTL::Drawable::presentAtTime(CFTimeInterval presentationTime)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(presentAtTime_), presentationTime);\n}\n\n_MTL_INLINE void MTL::Drawable::presentAfterMinimumDuration(CFTimeInterval duration)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(presentAfterMinimumDuration_), duration);\n}\n\n_MTL_INLINE void MTL::Drawable::addPresentedHandler(const MTL::DrawablePresentedHandler block)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addPresentedHandler_), 
block);\n}\n\n_MTL_INLINE CFTimeInterval MTL::Drawable::presentedTime() const\n{\n    return Object::sendMessage<CFTimeInterval>(this, _MTL_PRIVATE_SEL(presentedTime));\n}\n\n_MTL_INLINE NS::UInteger MTL::Drawable::drawableID() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(drawableID));\n}\n\n#pragma once\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, PixelFormat) {\n    PixelFormatInvalid = 0,\n    PixelFormatA8Unorm = 1,\n    PixelFormatR8Unorm = 10,\n    PixelFormatR8Unorm_sRGB = 11,\n    PixelFormatR8Snorm = 12,\n    PixelFormatR8Uint = 13,\n    PixelFormatR8Sint = 14,\n    PixelFormatR16Unorm = 20,\n    PixelFormatR16Snorm = 22,\n    PixelFormatR16Uint = 23,\n    PixelFormatR16Sint = 24,\n    PixelFormatR16Float = 25,\n    PixelFormatRG8Unorm = 30,\n    PixelFormatRG8Unorm_sRGB = 31,\n    PixelFormatRG8Snorm = 32,\n    PixelFormatRG8Uint = 33,\n    PixelFormatRG8Sint = 34,\n    PixelFormatB5G6R5Unorm = 40,\n    PixelFormatA1BGR5Unorm = 41,\n    PixelFormatABGR4Unorm = 42,\n    PixelFormatBGR5A1Unorm = 43,\n    PixelFormatR32Uint = 53,\n    PixelFormatR32Sint = 54,\n    PixelFormatR32Float = 55,\n    PixelFormatRG16Unorm = 60,\n    PixelFormatRG16Snorm = 62,\n    PixelFormatRG16Uint = 63,\n    PixelFormatRG16Sint = 64,\n    PixelFormatRG16Float = 65,\n    PixelFormatRGBA8Unorm = 70,\n    PixelFormatRGBA8Unorm_sRGB = 71,\n    PixelFormatRGBA8Snorm = 72,\n    PixelFormatRGBA8Uint = 73,\n    PixelFormatRGBA8Sint = 74,\n    PixelFormatBGRA8Unorm = 80,\n    PixelFormatBGRA8Unorm_sRGB = 81,\n    PixelFormatRGB10A2Unorm = 90,\n    PixelFormatRGB10A2Uint = 91,\n    PixelFormatRG11B10Float = 92,\n    PixelFormatRGB9E5Float = 93,\n    PixelFormatBGR10A2Unorm = 94,\n    PixelFormatBGR10_XR = 554,\n    PixelFormatBGR10_XR_sRGB = 555,\n    PixelFormatRG32Uint = 103,\n    PixelFormatRG32Sint = 104,\n    PixelFormatRG32Float = 105,\n    PixelFormatRGBA16Unorm = 110,\n    PixelFormatRGBA16Snorm = 112,\n    PixelFormatRGBA16Uint = 
113,\n    PixelFormatRGBA16Sint = 114,\n    PixelFormatRGBA16Float = 115,\n    PixelFormatBGRA10_XR = 552,\n    PixelFormatBGRA10_XR_sRGB = 553,\n    PixelFormatRGBA32Uint = 123,\n    PixelFormatRGBA32Sint = 124,\n    PixelFormatRGBA32Float = 125,\n    PixelFormatBC1_RGBA = 130,\n    PixelFormatBC1_RGBA_sRGB = 131,\n    PixelFormatBC2_RGBA = 132,\n    PixelFormatBC2_RGBA_sRGB = 133,\n    PixelFormatBC3_RGBA = 134,\n    PixelFormatBC3_RGBA_sRGB = 135,\n    PixelFormatBC4_RUnorm = 140,\n    PixelFormatBC4_RSnorm = 141,\n    PixelFormatBC5_RGUnorm = 142,\n    PixelFormatBC5_RGSnorm = 143,\n    PixelFormatBC6H_RGBFloat = 150,\n    PixelFormatBC6H_RGBUfloat = 151,\n    PixelFormatBC7_RGBAUnorm = 152,\n    PixelFormatBC7_RGBAUnorm_sRGB = 153,\n    PixelFormatPVRTC_RGB_2BPP = 160,\n    PixelFormatPVRTC_RGB_2BPP_sRGB = 161,\n    PixelFormatPVRTC_RGB_4BPP = 162,\n    PixelFormatPVRTC_RGB_4BPP_sRGB = 163,\n    PixelFormatPVRTC_RGBA_2BPP = 164,\n    PixelFormatPVRTC_RGBA_2BPP_sRGB = 165,\n    PixelFormatPVRTC_RGBA_4BPP = 166,\n    PixelFormatPVRTC_RGBA_4BPP_sRGB = 167,\n    PixelFormatEAC_R11Unorm = 170,\n    PixelFormatEAC_R11Snorm = 172,\n    PixelFormatEAC_RG11Unorm = 174,\n    PixelFormatEAC_RG11Snorm = 176,\n    PixelFormatEAC_RGBA8 = 178,\n    PixelFormatEAC_RGBA8_sRGB = 179,\n    PixelFormatETC2_RGB8 = 180,\n    PixelFormatETC2_RGB8_sRGB = 181,\n    PixelFormatETC2_RGB8A1 = 182,\n    PixelFormatETC2_RGB8A1_sRGB = 183,\n    PixelFormatASTC_4x4_sRGB = 186,\n    PixelFormatASTC_5x4_sRGB = 187,\n    PixelFormatASTC_5x5_sRGB = 188,\n    PixelFormatASTC_6x5_sRGB = 189,\n    PixelFormatASTC_6x6_sRGB = 190,\n    PixelFormatASTC_8x5_sRGB = 192,\n    PixelFormatASTC_8x6_sRGB = 193,\n    PixelFormatASTC_8x8_sRGB = 194,\n    PixelFormatASTC_10x5_sRGB = 195,\n    PixelFormatASTC_10x6_sRGB = 196,\n    PixelFormatASTC_10x8_sRGB = 197,\n    PixelFormatASTC_10x10_sRGB = 198,\n    PixelFormatASTC_12x10_sRGB = 199,\n    PixelFormatASTC_12x12_sRGB = 200,\n    PixelFormatASTC_4x4_LDR = 
204,\n    PixelFormatASTC_5x4_LDR = 205,\n    PixelFormatASTC_5x5_LDR = 206,\n    PixelFormatASTC_6x5_LDR = 207,\n    PixelFormatASTC_6x6_LDR = 208,\n    PixelFormatASTC_8x5_LDR = 210,\n    PixelFormatASTC_8x6_LDR = 211,\n    PixelFormatASTC_8x8_LDR = 212,\n    PixelFormatASTC_10x5_LDR = 213,\n    PixelFormatASTC_10x6_LDR = 214,\n    PixelFormatASTC_10x8_LDR = 215,\n    PixelFormatASTC_10x10_LDR = 216,\n    PixelFormatASTC_12x10_LDR = 217,\n    PixelFormatASTC_12x12_LDR = 218,\n    PixelFormatASTC_4x4_HDR = 222,\n    PixelFormatASTC_5x4_HDR = 223,\n    PixelFormatASTC_5x5_HDR = 224,\n    PixelFormatASTC_6x5_HDR = 225,\n    PixelFormatASTC_6x6_HDR = 226,\n    PixelFormatASTC_8x5_HDR = 228,\n    PixelFormatASTC_8x6_HDR = 229,\n    PixelFormatASTC_8x8_HDR = 230,\n    PixelFormatASTC_10x5_HDR = 231,\n    PixelFormatASTC_10x6_HDR = 232,\n    PixelFormatASTC_10x8_HDR = 233,\n    PixelFormatASTC_10x10_HDR = 234,\n    PixelFormatASTC_12x10_HDR = 235,\n    PixelFormatASTC_12x12_HDR = 236,\n    PixelFormatGBGR422 = 240,\n    PixelFormatBGRG422 = 241,\n    PixelFormatDepth16Unorm = 250,\n    PixelFormatDepth32Float = 252,\n    PixelFormatStencil8 = 253,\n    PixelFormatDepth24Unorm_Stencil8 = 255,\n    PixelFormatDepth32Float_Stencil8 = 260,\n    PixelFormatX32_Stencil8 = 261,\n    PixelFormatX24_Stencil8 = 262,\n};\n\n}\n\n#pragma once\n\n#include <mach/mach.h>\n\n#pragma once\n\nnamespace MTL\n{\nclass Allocation : public NS::Referencing<Allocation>\n{\npublic:\n    NS::UInteger allocatedSize() const;\n};\n\n}\n\n_MTL_INLINE NS::UInteger MTL::Allocation::allocatedSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(allocatedSize));\n}\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, PurgeableState) {\n    PurgeableStateKeepCurrent = 1,\n    PurgeableStateNonVolatile = 2,\n    PurgeableStateVolatile = 3,\n    PurgeableStateEmpty = 4,\n};\n\n_MTL_ENUM(NS::UInteger, CPUCacheMode) {\n    CPUCacheModeDefaultCache = 0,\n    CPUCacheModeWriteCombined = 
1,\n};\n\n_MTL_ENUM(NS::UInteger, StorageMode) {\n    StorageModeShared = 0,\n    StorageModeManaged = 1,\n    StorageModePrivate = 2,\n    StorageModeMemoryless = 3,\n};\n\n_MTL_ENUM(NS::UInteger, HazardTrackingMode) {\n    HazardTrackingModeDefault = 0,\n    HazardTrackingModeUntracked = 1,\n    HazardTrackingModeTracked = 2,\n};\n\n_MTL_OPTIONS(NS::UInteger, ResourceOptions) {\n    ResourceCPUCacheModeDefaultCache = 0,\n    ResourceCPUCacheModeWriteCombined = 1,\n    ResourceStorageModeShared = 0,\n    ResourceStorageModeManaged = 16,\n    ResourceStorageModePrivate = 32,\n    ResourceStorageModeMemoryless = 48,\n    ResourceHazardTrackingModeDefault = 0,\n    ResourceHazardTrackingModeUntracked = 256,\n    ResourceHazardTrackingModeTracked = 512,\n    ResourceOptionCPUCacheModeDefault = 0,\n    ResourceOptionCPUCacheModeWriteCombined = 1,\n};\n\nclass Resource : public NS::Referencing<Resource, Allocation>\n{\npublic:\n    NS::String*             label() const;\n    void                    setLabel(const NS::String* label);\n\n    class Device*           device() const;\n\n    MTL::CPUCacheMode       cpuCacheMode() const;\n\n    MTL::StorageMode        storageMode() const;\n\n    MTL::HazardTrackingMode hazardTrackingMode() const;\n\n    MTL::ResourceOptions    resourceOptions() const;\n\n    MTL::PurgeableState     setPurgeableState(MTL::PurgeableState state);\n\n    class Heap*             heap() const;\n\n    NS::UInteger            heapOffset() const;\n\n    NS::UInteger            allocatedSize() const;\n\n    void                    makeAliasable();\n\n    bool                    isAliasable();\n\n    kern_return_t           setOwner(task_id_token_t task_id_token);\n};\n\n}\n\n_MTL_INLINE NS::String* MTL::Resource::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::Resource::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), 
label);\n}\n\n_MTL_INLINE MTL::Device* MTL::Resource::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::CPUCacheMode MTL::Resource::cpuCacheMode() const\n{\n    return Object::sendMessage<MTL::CPUCacheMode>(this, _MTL_PRIVATE_SEL(cpuCacheMode));\n}\n\n_MTL_INLINE MTL::StorageMode MTL::Resource::storageMode() const\n{\n    return Object::sendMessage<MTL::StorageMode>(this, _MTL_PRIVATE_SEL(storageMode));\n}\n\n_MTL_INLINE MTL::HazardTrackingMode MTL::Resource::hazardTrackingMode() const\n{\n    return Object::sendMessage<MTL::HazardTrackingMode>(this, _MTL_PRIVATE_SEL(hazardTrackingMode));\n}\n\n_MTL_INLINE MTL::ResourceOptions MTL::Resource::resourceOptions() const\n{\n    return Object::sendMessage<MTL::ResourceOptions>(this, _MTL_PRIVATE_SEL(resourceOptions));\n}\n\n_MTL_INLINE MTL::PurgeableState MTL::Resource::setPurgeableState(MTL::PurgeableState state)\n{\n    return Object::sendMessage<MTL::PurgeableState>(this, _MTL_PRIVATE_SEL(setPurgeableState_), state);\n}\n\n_MTL_INLINE MTL::Heap* MTL::Resource::heap() const\n{\n    return Object::sendMessage<MTL::Heap*>(this, _MTL_PRIVATE_SEL(heap));\n}\n\n_MTL_INLINE NS::UInteger MTL::Resource::heapOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(heapOffset));\n}\n\n_MTL_INLINE NS::UInteger MTL::Resource::allocatedSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(allocatedSize));\n}\n\n_MTL_INLINE void MTL::Resource::makeAliasable()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(makeAliasable));\n}\n\n_MTL_INLINE bool MTL::Resource::isAliasable()\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isAliasable));\n}\n\n_MTL_INLINE kern_return_t MTL::Resource::setOwner(task_id_token_t task_id_token)\n{\n    return Object::sendMessage<kern_return_t>(this, _MTL_PRIVATE_SEL(setOwnerWithIdentity_), task_id_token);\n}\n\n#pragma once\n\nnamespace MTL\n{\nstruct 
Origin\n{\n    Origin() = default;\n\n    Origin(NS::UInteger x, NS::UInteger y, NS::UInteger z);\n\n    static Origin Make(NS::UInteger x, NS::UInteger y, NS::UInteger z);\n\n    NS::UInteger  x;\n    NS::UInteger  y;\n    NS::UInteger  z;\n} _MTL_PACKED;\n\nstruct Size\n{\n    Size() = default;\n\n    Size(NS::UInteger width, NS::UInteger height, NS::UInteger depth);\n\n    static Size  Make(NS::UInteger width, NS::UInteger height, NS::UInteger depth);\n\n    NS::UInteger width;\n    NS::UInteger height;\n    NS::UInteger depth;\n} _MTL_PACKED;\n\nstruct Region\n{\n    Region() = default;\n\n    Region(NS::UInteger x, NS::UInteger width);\n\n    Region(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height);\n\n    Region(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth);\n\n    static Region Make1D(NS::UInteger x, NS::UInteger width);\n\n    static Region Make2D(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height);\n\n    static Region Make3D(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth);\n\n    MTL::Origin   origin;\n    MTL::Size     size;\n} _MTL_PACKED;\n\nstruct SamplePosition;\n\nusing Coordinate2D = SamplePosition;\n\nstruct SamplePosition\n{\n    SamplePosition() = default;\n\n    SamplePosition(float _x, float _y);\n\n    static SamplePosition Make(float x, float y);\n\n    float                 x;\n    float                 y;\n} _MTL_PACKED;\n\nstruct ResourceID\n{\n    uint64_t _impl;\n} _MTL_PACKED;\n\n}\n\n_MTL_INLINE MTL::Origin::Origin(NS::UInteger _x, NS::UInteger _y, NS::UInteger _z)\n    : x(_x)\n    , y(_y)\n    , z(_z)\n{\n}\n\n_MTL_INLINE MTL::Origin MTL::Origin::Make(NS::UInteger x, NS::UInteger y, NS::UInteger z)\n{\n    return Origin(x, y, z);\n}\n\n_MTL_INLINE MTL::Size::Size(NS::UInteger _width, NS::UInteger _height, NS::UInteger _depth)\n    : width(_width)\n    , 
height(_height)\n    , depth(_depth)\n{\n}\n\n_MTL_INLINE MTL::Size MTL::Size::Make(NS::UInteger width, NS::UInteger height, NS::UInteger depth)\n{\n    return Size(width, height, depth);\n}\n\n_MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger width)\n    : origin(x, 0, 0)\n    , size(width, 1, 1)\n{\n}\n\n_MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height)\n    : origin(x, y, 0)\n    , size(width, height, 1)\n{\n}\n\n_MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth)\n    : origin(x, y, z)\n    , size(width, height, depth)\n{\n}\n\n_MTL_INLINE MTL::Region MTL::Region::Make1D(NS::UInteger x, NS::UInteger width)\n{\n    return Region(x, width);\n}\n\n_MTL_INLINE MTL::Region MTL::Region::Make2D(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height)\n{\n    return Region(x, y, width, height);\n}\n\n_MTL_INLINE MTL::Region MTL::Region::Make3D(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth)\n{\n    return Region(x, y, z, width, height, depth);\n}\n\n_MTL_INLINE MTL::SamplePosition::SamplePosition(float _x, float _y)\n    : x(_x)\n    , y(_y)\n{\n}\n\n_MTL_INLINE MTL::SamplePosition MTL::SamplePosition::Make(float x, float y)\n{\n    return SamplePosition(x, y);\n}\n\n#include <IOSurface/IOSurfaceRef.h>\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, TextureType) {\n    TextureType1D = 0,\n    TextureType1DArray = 1,\n    TextureType2D = 2,\n    TextureType2DArray = 3,\n    TextureType2DMultisample = 4,\n    TextureTypeCube = 5,\n    TextureTypeCubeArray = 6,\n    TextureType3D = 7,\n    TextureType2DMultisampleArray = 8,\n    TextureTypeTextureBuffer = 9,\n};\n\n_MTL_ENUM(uint8_t, TextureSwizzle) {\n    TextureSwizzleZero = 0,\n    TextureSwizzleOne = 1,\n    TextureSwizzleRed = 2,\n    TextureSwizzleGreen = 3,\n    TextureSwizzleBlue = 4,\n    
TextureSwizzleAlpha = 5,\n};\n\nstruct TextureSwizzleChannels\n{\n    static TextureSwizzleChannels   Default();\n    static TextureSwizzleChannels   Make( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a );\n\n    constexpr TextureSwizzleChannels();\n    constexpr TextureSwizzleChannels( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a );\n\n    MTL::TextureSwizzle red;\n    MTL::TextureSwizzle green;\n    MTL::TextureSwizzle blue;\n    MTL::TextureSwizzle alpha;\n} _MTL_PACKED;\n\nclass SharedTextureHandle : public NS::SecureCoding<SharedTextureHandle>\n{\npublic:\n    static class SharedTextureHandle* alloc();\n\n    class SharedTextureHandle*        init();\n\n    class Device*                     device() const;\n\n    NS::String*                       label() const;\n};\n\n_MTL_OPTIONS(NS::UInteger, TextureUsage) {\n    TextureUsageUnknown = 0,\n    TextureUsageShaderRead = 1,\n    TextureUsageShaderWrite = 2,\n    TextureUsageRenderTarget = 4,\n    TextureUsagePixelFormatView = 16,\n    TextureUsageShaderAtomic = 32,\n};\n\n_MTL_ENUM(NS::Integer, TextureCompressionType) {\n    TextureCompressionTypeLossless = 0,\n    TextureCompressionTypeLossy = 1,\n};\n\nclass TextureDescriptor : public NS::Copying<TextureDescriptor>\n{\npublic:\n    static class TextureDescriptor* alloc();\n\n    class TextureDescriptor*        init();\n\n    static class TextureDescriptor* texture2DDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, NS::UInteger height, bool mipmapped);\n\n    static class TextureDescriptor* textureCubeDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger size, bool mipmapped);\n\n    static class TextureDescriptor* textureBufferDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, MTL::ResourceOptions resourceOptions, MTL::TextureUsage usage);\n\n    MTL::TextureType                textureType() const;\n    void                            setTextureType(MTL::TextureType textureType);\n\n    
MTL::PixelFormat                pixelFormat() const;\n    void                            setPixelFormat(MTL::PixelFormat pixelFormat);\n\n    NS::UInteger                    width() const;\n    void                            setWidth(NS::UInteger width);\n\n    NS::UInteger                    height() const;\n    void                            setHeight(NS::UInteger height);\n\n    NS::UInteger                    depth() const;\n    void                            setDepth(NS::UInteger depth);\n\n    NS::UInteger                    mipmapLevelCount() const;\n    void                            setMipmapLevelCount(NS::UInteger mipmapLevelCount);\n\n    NS::UInteger                    sampleCount() const;\n    void                            setSampleCount(NS::UInteger sampleCount);\n\n    NS::UInteger                    arrayLength() const;\n    void                            setArrayLength(NS::UInteger arrayLength);\n\n    MTL::ResourceOptions            resourceOptions() const;\n    void                            setResourceOptions(MTL::ResourceOptions resourceOptions);\n\n    MTL::CPUCacheMode               cpuCacheMode() const;\n    void                            setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode);\n\n    MTL::StorageMode                storageMode() const;\n    void                            setStorageMode(MTL::StorageMode storageMode);\n\n    MTL::HazardTrackingMode         hazardTrackingMode() const;\n    void                            setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode);\n\n    MTL::TextureUsage               usage() const;\n    void                            setUsage(MTL::TextureUsage usage);\n\n    bool                            allowGPUOptimizedContents() const;\n    void                            setAllowGPUOptimizedContents(bool allowGPUOptimizedContents);\n\n    MTL::TextureCompressionType     compressionType() const;\n    void                            
setCompressionType(MTL::TextureCompressionType compressionType);\n\n    MTL::TextureSwizzleChannels     swizzle() const;\n    void                            setSwizzle(MTL::TextureSwizzleChannels swizzle);\n};\n\nclass Texture : public NS::Referencing<Texture, Resource>\n{\npublic:\n    class Resource*             rootResource() const;\n\n    class Texture*              parentTexture() const;\n\n    NS::UInteger                parentRelativeLevel() const;\n\n    NS::UInteger                parentRelativeSlice() const;\n\n    class Buffer*               buffer() const;\n\n    NS::UInteger                bufferOffset() const;\n\n    NS::UInteger                bufferBytesPerRow() const;\n\n    IOSurfaceRef                iosurface() const;\n\n    NS::UInteger                iosurfacePlane() const;\n\n    MTL::TextureType            textureType() const;\n\n    MTL::PixelFormat            pixelFormat() const;\n\n    NS::UInteger                width() const;\n\n    NS::UInteger                height() const;\n\n    NS::UInteger                depth() const;\n\n    NS::UInteger                mipmapLevelCount() const;\n\n    NS::UInteger                sampleCount() const;\n\n    NS::UInteger                arrayLength() const;\n\n    MTL::TextureUsage           usage() const;\n\n    bool                        shareable() const;\n\n    bool                        framebufferOnly() const;\n\n    NS::UInteger                firstMipmapInTail() const;\n\n    NS::UInteger                tailSizeInBytes() const;\n\n    bool                        isSparse() const;\n\n    bool                        allowGPUOptimizedContents() const;\n\n    MTL::TextureCompressionType compressionType() const;\n\n    MTL::ResourceID             gpuResourceID() const;\n\n    void                        getBytes(void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage, MTL::Region region, NS::UInteger level, NS::UInteger slice);\n\n    void                        
replaceRegion(MTL::Region region, NS::UInteger level, NS::UInteger slice, const void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage);\n\n    void                        getBytes(void* pixelBytes, NS::UInteger bytesPerRow, MTL::Region region, NS::UInteger level);\n\n    void                        replaceRegion(MTL::Region region, NS::UInteger level, const void* pixelBytes, NS::UInteger bytesPerRow);\n\n    class Texture*              newTextureView(MTL::PixelFormat pixelFormat);\n\n    class Texture*              newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange);\n\n    class SharedTextureHandle*  newSharedTextureHandle();\n\n    class Texture*              remoteStorageTexture() const;\n\n    class Texture*              newRemoteTextureViewForDevice(const class Device* device);\n\n    MTL::TextureSwizzleChannels swizzle() const;\n\n    class Texture*              newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange, MTL::TextureSwizzleChannels swizzle);\n};\n\n}\n\n_MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureSwizzleChannels::Default()\n{\n    return MTL::TextureSwizzleChannels();\n}\n\n_MTL_INLINE constexpr MTL::TextureSwizzleChannels::TextureSwizzleChannels()\n: red(MTL::TextureSwizzleRed)\n, green(MTL::TextureSwizzleGreen)\n, blue(MTL::TextureSwizzleBlue)\n, alpha(MTL::TextureSwizzleAlpha)\n{\n\n}\n\n_MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureSwizzleChannels::Make( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a )\n{\n    return TextureSwizzleChannels(r, g, b, a);\n}\n\n_MTL_INLINE constexpr MTL::TextureSwizzleChannels::TextureSwizzleChannels( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a )\n: red(r)\n, green(g)\n, blue(b)\n, alpha(a)\n{\n\n}\n\n_MTL_INLINE MTL::SharedTextureHandle* MTL::SharedTextureHandle::alloc()\n{\n    return 
NS::Object::alloc<MTL::SharedTextureHandle>(_MTL_PRIVATE_CLS(MTLSharedTextureHandle));\n}\n\n_MTL_INLINE MTL::SharedTextureHandle* MTL::SharedTextureHandle::init()\n{\n    return NS::Object::init<MTL::SharedTextureHandle>();\n}\n\n_MTL_INLINE MTL::Device* MTL::SharedTextureHandle::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::SharedTextureHandle::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::TextureDescriptor>(_MTL_PRIVATE_CLS(MTLTextureDescriptor));\n}\n\n_MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::init()\n{\n    return NS::Object::init<MTL::TextureDescriptor>();\n}\n\n_MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::texture2DDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, NS::UInteger height, bool mipmapped)\n{\n    return Object::sendMessage<MTL::TextureDescriptor*>(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(texture2DDescriptorWithPixelFormat_width_height_mipmapped_), pixelFormat, width, height, mipmapped);\n}\n\n_MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::textureCubeDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger size, bool mipmapped)\n{\n    return Object::sendMessage<MTL::TextureDescriptor*>(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(textureCubeDescriptorWithPixelFormat_size_mipmapped_), pixelFormat, size, mipmapped);\n}\n\n_MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::textureBufferDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, MTL::ResourceOptions resourceOptions, MTL::TextureUsage usage)\n{\n    return Object::sendMessage<MTL::TextureDescriptor*>(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(textureBufferDescriptorWithPixelFormat_width_resourceOptions_usage_), pixelFormat, width, 
resourceOptions, usage);\n}\n\n_MTL_INLINE MTL::TextureType MTL::TextureDescriptor::textureType() const\n{\n    return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setTextureType(MTL::TextureType textureType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTextureType_), textureType);\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::TextureDescriptor::pixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(pixelFormat));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::width() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(width));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setWidth(NS::UInteger width)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setWidth_), width);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::height() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(height));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setHeight(NS::UInteger height)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setHeight_), height);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::depth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(depth));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setDepth(NS::UInteger depth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepth_), depth);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::mipmapLevelCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(mipmapLevelCount));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setMipmapLevelCount(NS::UInteger mipmapLevelCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMipmapLevelCount_), 
mipmapLevelCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::sampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setSampleCount(NS::UInteger sampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureDescriptor::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setArrayLength(NS::UInteger arrayLength)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArrayLength_), arrayLength);\n}\n\n_MTL_INLINE MTL::ResourceOptions MTL::TextureDescriptor::resourceOptions() const\n{\n    return Object::sendMessage<MTL::ResourceOptions>(this, _MTL_PRIVATE_SEL(resourceOptions));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setResourceOptions(MTL::ResourceOptions resourceOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResourceOptions_), resourceOptions);\n}\n\n_MTL_INLINE MTL::CPUCacheMode MTL::TextureDescriptor::cpuCacheMode() const\n{\n    return Object::sendMessage<MTL::CPUCacheMode>(this, _MTL_PRIVATE_SEL(cpuCacheMode));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCpuCacheMode_), cpuCacheMode);\n}\n\n_MTL_INLINE MTL::StorageMode MTL::TextureDescriptor::storageMode() const\n{\n    return Object::sendMessage<MTL::StorageMode>(this, _MTL_PRIVATE_SEL(storageMode));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setStorageMode(MTL::StorageMode storageMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStorageMode_), storageMode);\n}\n\n_MTL_INLINE MTL::HazardTrackingMode MTL::TextureDescriptor::hazardTrackingMode() const\n{\n    return Object::sendMessage<MTL::HazardTrackingMode>(this, 
_MTL_PRIVATE_SEL(hazardTrackingMode));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setHazardTrackingMode_), hazardTrackingMode);\n}\n\n_MTL_INLINE MTL::TextureUsage MTL::TextureDescriptor::usage() const\n{\n    return Object::sendMessage<MTL::TextureUsage>(this, _MTL_PRIVATE_SEL(usage));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setUsage(MTL::TextureUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setUsage_), usage);\n}\n\n_MTL_INLINE bool MTL::TextureDescriptor::allowGPUOptimizedContents() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(allowGPUOptimizedContents));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setAllowGPUOptimizedContents(bool allowGPUOptimizedContents)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAllowGPUOptimizedContents_), allowGPUOptimizedContents);\n}\n\n_MTL_INLINE MTL::TextureCompressionType MTL::TextureDescriptor::compressionType() const\n{\n    return Object::sendMessage<MTL::TextureCompressionType>(this, _MTL_PRIVATE_SEL(compressionType));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setCompressionType(MTL::TextureCompressionType compressionType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCompressionType_), compressionType);\n}\n\n_MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureDescriptor::swizzle() const\n{\n    return Object::sendMessage<MTL::TextureSwizzleChannels>(this, _MTL_PRIVATE_SEL(swizzle));\n}\n\n_MTL_INLINE void MTL::TextureDescriptor::setSwizzle(MTL::TextureSwizzleChannels swizzle)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSwizzle_), swizzle);\n}\n\n_MTL_INLINE MTL::Resource* MTL::Texture::rootResource() const\n{\n    return Object::sendMessage<MTL::Resource*>(this, _MTL_PRIVATE_SEL(rootResource));\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::parentTexture() const\n{\n    return 
Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(parentTexture));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::parentRelativeLevel() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(parentRelativeLevel));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::parentRelativeSlice() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(parentRelativeSlice));\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Texture::buffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(buffer));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::bufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferOffset));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::bufferBytesPerRow() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferBytesPerRow));\n}\n\n_MTL_INLINE IOSurfaceRef MTL::Texture::iosurface() const\n{\n    return Object::sendMessage<IOSurfaceRef>(this, _MTL_PRIVATE_SEL(iosurface));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::iosurfacePlane() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(iosurfacePlane));\n}\n\n_MTL_INLINE MTL::TextureType MTL::Texture::textureType() const\n{\n    return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::Texture::pixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(pixelFormat));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::width() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(width));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::height() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(height));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::depth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(depth));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::mipmapLevelCount() const\n{\n    
return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(mipmapLevelCount));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::sampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n_MTL_INLINE MTL::TextureUsage MTL::Texture::usage() const\n{\n    return Object::sendMessage<MTL::TextureUsage>(this, _MTL_PRIVATE_SEL(usage));\n}\n\n_MTL_INLINE bool MTL::Texture::shareable() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isShareable));\n}\n\n_MTL_INLINE bool MTL::Texture::framebufferOnly() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isFramebufferOnly));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::firstMipmapInTail() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(firstMipmapInTail));\n}\n\n_MTL_INLINE NS::UInteger MTL::Texture::tailSizeInBytes() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(tailSizeInBytes));\n}\n\n_MTL_INLINE bool MTL::Texture::isSparse() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isSparse));\n}\n\n_MTL_INLINE bool MTL::Texture::allowGPUOptimizedContents() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(allowGPUOptimizedContents));\n}\n\n_MTL_INLINE MTL::TextureCompressionType MTL::Texture::compressionType() const\n{\n    return Object::sendMessage<MTL::TextureCompressionType>(this, _MTL_PRIVATE_SEL(compressionType));\n}\n\n_MTL_INLINE MTL::ResourceID MTL::Texture::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE void MTL::Texture::getBytes(void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage, MTL::Region region, NS::UInteger level, NS::UInteger slice)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(getBytes_bytesPerRow_bytesPerImage_fromRegion_mipmapLevel_slice_), pixelBytes, bytesPerRow, bytesPerImage, region, level, slice);\n}\n\n_MTL_INLINE void MTL::Texture::replaceRegion(MTL::Region region, NS::UInteger level, NS::UInteger slice, const void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(replaceRegion_mipmapLevel_slice_withBytes_bytesPerRow_bytesPerImage_), region, level, slice, pixelBytes, bytesPerRow, bytesPerImage);\n}\n\n_MTL_INLINE void MTL::Texture::getBytes(void* pixelBytes, NS::UInteger bytesPerRow, MTL::Region region, NS::UInteger level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(getBytes_bytesPerRow_fromRegion_mipmapLevel_), pixelBytes, bytesPerRow, region, level);\n}\n\n_MTL_INLINE void MTL::Texture::replaceRegion(MTL::Region region, NS::UInteger level, const void* pixelBytes, NS::UInteger bytesPerRow)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(replaceRegion_mipmapLevel_withBytes_bytesPerRow_), region, level, pixelBytes, bytesPerRow);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_), pixelFormat);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_), pixelFormat, textureType, levelRange, sliceRange);\n}\n\n_MTL_INLINE MTL::SharedTextureHandle* MTL::Texture::newSharedTextureHandle()\n{\n    return Object::sendMessage<MTL::SharedTextureHandle*>(this, _MTL_PRIVATE_SEL(newSharedTextureHandle));\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::remoteStorageTexture() const\n{\n    return 
Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(remoteStorageTexture));\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::newRemoteTextureViewForDevice(const MTL::Device* device)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newRemoteTextureViewForDevice_), device);\n}\n\n_MTL_INLINE MTL::TextureSwizzleChannels MTL::Texture::swizzle() const\n{\n    return Object::sendMessage<MTL::TextureSwizzleChannels>(this, _MTL_PRIVATE_SEL(swizzle));\n}\n\n_MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange, MTL::TextureSwizzleChannels swizzle)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_swizzle_), pixelFormat, textureType, levelRange, sliceRange, swizzle);\n}\n\n#define _CA_EXPORT _NS_EXPORT\n#define _CA_EXTERN _NS_EXTERN\n#define _CA_INLINE _NS_INLINE\n#define _CA_PACKED _NS_PACKED\n\n#define _CA_CONST(type, name) _NS_CONST(type, name)\n#define _CA_ENUM(type, name) _NS_ENUM(type, name)\n#define _CA_OPTIONS(type, name) _NS_OPTIONS(type, name)\n\n#define _CA_VALIDATE_SIZE(ns, name) _NS_VALIDATE_SIZE(ns, name)\n#define _CA_VALIDATE_ENUM(ns, name) _NS_VALIDATE_ENUM(ns, name)\n\n#include <objc/runtime.h>\n\n#define _CA_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol)\n#define _CA_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor)\n\n#if defined(CA_PRIVATE_IMPLEMENTATION)\n\n#ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN\n#define _CA_PRIVATE_VISIBILITY __attribute__((visibility(\"hidden\")))\n#else\n#define _CA_PRIVATE_VISIBILITY __attribute__((visibility(\"default\")))\n#endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN\n\n#define _CA_PRIVATE_IMPORT __attribute__((weak_import))\n\n#ifdef __OBJC__\n#define _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol))\n#define _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge 
void*)objc_getProtocol(#symbol))\n#else\n#define _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol)\n#define _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol)\n#endif // __OBJC__\n\n#define _CA_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _CA_PRIVATE_VISIBILITY = _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol)\n#define _CA_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _CA_PRIVATE_VISIBILITY = _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol)\n#define _CA_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _CA_PRIVATE_VISIBILITY = sel_registerName(symbol)\n#define _CA_PRIVATE_DEF_STR(type, symbol)                \\\n    _CA_EXTERN type const CA##symbol _CA_PRIVATE_IMPORT; \\\n    type const                       CA::symbol = (nullptr != &CA##symbol) ? CA##symbol : nullptr\n\n#else\n\n#define _CA_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol\n#define _CA_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol\n#define _CA_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor\n#define _CA_PRIVATE_DEF_STR(type, symbol) extern type const CA::symbol\n\n#endif // CA_PRIVATE_IMPLEMENTATION\n\nnamespace CA\n{\nnamespace Private\n{\n    namespace Class\n    {\n        _CA_PRIVATE_DEF_CLS(CAMetalLayer);\n    } // Class\n} // Private\n} // CA\n\nnamespace CA\n{\nnamespace Private\n{\n    namespace Protocol\n    {\n\n        _CA_PRIVATE_DEF_PRO(CAMetalDrawable);\n\n    } // Protocol\n} // Private\n} // CA\n\nnamespace CA\n{\nnamespace Private\n{\n    namespace Selector\n    {\n        _CA_PRIVATE_DEF_SEL(device,\n            \"device\");\n        _CA_PRIVATE_DEF_SEL(drawableSize,\n            \"drawableSize\");\n        _CA_PRIVATE_DEF_SEL(framebufferOnly,\n            \"framebufferOnly\");\n        _CA_PRIVATE_DEF_SEL(layer,\n            \"layer\");\n        _CA_PRIVATE_DEF_SEL(nextDrawable,\n            \"nextDrawable\");\n        _CA_PRIVATE_DEF_SEL(pixelFormat,\n            \"pixelFormat\");\n        _CA_PRIVATE_DEF_SEL(setDevice_,\n            \"setDevice:\");\n        
_CA_PRIVATE_DEF_SEL(setDrawableSize_,\n            \"setDrawableSize:\");\n        _CA_PRIVATE_DEF_SEL(setFramebufferOnly_,\n            \"setFramebufferOnly:\");\n        _CA_PRIVATE_DEF_SEL(setPixelFormat_,\n            \"setPixelFormat:\");\n        _CA_PRIVATE_DEF_SEL(texture,\n            \"texture\");\n    } // Class\n} // Private\n} // CA\n\nnamespace CA\n{\nclass MetalDrawable : public NS::Referencing<MetalDrawable, MTL::Drawable>\n{\npublic:\n    class MetalLayer* layer() const;\n    MTL::Texture*     texture() const;\n};\n}\n\n_CA_INLINE CA::MetalLayer* CA::MetalDrawable::layer() const\n{\n    return Object::sendMessage<MetalLayer*>(this, _CA_PRIVATE_SEL(layer));\n}\n\n_CA_INLINE MTL::Texture* CA::MetalDrawable::texture() const\n{\n    return Object::sendMessage<MTL::Texture*>(this, _CA_PRIVATE_SEL(texture));\n}\n\n#include <CoreGraphics/CGGeometry.h>\n\nnamespace CA\n{\n\nclass MetalLayer : public NS::Referencing<MetalLayer>\n{\npublic:\n    static class MetalLayer* layer();\n\n    MTL::Device*             device() const;\n    void                     setDevice(MTL::Device* device);\n\n    MTL::PixelFormat         pixelFormat() const;\n    void                     setPixelFormat(MTL::PixelFormat pixelFormat);\n\n    bool                     framebufferOnly() const;\n    void                     setFramebufferOnly(bool framebufferOnly);\n\n    CGSize                   drawableSize() const;\n    void                     setDrawableSize(CGSize drawableSize);\n\n    class MetalDrawable*     nextDrawable();\n};\n} // namespace CA\n\n_CA_INLINE CA::MetalLayer* CA::MetalLayer::layer()\n{\n    return Object::sendMessage<CA::MetalLayer*>(_CA_PRIVATE_CLS(CAMetalLayer), _CA_PRIVATE_SEL(layer));\n}\n\n_CA_INLINE MTL::Device* CA::MetalLayer::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _CA_PRIVATE_SEL(device));\n}\n\n_CA_INLINE void CA::MetalLayer::setDevice(MTL::Device* device)\n{\n    return Object::sendMessage<void>(this, 
_CA_PRIVATE_SEL(setDevice_), device);\n}\n\n_CA_INLINE MTL::PixelFormat CA::MetalLayer::pixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this,\n        _CA_PRIVATE_SEL(pixelFormat));\n}\n\n_CA_INLINE void CA::MetalLayer::setPixelFormat(MTL::PixelFormat pixelFormat)\n{\n    return Object::sendMessage<void>(this, _CA_PRIVATE_SEL(setPixelFormat_),\n        pixelFormat);\n}\n\n_CA_INLINE bool CA::MetalLayer::framebufferOnly() const\n{\n    return Object::sendMessage<bool>(this, _CA_PRIVATE_SEL(framebufferOnly));\n}\n\n_CA_INLINE void CA::MetalLayer::setFramebufferOnly(bool framebufferOnly)\n{\n    return Object::sendMessage<void>(this, _CA_PRIVATE_SEL(setFramebufferOnly_),\n        framebufferOnly);\n}\n\n_CA_INLINE CGSize CA::MetalLayer::drawableSize() const\n{\n    return Object::sendMessage<CGSize>(this, _CA_PRIVATE_SEL(drawableSize));\n}\n\n_CA_INLINE void CA::MetalLayer::setDrawableSize(CGSize drawableSize)\n{\n    return Object::sendMessage<void>(this, _CA_PRIVATE_SEL(setDrawableSize_),\n        drawableSize);\n}\n\n_CA_INLINE CA::MetalDrawable* CA::MetalLayer::nextDrawable()\n{\n    return Object::sendMessage<MetalDrawable*>(this,\n        _CA_PRIVATE_SEL(nextDrawable));\n}\n\n#pragma once\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, AttributeFormat) {\n    AttributeFormatInvalid = 0,\n    AttributeFormatUChar2 = 1,\n    AttributeFormatUChar3 = 2,\n    AttributeFormatUChar4 = 3,\n    AttributeFormatChar2 = 4,\n    AttributeFormatChar3 = 5,\n    AttributeFormatChar4 = 6,\n    AttributeFormatUChar2Normalized = 7,\n    AttributeFormatUChar3Normalized = 8,\n    AttributeFormatUChar4Normalized = 9,\n    AttributeFormatChar2Normalized = 10,\n    AttributeFormatChar3Normalized = 11,\n    AttributeFormatChar4Normalized = 12,\n    AttributeFormatUShort2 = 13,\n    AttributeFormatUShort3 = 14,\n    AttributeFormatUShort4 = 15,\n    AttributeFormatShort2 = 16,\n    AttributeFormatShort3 = 17,\n    AttributeFormatShort4 = 18,\n    
AttributeFormatUShort2Normalized = 19,\n    AttributeFormatUShort3Normalized = 20,\n    AttributeFormatUShort4Normalized = 21,\n    AttributeFormatShort2Normalized = 22,\n    AttributeFormatShort3Normalized = 23,\n    AttributeFormatShort4Normalized = 24,\n    AttributeFormatHalf2 = 25,\n    AttributeFormatHalf3 = 26,\n    AttributeFormatHalf4 = 27,\n    AttributeFormatFloat = 28,\n    AttributeFormatFloat2 = 29,\n    AttributeFormatFloat3 = 30,\n    AttributeFormatFloat4 = 31,\n    AttributeFormatInt = 32,\n    AttributeFormatInt2 = 33,\n    AttributeFormatInt3 = 34,\n    AttributeFormatInt4 = 35,\n    AttributeFormatUInt = 36,\n    AttributeFormatUInt2 = 37,\n    AttributeFormatUInt3 = 38,\n    AttributeFormatUInt4 = 39,\n    AttributeFormatInt1010102Normalized = 40,\n    AttributeFormatUInt1010102Normalized = 41,\n    AttributeFormatUChar4Normalized_BGRA = 42,\n    AttributeFormatUChar = 45,\n    AttributeFormatChar = 46,\n    AttributeFormatUCharNormalized = 47,\n    AttributeFormatCharNormalized = 48,\n    AttributeFormatUShort = 49,\n    AttributeFormatShort = 50,\n    AttributeFormatUShortNormalized = 51,\n    AttributeFormatShortNormalized = 52,\n    AttributeFormatHalf = 53,\n    AttributeFormatFloatRG11B10 = 54,\n    AttributeFormatFloatRGB9E5 = 55,\n};\n\n_MTL_ENUM(NS::UInteger, IndexType) {\n    IndexTypeUInt16 = 0,\n    IndexTypeUInt32 = 1,\n};\n\n_MTL_ENUM(NS::UInteger, StepFunction) {\n    StepFunctionConstant = 0,\n    StepFunctionPerVertex = 1,\n    StepFunctionPerInstance = 2,\n    StepFunctionPerPatch = 3,\n    StepFunctionPerPatchControlPoint = 4,\n    StepFunctionThreadPositionInGridX = 5,\n    StepFunctionThreadPositionInGridY = 6,\n    StepFunctionThreadPositionInGridXIndexed = 7,\n    StepFunctionThreadPositionInGridYIndexed = 8,\n};\n\nclass BufferLayoutDescriptor : public NS::Copying<BufferLayoutDescriptor>\n{\npublic:\n    static class BufferLayoutDescriptor* alloc();\n\n    class BufferLayoutDescriptor*        init();\n\n    NS::UInteger 
                        stride() const;\n    void                                 setStride(NS::UInteger stride);\n\n    MTL::StepFunction                    stepFunction() const;\n    void                                 setStepFunction(MTL::StepFunction stepFunction);\n\n    NS::UInteger                         stepRate() const;\n    void                                 setStepRate(NS::UInteger stepRate);\n};\n\nclass BufferLayoutDescriptorArray : public NS::Referencing<BufferLayoutDescriptorArray>\n{\npublic:\n    static class BufferLayoutDescriptorArray* alloc();\n\n    class BufferLayoutDescriptorArray*        init();\n\n    class BufferLayoutDescriptor*             object(NS::UInteger index);\n\n    void                                      setObject(const class BufferLayoutDescriptor* bufferDesc, NS::UInteger index);\n};\n\nclass AttributeDescriptor : public NS::Copying<AttributeDescriptor>\n{\npublic:\n    static class AttributeDescriptor* alloc();\n\n    class AttributeDescriptor*        init();\n\n    MTL::AttributeFormat              format() const;\n    void                              setFormat(MTL::AttributeFormat format);\n\n    NS::UInteger                      offset() const;\n    void                              setOffset(NS::UInteger offset);\n\n    NS::UInteger                      bufferIndex() const;\n    void                              setBufferIndex(NS::UInteger bufferIndex);\n};\n\nclass AttributeDescriptorArray : public NS::Referencing<AttributeDescriptorArray>\n{\npublic:\n    static class AttributeDescriptorArray* alloc();\n\n    class AttributeDescriptorArray*        init();\n\n    class AttributeDescriptor*             object(NS::UInteger index);\n\n    void                                   setObject(const class AttributeDescriptor* attributeDesc, NS::UInteger index);\n};\n\nclass StageInputOutputDescriptor : public NS::Copying<StageInputOutputDescriptor>\n{\npublic:\n    static class StageInputOutputDescriptor* alloc();\n\n    
class StageInputOutputDescriptor*        init();\n\n    static class StageInputOutputDescriptor* stageInputOutputDescriptor();\n\n    class BufferLayoutDescriptorArray*       layouts() const;\n\n    class AttributeDescriptorArray*          attributes() const;\n\n    MTL::IndexType                           indexType() const;\n    void                                     setIndexType(MTL::IndexType indexType);\n\n    NS::UInteger                             indexBufferIndex() const;\n    void                                     setIndexBufferIndex(NS::UInteger indexBufferIndex);\n\n    void                                     reset();\n};\n\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::BufferLayoutDescriptor>(_MTL_PRIVATE_CLS(MTLBufferLayoutDescriptor));\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptor::init()\n{\n    return NS::Object::init<MTL::BufferLayoutDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::BufferLayoutDescriptor::stride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(stride));\n}\n\n_MTL_INLINE void MTL::BufferLayoutDescriptor::setStride(NS::UInteger stride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStride_), stride);\n}\n\n_MTL_INLINE MTL::StepFunction MTL::BufferLayoutDescriptor::stepFunction() const\n{\n    return Object::sendMessage<MTL::StepFunction>(this, _MTL_PRIVATE_SEL(stepFunction));\n}\n\n_MTL_INLINE void MTL::BufferLayoutDescriptor::setStepFunction(MTL::StepFunction stepFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStepFunction_), stepFunction);\n}\n\n_MTL_INLINE NS::UInteger MTL::BufferLayoutDescriptor::stepRate() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(stepRate));\n}\n\n_MTL_INLINE void MTL::BufferLayoutDescriptor::setStepRate(NS::UInteger stepRate)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStepRate_), 
stepRate);\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::BufferLayoutDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::BufferLayoutDescriptorArray>(_MTL_PRIVATE_CLS(MTLBufferLayoutDescriptorArray));\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::BufferLayoutDescriptorArray::init()\n{\n    return NS::Object::init<MTL::BufferLayoutDescriptorArray>();\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptorArray::object(NS::UInteger index)\n{\n    return Object::sendMessage<MTL::BufferLayoutDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index);\n}\n\n_MTL_INLINE void MTL::BufferLayoutDescriptorArray::setObject(const MTL::BufferLayoutDescriptor* bufferDesc, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), bufferDesc, index);\n}\n\n_MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AttributeDescriptor>(_MTL_PRIVATE_CLS(MTLAttributeDescriptor));\n}\n\n_MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptor::init()\n{\n    return NS::Object::init<MTL::AttributeDescriptor>();\n}\n\n_MTL_INLINE MTL::AttributeFormat MTL::AttributeDescriptor::format() const\n{\n    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(format));\n}\n\n_MTL_INLINE void MTL::AttributeDescriptor::setFormat(MTL::AttributeFormat format)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFormat_), format);\n}\n\n_MTL_INLINE NS::UInteger MTL::AttributeDescriptor::offset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(offset));\n}\n\n_MTL_INLINE void MTL::AttributeDescriptor::setOffset(NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOffset_), offset);\n}\n\n_MTL_INLINE NS::UInteger MTL::AttributeDescriptor::bufferIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(bufferIndex));\n}\n\n_MTL_INLINE void MTL::AttributeDescriptor::setBufferIndex(NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBufferIndex_), bufferIndex);\n}\n\n_MTL_INLINE MTL::AttributeDescriptorArray* MTL::AttributeDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::AttributeDescriptorArray>(_MTL_PRIVATE_CLS(MTLAttributeDescriptorArray));\n}\n\n_MTL_INLINE MTL::AttributeDescriptorArray* MTL::AttributeDescriptorArray::init()\n{\n    return NS::Object::init<MTL::AttributeDescriptorArray>();\n}\n\n_MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptorArray::object(NS::UInteger index)\n{\n    return Object::sendMessage<MTL::AttributeDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index);\n}\n\n_MTL_INLINE void MTL::AttributeDescriptorArray::setObject(const MTL::AttributeDescriptor* attributeDesc, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attributeDesc, index);\n}\n\n_MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::StageInputOutputDescriptor>(_MTL_PRIVATE_CLS(MTLStageInputOutputDescriptor));\n}\n\n_MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::init()\n{\n    return NS::Object::init<MTL::StageInputOutputDescriptor>();\n}\n\n_MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::stageInputOutputDescriptor()\n{\n    return Object::sendMessage<MTL::StageInputOutputDescriptor*>(_MTL_PRIVATE_CLS(MTLStageInputOutputDescriptor), _MTL_PRIVATE_SEL(stageInputOutputDescriptor));\n}\n\n_MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::StageInputOutputDescriptor::layouts() const\n{\n    return Object::sendMessage<MTL::BufferLayoutDescriptorArray*>(this, _MTL_PRIVATE_SEL(layouts));\n}\n\n_MTL_INLINE MTL::AttributeDescriptorArray* MTL::StageInputOutputDescriptor::attributes() const\n{\n    return 
Object::sendMessage<MTL::AttributeDescriptorArray*>(this, _MTL_PRIVATE_SEL(attributes));\n}\n\n_MTL_INLINE MTL::IndexType MTL::StageInputOutputDescriptor::indexType() const\n{\n    return Object::sendMessage<MTL::IndexType>(this, _MTL_PRIVATE_SEL(indexType));\n}\n\n_MTL_INLINE void MTL::StageInputOutputDescriptor::setIndexType(MTL::IndexType indexType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexType_), indexType);\n}\n\n_MTL_INLINE NS::UInteger MTL::StageInputOutputDescriptor::indexBufferIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(indexBufferIndex));\n}\n\n_MTL_INLINE void MTL::StageInputOutputDescriptor::setIndexBufferIndex(NS::UInteger indexBufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBufferIndex_), indexBufferIndex);\n}\n\n_MTL_INLINE void MTL::StageInputOutputDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\nnamespace MTL\n{\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wnested-anon-types\"\nstruct PackedFloat3\n{\n    PackedFloat3();\n    PackedFloat3(float x, float y, float z);\n\n    float& operator[](int idx);\n    float  operator[](int idx) const;\n\n    union\n    {\n        struct\n        {\n            float x;\n            float y;\n            float z;\n        };\n\n        float elements[3];\n    };\n} _MTL_PACKED;\n#pragma clang diagnostic pop\n\nstruct PackedFloat4x3\n{\n    PackedFloat4x3();\n    PackedFloat4x3(const PackedFloat3& col0, const PackedFloat3& col1, const PackedFloat3& col2, const PackedFloat3& col3);\n\n    PackedFloat3&       operator[](int idx);\n    const PackedFloat3& operator[](int idx) const;\n\n    PackedFloat3        columns[4];\n} _MTL_PACKED;\n\nstruct AxisAlignedBoundingBox\n{\n    AxisAlignedBoundingBox();\n    AxisAlignedBoundingBox(PackedFloat3 p);\n    AxisAlignedBoundingBox(PackedFloat3 min, PackedFloat3 max);\n\n    PackedFloat3 min;\n    PackedFloat3 
max;\n} _MTL_PACKED;\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wnested-anon-types\"\nstruct PackedFloatQuaternion\n{\n    PackedFloatQuaternion();\n    PackedFloatQuaternion(float x, float y, float z, float w);\n\n    float&       operator[](int idx);\n    const float& operator[](int idx) const;\n\n    union \n    {\n        struct\n        {\n            float x;\n            float y;\n            float z;\n            float w;\n        };\n\n        float elements[4];\n    };\n    \n} _MTL_PACKED;\n#pragma clang diagnostic pop\n\nstruct ComponentTransform\n{\n    PackedFloat3          scale;\n    PackedFloat3          shear;\n    PackedFloat3          pivot;\n    PackedFloatQuaternion rotation;\n    PackedFloat3          translation;\n} _MTL_PACKED;\n\n}\n\n_MTL_INLINE MTL::PackedFloat3::PackedFloat3()\n    : x(0.0f)\n    , y(0.0f)\n    , z(0.0f)\n{\n}\n\n_MTL_INLINE MTL::PackedFloat3::PackedFloat3(float _x, float _y, float _z)\n    : x(_x)\n    , y(_y)\n    , z(_z)\n{\n}\n\n_MTL_INLINE float& MTL::PackedFloat3::operator[](int idx)\n{\n    return elements[idx];\n}\n\n_MTL_INLINE float MTL::PackedFloat3::operator[](int idx) const\n{\n    return elements[idx];\n}\n\n_MTL_INLINE MTL::PackedFloat4x3::PackedFloat4x3()\n{\n    columns[0] = PackedFloat3(0.0f, 0.0f, 0.0f);\n    columns[1] = PackedFloat3(0.0f, 0.0f, 0.0f);\n    columns[2] = PackedFloat3(0.0f, 0.0f, 0.0f);\n    columns[3] = PackedFloat3(0.0f, 0.0f, 0.0f);\n}\n\n_MTL_INLINE MTL::PackedFloat4x3::PackedFloat4x3(const PackedFloat3& col0, const PackedFloat3& col1, const PackedFloat3& col2, const PackedFloat3& col3)\n{\n    columns[0] = col0;\n    columns[1] = col1;\n    columns[2] = col2;\n    columns[3] = col3;\n}\n\n_MTL_INLINE MTL::PackedFloat3& MTL::PackedFloat4x3::operator[](int idx)\n{\n    return columns[idx];\n}\n\n_MTL_INLINE const MTL::PackedFloat3& MTL::PackedFloat4x3::operator[](int idx) const\n{\n    return columns[idx];\n}\n\n_MTL_INLINE 
MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox()\n    : min(INFINITY, INFINITY, INFINITY)\n    , max(-INFINITY, -INFINITY, -INFINITY)\n{\n}\n\n_MTL_INLINE MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox(PackedFloat3 p)\n    : min(p)\n    , max(p)\n{\n}\n\n_MTL_INLINE MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox(PackedFloat3 _min, PackedFloat3 _max)\n    : min(_min)\n    , max(_max)\n{\n}\n\n_MTL_INLINE MTL::PackedFloatQuaternion::PackedFloatQuaternion()\n    : x(0.0f)\n    , y(0.0f)\n    , z(0.0f)\n    , w(0.0f)\n{\n}\n\n_MTL_INLINE MTL::PackedFloatQuaternion::PackedFloatQuaternion(float x, float y, float z, float w)\n    : x(x)\n    , y(y)\n    , z(z)\n    , w(w)\n{\n}\n\n_MTL_INLINE float& MTL::PackedFloatQuaternion::operator[](int idx)\n{\n    return elements[idx];\n}\n\n_MTL_INLINE const float& MTL::PackedFloatQuaternion::operator[](int idx) const\n{\n    return elements[idx];\n}\n\nnamespace MTL\n{\n_MTL_OPTIONS(NS::UInteger, AccelerationStructureUsage) {\n    AccelerationStructureUsageNone = 0,\n    AccelerationStructureUsageRefit = 1,\n    AccelerationStructureUsagePreferFastBuild = 2,\n    AccelerationStructureUsageExtendedLimits = 4,\n};\n\n_MTL_OPTIONS(uint32_t, AccelerationStructureInstanceOptions) {\n    AccelerationStructureInstanceOptionNone = 0,\n    AccelerationStructureInstanceOptionDisableTriangleCulling = 1,\n    AccelerationStructureInstanceOptionTriangleFrontFacingWindingCounterClockwise = 2,\n    AccelerationStructureInstanceOptionOpaque = 4,\n    AccelerationStructureInstanceOptionNonOpaque = 8,\n};\n\n_MTL_ENUM(NS::Integer, MatrixLayout) {\n    MatrixLayoutColumnMajor = 0,\n    MatrixLayoutRowMajor = 1,\n};\n\nclass AccelerationStructureDescriptor : public NS::Copying<AccelerationStructureDescriptor>\n{\npublic:\n    static class AccelerationStructureDescriptor* alloc();\n\n    class AccelerationStructureDescriptor*        init();\n\n    MTL::AccelerationStructureUsage               usage() const;\n    void                    
                      setUsage(MTL::AccelerationStructureUsage usage);\n};\n\nclass AccelerationStructureGeometryDescriptor : public NS::Copying<AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureGeometryDescriptor* alloc();\n\n    class AccelerationStructureGeometryDescriptor*        init();\n\n    NS::UInteger                                          intersectionFunctionTableOffset() const;\n    void                                                  setIntersectionFunctionTableOffset(NS::UInteger intersectionFunctionTableOffset);\n\n    bool                                                  opaque() const;\n    void                                                  setOpaque(bool opaque);\n\n    bool                                                  allowDuplicateIntersectionFunctionInvocation() const;\n    void                                                  setAllowDuplicateIntersectionFunctionInvocation(bool allowDuplicateIntersectionFunctionInvocation);\n\n    NS::String*                                           label() const;\n    void                                                  setLabel(const NS::String* label);\n\n    class Buffer*                                         primitiveDataBuffer() const;\n    void                                                  setPrimitiveDataBuffer(const class Buffer* primitiveDataBuffer);\n\n    NS::UInteger                                          primitiveDataBufferOffset() const;\n    void                                                  setPrimitiveDataBufferOffset(NS::UInteger primitiveDataBufferOffset);\n\n    NS::UInteger                                          primitiveDataStride() const;\n    void                                                  setPrimitiveDataStride(NS::UInteger primitiveDataStride);\n\n    NS::UInteger                                          primitiveDataElementSize() const;\n    void                                                  
setPrimitiveDataElementSize(NS::UInteger primitiveDataElementSize);\n};\n\n_MTL_ENUM(uint32_t, MotionBorderMode) {\n    MotionBorderModeClamp = 0,\n    MotionBorderModeVanish = 1,\n};\n\nclass PrimitiveAccelerationStructureDescriptor : public NS::Copying<PrimitiveAccelerationStructureDescriptor, MTL::AccelerationStructureDescriptor>\n{\npublic:\n    static class PrimitiveAccelerationStructureDescriptor* alloc();\n\n    class PrimitiveAccelerationStructureDescriptor*        init();\n\n    NS::Array*                                             geometryDescriptors() const;\n    void                                                   setGeometryDescriptors(const NS::Array* geometryDescriptors);\n\n    MTL::MotionBorderMode                                  motionStartBorderMode() const;\n    void                                                   setMotionStartBorderMode(MTL::MotionBorderMode motionStartBorderMode);\n\n    MTL::MotionBorderMode                                  motionEndBorderMode() const;\n    void                                                   setMotionEndBorderMode(MTL::MotionBorderMode motionEndBorderMode);\n\n    float                                                  motionStartTime() const;\n    void                                                   setMotionStartTime(float motionStartTime);\n\n    float                                                  motionEndTime() const;\n    void                                                   setMotionEndTime(float motionEndTime);\n\n    NS::UInteger                                           motionKeyframeCount() const;\n    void                                                   setMotionKeyframeCount(NS::UInteger motionKeyframeCount);\n\n    static MTL::PrimitiveAccelerationStructureDescriptor*  descriptor();\n};\n\nclass AccelerationStructureTriangleGeometryDescriptor : public NS::Copying<AccelerationStructureTriangleGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    
static class AccelerationStructureTriangleGeometryDescriptor* alloc();\n\n    class AccelerationStructureTriangleGeometryDescriptor*        init();\n\n    class Buffer*                                                 vertexBuffer() const;\n    void                                                          setVertexBuffer(const class Buffer* vertexBuffer);\n\n    NS::UInteger                                                  vertexBufferOffset() const;\n    void                                                          setVertexBufferOffset(NS::UInteger vertexBufferOffset);\n\n    MTL::AttributeFormat                                          vertexFormat() const;\n    void                                                          setVertexFormat(MTL::AttributeFormat vertexFormat);\n\n    NS::UInteger                                                  vertexStride() const;\n    void                                                          setVertexStride(NS::UInteger vertexStride);\n\n    class Buffer*                                                 indexBuffer() const;\n    void                                                          setIndexBuffer(const class Buffer* indexBuffer);\n\n    NS::UInteger                                                  indexBufferOffset() const;\n    void                                                          setIndexBufferOffset(NS::UInteger indexBufferOffset);\n\n    MTL::IndexType                                                indexType() const;\n    void                                                          setIndexType(MTL::IndexType indexType);\n\n    NS::UInteger                                                  triangleCount() const;\n    void                                                          setTriangleCount(NS::UInteger triangleCount);\n\n    class Buffer*                                                 transformationMatrixBuffer() const;\n    void                                                          
setTransformationMatrixBuffer(const class Buffer* transformationMatrixBuffer);\n\n    NS::UInteger                                                  transformationMatrixBufferOffset() const;\n    void                                                          setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset);\n\n    MTL::MatrixLayout                                             transformationMatrixLayout() const;\n    void                                                          setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout);\n\n    static MTL::AccelerationStructureTriangleGeometryDescriptor*  descriptor();\n};\n\nclass AccelerationStructureBoundingBoxGeometryDescriptor : public NS::Copying<AccelerationStructureBoundingBoxGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureBoundingBoxGeometryDescriptor* alloc();\n\n    class AccelerationStructureBoundingBoxGeometryDescriptor*        init();\n\n    class Buffer*                                                    boundingBoxBuffer() const;\n    void                                                             setBoundingBoxBuffer(const class Buffer* boundingBoxBuffer);\n\n    NS::UInteger                                                     boundingBoxBufferOffset() const;\n    void                                                             setBoundingBoxBufferOffset(NS::UInteger boundingBoxBufferOffset);\n\n    NS::UInteger                                                     boundingBoxStride() const;\n    void                                                             setBoundingBoxStride(NS::UInteger boundingBoxStride);\n\n    NS::UInteger                                                     boundingBoxCount() const;\n    void                                                             setBoundingBoxCount(NS::UInteger boundingBoxCount);\n\n    static 
MTL::AccelerationStructureBoundingBoxGeometryDescriptor*  descriptor();\n};\n\nclass MotionKeyframeData : public NS::Referencing<MotionKeyframeData>\n{\npublic:\n    static class MotionKeyframeData* alloc();\n\n    class MotionKeyframeData*        init();\n\n    class Buffer*                    buffer() const;\n    void                             setBuffer(const class Buffer* buffer);\n\n    NS::UInteger                     offset() const;\n    void                             setOffset(NS::UInteger offset);\n\n    static MTL::MotionKeyframeData*  data();\n};\n\nclass AccelerationStructureMotionTriangleGeometryDescriptor : public NS::Copying<AccelerationStructureMotionTriangleGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureMotionTriangleGeometryDescriptor* alloc();\n\n    class AccelerationStructureMotionTriangleGeometryDescriptor*        init();\n\n    NS::Array*                                                          vertexBuffers() const;\n    void                                                                setVertexBuffers(const NS::Array* vertexBuffers);\n\n    MTL::AttributeFormat                                                vertexFormat() const;\n    void                                                                setVertexFormat(MTL::AttributeFormat vertexFormat);\n\n    NS::UInteger                                                        vertexStride() const;\n    void                                                                setVertexStride(NS::UInteger vertexStride);\n\n    class Buffer*                                                       indexBuffer() const;\n    void                                                                setIndexBuffer(const class Buffer* indexBuffer);\n\n    NS::UInteger                                                        indexBufferOffset() const;\n    void                                                                
setIndexBufferOffset(NS::UInteger indexBufferOffset);\n\n    MTL::IndexType                                                      indexType() const;\n    void                                                                setIndexType(MTL::IndexType indexType);\n\n    NS::UInteger                                                        triangleCount() const;\n    void                                                                setTriangleCount(NS::UInteger triangleCount);\n\n    class Buffer*                                                       transformationMatrixBuffer() const;\n    void                                                                setTransformationMatrixBuffer(const class Buffer* transformationMatrixBuffer);\n\n    NS::UInteger                                                        transformationMatrixBufferOffset() const;\n    void                                                                setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset);\n\n    MTL::MatrixLayout                                                   transformationMatrixLayout() const;\n    void                                                                setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout);\n\n    static MTL::AccelerationStructureMotionTriangleGeometryDescriptor*  descriptor();\n};\n\nclass AccelerationStructureMotionBoundingBoxGeometryDescriptor : public NS::Copying<AccelerationStructureMotionBoundingBoxGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureMotionBoundingBoxGeometryDescriptor* alloc();\n\n    class AccelerationStructureMotionBoundingBoxGeometryDescriptor*        init();\n\n    NS::Array*                                                             boundingBoxBuffers() const;\n    void                                                                   setBoundingBoxBuffers(const NS::Array* boundingBoxBuffers);\n\n    
NS::UInteger                                                           boundingBoxStride() const;\n    void                                                                   setBoundingBoxStride(NS::UInteger boundingBoxStride);\n\n    NS::UInteger                                                           boundingBoxCount() const;\n    void                                                                   setBoundingBoxCount(NS::UInteger boundingBoxCount);\n\n    static MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor*  descriptor();\n};\n\n_MTL_ENUM(NS::Integer, CurveType) {\n    CurveTypeRound = 0,\n    CurveTypeFlat = 1,\n};\n\n_MTL_ENUM(NS::Integer, CurveBasis) {\n    CurveBasisBSpline = 0,\n    CurveBasisCatmullRom = 1,\n    CurveBasisLinear = 2,\n    CurveBasisBezier = 3,\n};\n\n_MTL_ENUM(NS::Integer, CurveEndCaps) {\n    CurveEndCapsNone = 0,\n    CurveEndCapsDisk = 1,\n    CurveEndCapsSphere = 2,\n};\n\nclass AccelerationStructureCurveGeometryDescriptor : public NS::Copying<AccelerationStructureCurveGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureCurveGeometryDescriptor* alloc();\n\n    class AccelerationStructureCurveGeometryDescriptor*        init();\n\n    class Buffer*                                              controlPointBuffer() const;\n    void                                                       setControlPointBuffer(const class Buffer* controlPointBuffer);\n\n    NS::UInteger                                               controlPointBufferOffset() const;\n    void                                                       setControlPointBufferOffset(NS::UInteger controlPointBufferOffset);\n\n    NS::UInteger                                               controlPointCount() const;\n    void                                                       setControlPointCount(NS::UInteger controlPointCount);\n\n    NS::UInteger                                               
controlPointStride() const;\n    void                                                       setControlPointStride(NS::UInteger controlPointStride);\n\n    MTL::AttributeFormat                                       controlPointFormat() const;\n    void                                                       setControlPointFormat(MTL::AttributeFormat controlPointFormat);\n\n    class Buffer*                                              radiusBuffer() const;\n    void                                                       setRadiusBuffer(const class Buffer* radiusBuffer);\n\n    NS::UInteger                                               radiusBufferOffset() const;\n    void                                                       setRadiusBufferOffset(NS::UInteger radiusBufferOffset);\n\n    MTL::AttributeFormat                                       radiusFormat() const;\n    void                                                       setRadiusFormat(MTL::AttributeFormat radiusFormat);\n\n    NS::UInteger                                               radiusStride() const;\n    void                                                       setRadiusStride(NS::UInteger radiusStride);\n\n    class Buffer*                                              indexBuffer() const;\n    void                                                       setIndexBuffer(const class Buffer* indexBuffer);\n\n    NS::UInteger                                               indexBufferOffset() const;\n    void                                                       setIndexBufferOffset(NS::UInteger indexBufferOffset);\n\n    MTL::IndexType                                             indexType() const;\n    void                                                       setIndexType(MTL::IndexType indexType);\n\n    NS::UInteger                                               segmentCount() const;\n    void                                                       setSegmentCount(NS::UInteger segmentCount);\n\n    
NS::UInteger                                               segmentControlPointCount() const;\n    void                                                       setSegmentControlPointCount(NS::UInteger segmentControlPointCount);\n\n    MTL::CurveType                                             curveType() const;\n    void                                                       setCurveType(MTL::CurveType curveType);\n\n    MTL::CurveBasis                                            curveBasis() const;\n    void                                                       setCurveBasis(MTL::CurveBasis curveBasis);\n\n    MTL::CurveEndCaps                                          curveEndCaps() const;\n    void                                                       setCurveEndCaps(MTL::CurveEndCaps curveEndCaps);\n\n    static MTL::AccelerationStructureCurveGeometryDescriptor*  descriptor();\n};\n\nclass AccelerationStructureMotionCurveGeometryDescriptor : public NS::Copying<AccelerationStructureMotionCurveGeometryDescriptor, MTL::AccelerationStructureGeometryDescriptor>\n{\npublic:\n    static class AccelerationStructureMotionCurveGeometryDescriptor* alloc();\n\n    class AccelerationStructureMotionCurveGeometryDescriptor*        init();\n\n    NS::Array*                                                       controlPointBuffers() const;\n    void                                                             setControlPointBuffers(const NS::Array* controlPointBuffers);\n\n    NS::UInteger                                                     controlPointCount() const;\n    void                                                             setControlPointCount(NS::UInteger controlPointCount);\n\n    NS::UInteger                                                     controlPointStride() const;\n    void                                                             setControlPointStride(NS::UInteger controlPointStride);\n\n    MTL::AttributeFormat                                             
controlPointFormat() const;\n    void                                                             setControlPointFormat(MTL::AttributeFormat controlPointFormat);\n\n    NS::Array*                                                       radiusBuffers() const;\n    void                                                             setRadiusBuffers(const NS::Array* radiusBuffers);\n\n    MTL::AttributeFormat                                             radiusFormat() const;\n    void                                                             setRadiusFormat(MTL::AttributeFormat radiusFormat);\n\n    NS::UInteger                                                     radiusStride() const;\n    void                                                             setRadiusStride(NS::UInteger radiusStride);\n\n    class Buffer*                                                    indexBuffer() const;\n    void                                                             setIndexBuffer(const class Buffer* indexBuffer);\n\n    NS::UInteger                                                     indexBufferOffset() const;\n    void                                                             setIndexBufferOffset(NS::UInteger indexBufferOffset);\n\n    MTL::IndexType                                                   indexType() const;\n    void                                                             setIndexType(MTL::IndexType indexType);\n\n    NS::UInteger                                                     segmentCount() const;\n    void                                                             setSegmentCount(NS::UInteger segmentCount);\n\n    NS::UInteger                                                     segmentControlPointCount() const;\n    void                                                             setSegmentControlPointCount(NS::UInteger segmentControlPointCount);\n\n    MTL::CurveType                                                   curveType() const;\n    void           
                                                  setCurveType(MTL::CurveType curveType);\n\n    MTL::CurveBasis                                                  curveBasis() const;\n    void                                                             setCurveBasis(MTL::CurveBasis curveBasis);\n\n    MTL::CurveEndCaps                                                curveEndCaps() const;\n    void                                                             setCurveEndCaps(MTL::CurveEndCaps curveEndCaps);\n\n    static MTL::AccelerationStructureMotionCurveGeometryDescriptor*  descriptor();\n};\n\nstruct AccelerationStructureInstanceDescriptor\n{\n    MTL::PackedFloat4x3                       transformationMatrix;\n    MTL::AccelerationStructureInstanceOptions options;\n    uint32_t                                  mask;\n    uint32_t                                  intersectionFunctionTableOffset;\n    uint32_t                                  accelerationStructureIndex;\n} _MTL_PACKED;\n\nstruct AccelerationStructureUserIDInstanceDescriptor\n{\n    MTL::PackedFloat4x3                       transformationMatrix;\n    MTL::AccelerationStructureInstanceOptions options;\n    uint32_t                                  mask;\n    uint32_t                                  intersectionFunctionTableOffset;\n    uint32_t                                  accelerationStructureIndex;\n    uint32_t                                  userID;\n} _MTL_PACKED;\n\n_MTL_ENUM(NS::UInteger, AccelerationStructureInstanceDescriptorType) {\n    AccelerationStructureInstanceDescriptorTypeDefault = 0,\n    AccelerationStructureInstanceDescriptorTypeUserID = 1,\n    AccelerationStructureInstanceDescriptorTypeMotion = 2,\n    AccelerationStructureInstanceDescriptorTypeIndirect = 3,\n    AccelerationStructureInstanceDescriptorTypeIndirectMotion = 4,\n};\n\nstruct AccelerationStructureMotionInstanceDescriptor\n{\n    MTL::AccelerationStructureInstanceOptions options;\n    uint32_t                    
              mask;\n    uint32_t                                  intersectionFunctionTableOffset;\n    uint32_t                                  accelerationStructureIndex;\n    uint32_t                                  userID;\n    uint32_t                                  motionTransformsStartIndex;\n    uint32_t                                  motionTransformsCount;\n    MTL::MotionBorderMode                     motionStartBorderMode;\n    MTL::MotionBorderMode                     motionEndBorderMode;\n    float                                     motionStartTime;\n    float                                     motionEndTime;\n} _MTL_PACKED;\n\nstruct IndirectAccelerationStructureInstanceDescriptor\n{\n    MTL::PackedFloat4x3                       transformationMatrix;\n    MTL::AccelerationStructureInstanceOptions options;\n    uint32_t                                  mask;\n    uint32_t                                  intersectionFunctionTableOffset;\n    uint32_t                                  userID;\n    MTL::ResourceID                           accelerationStructureID;\n} _MTL_PACKED;\n\nstruct IndirectAccelerationStructureMotionInstanceDescriptor\n{\n    MTL::AccelerationStructureInstanceOptions options;\n    uint32_t                                  mask;\n    uint32_t                                  intersectionFunctionTableOffset;\n    uint32_t                                  userID;\n    MTL::ResourceID                           accelerationStructureID;\n    uint32_t                                  motionTransformsStartIndex;\n    uint32_t                                  motionTransformsCount;\n    MTL::MotionBorderMode                     motionStartBorderMode;\n    MTL::MotionBorderMode                     motionEndBorderMode;\n    float                                     motionStartTime;\n    float                                     motionEndTime;\n} _MTL_PACKED;\n\n_MTL_ENUM(NS::Integer, TransformType) {\n    
TransformTypePackedFloat4x3 = 0,\n    TransformTypeComponent = 1,\n};\n\nclass InstanceAccelerationStructureDescriptor : public NS::Copying<InstanceAccelerationStructureDescriptor, MTL::AccelerationStructureDescriptor>\n{\npublic:\n    static class InstanceAccelerationStructureDescriptor* alloc();\n\n    class InstanceAccelerationStructureDescriptor*        init();\n\n    class Buffer*                                         instanceDescriptorBuffer() const;\n    void                                                  setInstanceDescriptorBuffer(const class Buffer* instanceDescriptorBuffer);\n\n    NS::UInteger                                          instanceDescriptorBufferOffset() const;\n    void                                                  setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset);\n\n    NS::UInteger                                          instanceDescriptorStride() const;\n    void                                                  setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride);\n\n    NS::UInteger                                          instanceCount() const;\n    void                                                  setInstanceCount(NS::UInteger instanceCount);\n\n    NS::Array*                                            instancedAccelerationStructures() const;\n    void                                                  setInstancedAccelerationStructures(const NS::Array* instancedAccelerationStructures);\n\n    MTL::AccelerationStructureInstanceDescriptorType      instanceDescriptorType() const;\n    void                                                  setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType);\n\n    class Buffer*                                         motionTransformBuffer() const;\n    void                                                  setMotionTransformBuffer(const class Buffer* motionTransformBuffer);\n\n    NS::UInteger              
                            motionTransformBufferOffset() const;\n    void                                                  setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset);\n\n    NS::UInteger                                          motionTransformCount() const;\n    void                                                  setMotionTransformCount(NS::UInteger motionTransformCount);\n\n    MTL::MatrixLayout                                     instanceTransformationMatrixLayout() const;\n    void                                                  setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout);\n\n    MTL::TransformType                                    motionTransformType() const;\n    void                                                  setMotionTransformType(MTL::TransformType motionTransformType);\n\n    NS::UInteger                                          motionTransformStride() const;\n    void                                                  setMotionTransformStride(NS::UInteger motionTransformStride);\n\n    static MTL::InstanceAccelerationStructureDescriptor*  descriptor();\n};\n\nclass IndirectInstanceAccelerationStructureDescriptor : public NS::Copying<IndirectInstanceAccelerationStructureDescriptor, MTL::AccelerationStructureDescriptor>\n{\npublic:\n    static class IndirectInstanceAccelerationStructureDescriptor* alloc();\n\n    class IndirectInstanceAccelerationStructureDescriptor*        init();\n\n    class Buffer*                                                 instanceDescriptorBuffer() const;\n    void                                                          setInstanceDescriptorBuffer(const class Buffer* instanceDescriptorBuffer);\n\n    NS::UInteger                                                  instanceDescriptorBufferOffset() const;\n    void                                                          setInstanceDescriptorBufferOffset(NS::UInteger 
instanceDescriptorBufferOffset);\n\n    NS::UInteger                                                  instanceDescriptorStride() const;\n    void                                                          setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride);\n\n    NS::UInteger                                                  maxInstanceCount() const;\n    void                                                          setMaxInstanceCount(NS::UInteger maxInstanceCount);\n\n    class Buffer*                                                 instanceCountBuffer() const;\n    void                                                          setInstanceCountBuffer(const class Buffer* instanceCountBuffer);\n\n    NS::UInteger                                                  instanceCountBufferOffset() const;\n    void                                                          setInstanceCountBufferOffset(NS::UInteger instanceCountBufferOffset);\n\n    MTL::AccelerationStructureInstanceDescriptorType              instanceDescriptorType() const;\n    void                                                          setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType);\n\n    class Buffer*                                                 motionTransformBuffer() const;\n    void                                                          setMotionTransformBuffer(const class Buffer* motionTransformBuffer);\n\n    NS::UInteger                                                  motionTransformBufferOffset() const;\n    void                                                          setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset);\n\n    NS::UInteger                                                  maxMotionTransformCount() const;\n    void                                                          setMaxMotionTransformCount(NS::UInteger maxMotionTransformCount);\n\n    class Buffer*                                      
           motionTransformCountBuffer() const;\n    void                                                          setMotionTransformCountBuffer(const class Buffer* motionTransformCountBuffer);\n\n    NS::UInteger                                                  motionTransformCountBufferOffset() const;\n    void                                                          setMotionTransformCountBufferOffset(NS::UInteger motionTransformCountBufferOffset);\n\n    MTL::MatrixLayout                                             instanceTransformationMatrixLayout() const;\n    void                                                          setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout);\n\n    MTL::TransformType                                            motionTransformType() const;\n    void                                                          setMotionTransformType(MTL::TransformType motionTransformType);\n\n    NS::UInteger                                                  motionTransformStride() const;\n    void                                                          setMotionTransformStride(NS::UInteger motionTransformStride);\n\n    static MTL::IndirectInstanceAccelerationStructureDescriptor*  descriptor();\n};\n\nclass AccelerationStructure : public NS::Referencing<AccelerationStructure, Resource>\n{\npublic:\n    NS::UInteger    size() const;\n\n    MTL::ResourceID gpuResourceID() const;\n};\n\n}\n\n_MTL_INLINE MTL::AccelerationStructureDescriptor* MTL::AccelerationStructureDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructureDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureDescriptor* MTL::AccelerationStructureDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructureDescriptor>();\n}\n\n_MTL_INLINE MTL::AccelerationStructureUsage MTL::AccelerationStructureDescriptor::usage() const\n{\n    return 
Object::sendMessage<MTL::AccelerationStructureUsage>(this, _MTL_PRIVATE_SEL(usage));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureDescriptor::setUsage(MTL::AccelerationStructureUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setUsage_), usage);\n}\n\n_MTL_INLINE MTL::AccelerationStructureGeometryDescriptor* MTL::AccelerationStructureGeometryDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructureGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureGeometryDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureGeometryDescriptor* MTL::AccelerationStructureGeometryDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructureGeometryDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::intersectionFunctionTableOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(intersectionFunctionTableOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setIntersectionFunctionTableOffset(NS::UInteger intersectionFunctionTableOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTableOffset_), intersectionFunctionTableOffset);\n}\n\n_MTL_INLINE bool MTL::AccelerationStructureGeometryDescriptor::opaque() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(opaque));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setOpaque(bool opaque)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOpaque_), opaque);\n}\n\n_MTL_INLINE bool MTL::AccelerationStructureGeometryDescriptor::allowDuplicateIntersectionFunctionInvocation() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(allowDuplicateIntersectionFunctionInvocation));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setAllowDuplicateIntersectionFunctionInvocation(bool allowDuplicateIntersectionFunctionInvocation)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAllowDuplicateIntersectionFunctionInvocation_), allowDuplicateIntersectionFunctionInvocation);\n}\n\n_MTL_INLINE NS::String* MTL::AccelerationStructureGeometryDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureGeometryDescriptor::primitiveDataBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(primitiveDataBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataBuffer(const MTL::Buffer* primitiveDataBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPrimitiveDataBuffer_), primitiveDataBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(primitiveDataBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataBufferOffset(NS::UInteger primitiveDataBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPrimitiveDataBufferOffset_), primitiveDataBufferOffset);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataStride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(primitiveDataStride));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataStride(NS::UInteger primitiveDataStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPrimitiveDataStride_), primitiveDataStride);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataElementSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(primitiveDataElementSize));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataElementSize(NS::UInteger primitiveDataElementSize)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPrimitiveDataElementSize_), primitiveDataElementSize);\n}\n\n_MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::PrimitiveAccelerationStructureDescriptor>(_MTL_PRIVATE_CLS(MTLPrimitiveAccelerationStructureDescriptor));\n}\n\n_MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::init()\n{\n    return NS::Object::init<MTL::PrimitiveAccelerationStructureDescriptor>();\n}\n\n_MTL_INLINE NS::Array* MTL::PrimitiveAccelerationStructureDescriptor::geometryDescriptors() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(geometryDescriptors));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setGeometryDescriptors(const NS::Array* geometryDescriptors)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setGeometryDescriptors_), geometryDescriptors);\n}\n\n_MTL_INLINE MTL::MotionBorderMode MTL::PrimitiveAccelerationStructureDescriptor::motionStartBorderMode() const\n{\n    return Object::sendMessage<MTL::MotionBorderMode>(this, _MTL_PRIVATE_SEL(motionStartBorderMode));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionStartBorderMode(MTL::MotionBorderMode motionStartBorderMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionStartBorderMode_), motionStartBorderMode);\n}\n\n_MTL_INLINE MTL::MotionBorderMode MTL::PrimitiveAccelerationStructureDescriptor::motionEndBorderMode() const\n{\n    return Object::sendMessage<MTL::MotionBorderMode>(this, _MTL_PRIVATE_SEL(motionEndBorderMode));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionEndBorderMode(MTL::MotionBorderMode 
motionEndBorderMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionEndBorderMode_), motionEndBorderMode);\n}\n\n_MTL_INLINE float MTL::PrimitiveAccelerationStructureDescriptor::motionStartTime() const\n{\n    return Object::sendMessage<float>(this, _MTL_PRIVATE_SEL(motionStartTime));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionStartTime(float motionStartTime)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionStartTime_), motionStartTime);\n}\n\n_MTL_INLINE float MTL::PrimitiveAccelerationStructureDescriptor::motionEndTime() const\n{\n    return Object::sendMessage<float>(this, _MTL_PRIVATE_SEL(motionEndTime));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionEndTime(float motionEndTime)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionEndTime_), motionEndTime);\n}\n\n_MTL_INLINE NS::UInteger MTL::PrimitiveAccelerationStructureDescriptor::motionKeyframeCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionKeyframeCount));\n}\n\n_MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionKeyframeCount(NS::UInteger motionKeyframeCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionKeyframeCount_), motionKeyframeCount);\n}\n\n_MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::descriptor()\n{\n    return Object::sendMessage<MTL::PrimitiveAccelerationStructureDescriptor*>(_MTL_PRIVATE_CLS(MTLPrimitiveAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* MTL::AccelerationStructureTriangleGeometryDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructureTriangleGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureTriangleGeometryDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* 
MTL::AccelerationStructureTriangleGeometryDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructureTriangleGeometryDescriptor>();\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::vertexBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(vertexBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexBuffer(const MTL::Buffer* vertexBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffer_), vertexBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::vertexBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(vertexBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexBufferOffset(NS::UInteger vertexBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBufferOffset_), vertexBufferOffset);\n}\n\n_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureTriangleGeometryDescriptor::vertexFormat() const\n{\n    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(vertexFormat));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexFormat(MTL::AttributeFormat vertexFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexFormat_), vertexFormat);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::vertexStride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(vertexStride));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexStride(NS::UInteger vertexStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexStride_), vertexStride);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::indexBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, 
_MTL_PRIVATE_SEL(indexBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::indexBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(indexBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset);\n}\n\n_MTL_INLINE MTL::IndexType MTL::AccelerationStructureTriangleGeometryDescriptor::indexType() const\n{\n    return Object::sendMessage<MTL::IndexType>(this, _MTL_PRIVATE_SEL(indexType));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexType(MTL::IndexType indexType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexType_), indexType);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::triangleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(triangleCount));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTriangleCount(NS::UInteger triangleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTriangleCount_), triangleCount);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(transformationMatrixBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixBuffer(const MTL::Buffer* transformationMatrixBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixBuffer_), transformationMatrixBuffer);\n}\n\n_MTL_INLINE 
NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(transformationMatrixBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixBufferOffset_), transformationMatrixBufferOffset);\n}\n\n_MTL_INLINE MTL::MatrixLayout MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixLayout() const\n{\n    return Object::sendMessage<MTL::MatrixLayout>(this, _MTL_PRIVATE_SEL(transformationMatrixLayout));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixLayout_), transformationMatrixLayout);\n}\n\n_MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* MTL::AccelerationStructureTriangleGeometryDescriptor::descriptor()\n{\n    return Object::sendMessage<MTL::AccelerationStructureTriangleGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureTriangleGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructureBoundingBoxGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructureBoundingBoxGeometryDescriptor>();\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxBuffer() const\n{\n    
return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(boundingBoxBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxBuffer(const MTL::Buffer* boundingBoxBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxBuffer_), boundingBoxBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(boundingBoxBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxBufferOffset(NS::UInteger boundingBoxBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxBufferOffset_), boundingBoxBufferOffset);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxStride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(boundingBoxStride));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxStride(NS::UInteger boundingBoxStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxStride_), boundingBoxStride);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(boundingBoxCount));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxCount(NS::UInteger boundingBoxCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxCount_), boundingBoxCount);\n}\n\n_MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::descriptor()\n{\n    return Object::sendMessage<MTL::AccelerationStructureBoundingBoxGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor), 
_MTL_PRIVATE_SEL(descriptor));\n}\n\n_MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::alloc()\n{\n    return NS::Object::alloc<MTL::MotionKeyframeData>(_MTL_PRIVATE_CLS(MTLMotionKeyframeData));\n}\n\n_MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::init()\n{\n    return NS::Object::init<MTL::MotionKeyframeData>();\n}\n\n_MTL_INLINE MTL::Buffer* MTL::MotionKeyframeData::buffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(buffer));\n}\n\n_MTL_INLINE void MTL::MotionKeyframeData::setBuffer(const MTL::Buffer* buffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_), buffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::MotionKeyframeData::offset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(offset));\n}\n\n_MTL_INLINE void MTL::MotionKeyframeData::setOffset(NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOffset_), offset);\n}\n\n_MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::data()\n{\n    return Object::sendMessage<MTL::MotionKeyframeData*>(_MTL_PRIVATE_CLS(MTLMotionKeyframeData), _MTL_PRIVATE_SEL(data));\n}\n\n_MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructureMotionTriangleGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructureMotionTriangleGeometryDescriptor>();\n}\n\n_MTL_INLINE NS::Array* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexBuffers() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexBuffers));\n}\n\n_MTL_INLINE void 
MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexBuffers(const NS::Array* vertexBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffers_), vertexBuffers);\n}\n\n_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexFormat() const\n{\n    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(vertexFormat));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexFormat(MTL::AttributeFormat vertexFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexFormat_), vertexFormat);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexStride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(vertexStride));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexStride(NS::UInteger vertexStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexStride_), vertexStride);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(indexBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexBufferOffset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(indexBufferOffset));\n}\n\n_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset);\n}\n\n_MTL_INLINE MTL::IndexType 
MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexType() const
{
    return Object::sendMessage<MTL::IndexType>(this, _MTL_PRIVATE_SEL(indexType));
}

// NOTE(review): auto-generated metal-cpp trampoline layer. Every method in this region is a
// thin forwarder to the identically named Objective-C selector on the wrapped Metal object
// via Object::sendMessage<>; no C++-side logic is added. Code is kept byte-identical to the
// generator's output — comments only. (The `_MTL_INLINE MTL::IndexType` return type of
// indexType() above sits on the preceding source line, outside this region.)

_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexType(MTL::IndexType indexType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexType_), indexType);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::triangleCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(triangleCount));
}

_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTriangleCount(NS::UInteger triangleCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTriangleCount_), triangleCount);
}

_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(transformationMatrixBuffer));
}

_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixBuffer(const MTL::Buffer* transformationMatrixBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixBuffer_), transformationMatrixBuffer);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(transformationMatrixBufferOffset));
}

_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixBufferOffset_), transformationMatrixBufferOffset);
}

_MTL_INLINE MTL::MatrixLayout MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixLayout() const
{
    return Object::sendMessage<MTL::MatrixLayout>(this, _MTL_PRIVATE_SEL(transformationMatrixLayout));
}

_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTransformationMatrixLayout_), transformationMatrixLayout);
}

// Factory: sends +descriptor to the Objective-C class object.
_MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::descriptor()
{
    return Object::sendMessage<MTL::AccelerationStructureMotionTriangleGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor =====
// C++ mirror of MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor.

_MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::alloc()
{
    return NS::Object::alloc<MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor));
}

_MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::init()
{
    return NS::Object::init<MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor>();
}

// boundingBoxBuffers is an NS::Array (presumably one buffer per motion keyframe — TODO confirm
// against the MTL header; the array element type is not visible here).
_MTL_INLINE NS::Array* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxBuffers() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(boundingBoxBuffers));
}

_MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxBuffers(const NS::Array* boundingBoxBuffers)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxBuffers_), boundingBoxBuffers);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(boundingBoxStride));
}

_MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxStride(NS::UInteger boundingBoxStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxStride_), boundingBoxStride);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(boundingBoxCount));
}

_MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxCount(NS::UInteger boundingBoxCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBoundingBoxCount_), boundingBoxCount);
}

_MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::descriptor()
{
    return Object::sendMessage<MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::AccelerationStructureCurveGeometryDescriptor =====
// C++ mirror of MTLAccelerationStructureCurveGeometryDescriptor.

_MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::alloc()
{
    return NS::Object::alloc<MTL::AccelerationStructureCurveGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureCurveGeometryDescriptor));
}

_MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::init()
{
    return NS::Object::init<MTL::AccelerationStructureCurveGeometryDescriptor>();
}

_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureCurveGeometryDescriptor::controlPointBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(controlPointBuffer));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointBuffer(const MTL::Buffer* controlPointBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointBuffer_), controlPointBuffer);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(controlPointBufferOffset));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointBufferOffset(NS::UInteger controlPointBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointBufferOffset_), controlPointBufferOffset);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(controlPointCount));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointCount(NS::UInteger controlPointCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointCount_), controlPointCount);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(controlPointStride));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointStride(NS::UInteger controlPointStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointStride_), controlPointStride);
}

_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureCurveGeometryDescriptor::controlPointFormat() const
{
    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(controlPointFormat));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointFormat(MTL::AttributeFormat controlPointFormat)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointFormat_), controlPointFormat);
}

_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureCurveGeometryDescriptor::radiusBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(radiusBuffer));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusBuffer(const MTL::Buffer* radiusBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusBuffer_), radiusBuffer);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::radiusBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(radiusBufferOffset));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusBufferOffset(NS::UInteger radiusBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusBufferOffset_), radiusBufferOffset);
}

_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureCurveGeometryDescriptor::radiusFormat() const
{
    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(radiusFormat));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusFormat(MTL::AttributeFormat radiusFormat)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusFormat_), radiusFormat);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::radiusStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(radiusStride));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusStride(NS::UInteger radiusStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusStride_), radiusStride);
}

_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureCurveGeometryDescriptor::indexBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(indexBuffer));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::indexBufferOffset() const
{
    return
Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(indexBufferOffset));
}

// NOTE(review): auto-generated metal-cpp trampolines (continued); each method forwards to the
// matching Objective-C selector via Object::sendMessage<>. Code kept byte-identical; comments only.

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset);
}

_MTL_INLINE MTL::IndexType MTL::AccelerationStructureCurveGeometryDescriptor::indexType() const
{
    return Object::sendMessage<MTL::IndexType>(this, _MTL_PRIVATE_SEL(indexType));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexType(MTL::IndexType indexType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexType_), indexType);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::segmentCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(segmentCount));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setSegmentCount(NS::UInteger segmentCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSegmentCount_), segmentCount);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::segmentControlPointCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(segmentControlPointCount));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setSegmentControlPointCount(NS::UInteger segmentControlPointCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSegmentControlPointCount_), segmentControlPointCount);
}

_MTL_INLINE MTL::CurveType MTL::AccelerationStructureCurveGeometryDescriptor::curveType() const
{
    return Object::sendMessage<MTL::CurveType>(this, _MTL_PRIVATE_SEL(curveType));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveType(MTL::CurveType curveType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveType_), curveType);
}

_MTL_INLINE MTL::CurveBasis MTL::AccelerationStructureCurveGeometryDescriptor::curveBasis() const
{
    return Object::sendMessage<MTL::CurveBasis>(this, _MTL_PRIVATE_SEL(curveBasis));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveBasis(MTL::CurveBasis curveBasis)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveBasis_), curveBasis);
}

_MTL_INLINE MTL::CurveEndCaps MTL::AccelerationStructureCurveGeometryDescriptor::curveEndCaps() const
{
    return Object::sendMessage<MTL::CurveEndCaps>(this, _MTL_PRIVATE_SEL(curveEndCaps));
}

_MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveEndCaps(MTL::CurveEndCaps curveEndCaps)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveEndCaps_), curveEndCaps);
}

// Factory: sends +descriptor to the Objective-C class object.
_MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::descriptor()
{
    return Object::sendMessage<MTL::AccelerationStructureCurveGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureCurveGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::AccelerationStructureMotionCurveGeometryDescriptor =====
// C++ mirror of MTLAccelerationStructureMotionCurveGeometryDescriptor. Differs from the
// non-motion variant above in that control points and radii come as NS::Array collections
// of buffers rather than single buffers.

_MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::alloc()
{
    return NS::Object::alloc<MTL::AccelerationStructureMotionCurveGeometryDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor));
}

_MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::init()
{
    return NS::Object::init<MTL::AccelerationStructureMotionCurveGeometryDescriptor>();
}

_MTL_INLINE NS::Array* MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointBuffers() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(controlPointBuffers));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointBuffers(const NS::Array* controlPointBuffers)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointBuffers_), controlPointBuffers);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(controlPointCount));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointCount(NS::UInteger controlPointCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointCount_), controlPointCount);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(controlPointStride));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointStride(NS::UInteger controlPointStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointStride_), controlPointStride);
}

_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointFormat() const
{
    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(controlPointFormat));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointFormat(MTL::AttributeFormat controlPointFormat)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlPointFormat_), controlPointFormat);
}

_MTL_INLINE NS::Array* MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusBuffers() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(radiusBuffers));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusBuffers(const NS::Array* radiusBuffers)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusBuffers_), radiusBuffers);
}

_MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusFormat() const
{
    return Object::sendMessage<MTL::AttributeFormat>(this, _MTL_PRIVATE_SEL(radiusFormat));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusFormat(MTL::AttributeFormat radiusFormat)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusFormat_), radiusFormat);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(radiusStride));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusStride(NS::UInteger radiusStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRadiusStride_), radiusStride);
}

_MTL_INLINE MTL::Buffer* MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(indexBuffer));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(indexBufferOffset));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset);
}

_MTL_INLINE MTL::IndexType MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexType() const
{
    return Object::sendMessage<MTL::IndexType>(this, _MTL_PRIVATE_SEL(indexType));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexType(MTL::IndexType indexType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndexType_), indexType);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::segmentCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(segmentCount));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setSegmentCount(NS::UInteger segmentCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSegmentCount_), segmentCount);
}

_MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::segmentControlPointCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(segmentControlPointCount));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setSegmentControlPointCount(NS::UInteger segmentControlPointCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSegmentControlPointCount_), segmentControlPointCount);
}

_MTL_INLINE MTL::CurveType MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveType() const
{
    return Object::sendMessage<MTL::CurveType>(this, _MTL_PRIVATE_SEL(curveType));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveType(MTL::CurveType curveType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveType_), curveType);
}

_MTL_INLINE MTL::CurveBasis MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveBasis() const
{
    return Object::sendMessage<MTL::CurveBasis>(this, _MTL_PRIVATE_SEL(curveBasis));
}

_MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveBasis(MTL::CurveBasis curveBasis)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveBasis_), curveBasis);
}

_MTL_INLINE MTL::CurveEndCaps MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveEndCaps() const
{
    return Object::sendMessage<MTL::CurveEndCaps>(this, _MTL_PRIVATE_SEL(curveEndCaps));
}

_MTL_INLINE void
MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveEndCaps(MTL::CurveEndCaps curveEndCaps)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCurveEndCaps_), curveEndCaps);
}

// NOTE(review): auto-generated metal-cpp trampolines (continued); each method forwards to the
// matching Objective-C selector via Object::sendMessage<>. Code kept byte-identical; comments only.

// Factory: sends +descriptor to the Objective-C class object.
_MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::descriptor()
{
    return Object::sendMessage<MTL::AccelerationStructureMotionCurveGeometryDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::InstanceAccelerationStructureDescriptor =====
// C++ mirror of MTLInstanceAccelerationStructureDescriptor.

_MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::alloc()
{
    return NS::Object::alloc<MTL::InstanceAccelerationStructureDescriptor>(_MTL_PRIVATE_CLS(MTLInstanceAccelerationStructureDescriptor));
}

_MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::init()
{
    return NS::Object::init<MTL::InstanceAccelerationStructureDescriptor>();
}

_MTL_INLINE MTL::Buffer* MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(instanceDescriptorBuffer));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorBuffer(const MTL::Buffer* instanceDescriptorBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBuffer_), instanceDescriptorBuffer);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceDescriptorBufferOffset));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBufferOffset_), instanceDescriptorBufferOffset);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceDescriptorStride));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorStride_), instanceDescriptorStride);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceCount));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceCount(NS::UInteger instanceCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceCount_), instanceCount);
}

_MTL_INLINE NS::Array* MTL::InstanceAccelerationStructureDescriptor::instancedAccelerationStructures() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(instancedAccelerationStructures));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstancedAccelerationStructures(const NS::Array* instancedAccelerationStructures)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstancedAccelerationStructures_), instancedAccelerationStructures);
}

_MTL_INLINE MTL::AccelerationStructureInstanceDescriptorType MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorType() const
{
    return Object::sendMessage<MTL::AccelerationStructureInstanceDescriptorType>(this, _MTL_PRIVATE_SEL(instanceDescriptorType));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorType_), instanceDescriptorType);
}

_MTL_INLINE MTL::Buffer* MTL::InstanceAccelerationStructureDescriptor::motionTransformBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(motionTransformBuffer));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformBuffer(const MTL::Buffer* motionTransformBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformBuffer_), motionTransformBuffer);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformBufferOffset));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformBufferOffset_), motionTransformBufferOffset);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformCount));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformCount(NS::UInteger motionTransformCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformCount_), motionTransformCount);
}

_MTL_INLINE MTL::MatrixLayout MTL::InstanceAccelerationStructureDescriptor::instanceTransformationMatrixLayout() const
{
    return Object::sendMessage<MTL::MatrixLayout>(this, _MTL_PRIVATE_SEL(instanceTransformationMatrixLayout));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceTransformationMatrixLayout_), instanceTransformationMatrixLayout);
}

_MTL_INLINE MTL::TransformType MTL::InstanceAccelerationStructureDescriptor::motionTransformType() const
{
    return Object::sendMessage<MTL::TransformType>(this, _MTL_PRIVATE_SEL(motionTransformType));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformType(MTL::TransformType motionTransformType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformType_), motionTransformType);
}

_MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformStride));
}

_MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformStride(NS::UInteger motionTransformStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformStride_), motionTransformStride);
}

_MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::descriptor()
{
    return Object::sendMessage<MTL::InstanceAccelerationStructureDescriptor*>(_MTL_PRIVATE_CLS(MTLInstanceAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::IndirectInstanceAccelerationStructureDescriptor =====
// C++ mirror of MTLIndirectInstanceAccelerationStructureDescriptor. Counts come from GPU
// buffers (instanceCountBuffer / motionTransformCountBuffer) with max* CPU-side bounds,
// rather than fixed CPU-side counts as in the class above.

_MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::alloc()
{
    return NS::Object::alloc<MTL::IndirectInstanceAccelerationStructureDescriptor>(_MTL_PRIVATE_CLS(MTLIndirectInstanceAccelerationStructureDescriptor));
}

_MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::init()
{
    return NS::Object::init<MTL::IndirectInstanceAccelerationStructureDescriptor>();
}

_MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(instanceDescriptorBuffer));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorBuffer(const MTL::Buffer* instanceDescriptorBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBuffer_), instanceDescriptorBuffer);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceDescriptorBufferOffset));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBufferOffset_), instanceDescriptorBufferOffset);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceDescriptorStride));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorStride_), instanceDescriptorStride);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::maxInstanceCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxInstanceCount));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMaxInstanceCount(NS::UInteger maxInstanceCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxInstanceCount_), maxInstanceCount);
}

_MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::instanceCountBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(instanceCountBuffer));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceCountBuffer(const MTL::Buffer* instanceCountBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceCountBuffer_), instanceCountBuffer);
}

_MTL_INLINE NS::UInteger
MTL::IndirectInstanceAccelerationStructureDescriptor::instanceCountBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(instanceCountBufferOffset));
}

// NOTE(review): auto-generated metal-cpp trampolines (continued); each method forwards to the
// matching Objective-C selector via Object::sendMessage<>. Code kept byte-identical; comments only.

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceCountBufferOffset(NS::UInteger instanceCountBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceCountBufferOffset_), instanceCountBufferOffset);
}

_MTL_INLINE MTL::AccelerationStructureInstanceDescriptorType MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorType() const
{
    return Object::sendMessage<MTL::AccelerationStructureInstanceDescriptorType>(this, _MTL_PRIVATE_SEL(instanceDescriptorType));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceDescriptorType_), instanceDescriptorType);
}

_MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(motionTransformBuffer));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformBuffer(const MTL::Buffer* motionTransformBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformBuffer_), motionTransformBuffer);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformBufferOffset));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformBufferOffset_), motionTransformBufferOffset);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::maxMotionTransformCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxMotionTransformCount));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMaxMotionTransformCount(NS::UInteger maxMotionTransformCount)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxMotionTransformCount_), maxMotionTransformCount);
}

_MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformCountBuffer() const
{
    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(motionTransformCountBuffer));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformCountBuffer(const MTL::Buffer* motionTransformCountBuffer)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformCountBuffer_), motionTransformCountBuffer);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformCountBufferOffset() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformCountBufferOffset));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformCountBufferOffset(NS::UInteger motionTransformCountBufferOffset)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformCountBufferOffset_), motionTransformCountBufferOffset);
}

_MTL_INLINE MTL::MatrixLayout MTL::IndirectInstanceAccelerationStructureDescriptor::instanceTransformationMatrixLayout() const
{
    return Object::sendMessage<MTL::MatrixLayout>(this, _MTL_PRIVATE_SEL(instanceTransformationMatrixLayout));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstanceTransformationMatrixLayout_), instanceTransformationMatrixLayout);
}

_MTL_INLINE MTL::TransformType MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformType() const
{
    return Object::sendMessage<MTL::TransformType>(this, _MTL_PRIVATE_SEL(motionTransformType));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformType(MTL::TransformType motionTransformType)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformType_), motionTransformType);
}

_MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformStride() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(motionTransformStride));
}

_MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformStride(NS::UInteger motionTransformStride)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMotionTransformStride_), motionTransformStride);
}

// Factory: sends +descriptor to the Objective-C class object.
_MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::descriptor()
{
    return Object::sendMessage<MTL::IndirectInstanceAccelerationStructureDescriptor*>(_MTL_PRIVATE_CLS(MTLIndirectInstanceAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor));
}

// ===== MTL::AccelerationStructure (protocol wrapper) =====

_MTL_INLINE NS::UInteger MTL::AccelerationStructure::size() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(size));
}

_MTL_INLINE MTL::ResourceID MTL::AccelerationStructure::gpuResourceID() const
{
    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));
}

// NOTE(review): two consecutive `#pragma once` directives follow. Mid-file `#pragma once` is a
// no-op (the guard applies to the whole containing file), so this is harmless, but it looks like
// an artifact of header concatenation — presumably the seam between two amalgamated headers,
// one of which contributed no other content. Left in place because the file head (and therefore
// the amalgamation layout) is not visible from here; worth confirming against the generator.

#pragma once

#pragma once

namespace MTL
{
// Mirrors MTLDataType. Values are fixed by the Objective-C framework ABI and must not be
// renumbered; the numeric gaps (57, 61, 89-114, 119-120) correspond to values not exposed here.
_MTL_ENUM(NS::UInteger, DataType) {
    DataTypeNone = 0,
    DataTypeStruct = 1,
    DataTypeArray = 2,
    DataTypeFloat = 3,
    DataTypeFloat2 = 4,
    DataTypeFloat3 = 5,
    DataTypeFloat4 = 6,
    DataTypeFloat2x2 = 7,
    DataTypeFloat2x3 = 8,
    DataTypeFloat2x4 = 9,
    DataTypeFloat3x2 = 10,
    DataTypeFloat3x3 = 11,
    DataTypeFloat3x4 = 12,
    DataTypeFloat4x2 = 13,
    DataTypeFloat4x3 = 14,
    DataTypeFloat4x4 = 15,
    DataTypeHalf = 16,
    DataTypeHalf2 = 17,
    DataTypeHalf3 = 18,
    DataTypeHalf4 = 19,
    DataTypeHalf2x2 = 20,
    DataTypeHalf2x3 = 21,
    DataTypeHalf2x4 = 22,
    DataTypeHalf3x2 = 23,
    DataTypeHalf3x3 = 24,
    DataTypeHalf3x4 = 25,
    DataTypeHalf4x2 = 26,
    DataTypeHalf4x3 = 27,
    DataTypeHalf4x4 = 28,
    DataTypeInt = 29,
    DataTypeInt2 = 30,
    DataTypeInt3 = 31,
    DataTypeInt4 = 32,
    DataTypeUInt = 33,
    DataTypeUInt2 = 34,
    DataTypeUInt3 = 35,
    DataTypeUInt4 = 36,
    DataTypeShort = 37,
    DataTypeShort2 = 38,
    DataTypeShort3 = 39,
    DataTypeShort4 = 40,
    DataTypeUShort = 41,
    DataTypeUShort2 = 42,
    DataTypeUShort3 = 43,
    DataTypeUShort4 = 44,
    DataTypeChar = 45,
    DataTypeChar2 = 46,
    DataTypeChar3 = 47,
    DataTypeChar4 = 48,
    DataTypeUChar = 49,
    DataTypeUChar2 = 50,
    DataTypeUChar3 = 51,
    DataTypeUChar4 = 52,
    DataTypeBool = 53,
    DataTypeBool2 = 54,
    DataTypeBool3 = 55,
    DataTypeBool4 = 56,
    DataTypeTexture = 58,
    DataTypeSampler = 59,
    DataTypePointer = 60,
    DataTypeR8Unorm = 62,
    DataTypeR8Snorm = 63,
    DataTypeR16Unorm = 64,
    DataTypeR16Snorm = 65,
    DataTypeRG8Unorm = 66,
    DataTypeRG8Snorm = 67,
    DataTypeRG16Unorm = 68,
    DataTypeRG16Snorm = 69,
    DataTypeRGBA8Unorm = 70,
    DataTypeRGBA8Unorm_sRGB = 71,
    DataTypeRGBA8Snorm = 72,
    DataTypeRGBA16Unorm = 73,
    DataTypeRGBA16Snorm = 74,
    DataTypeRGB10A2Unorm = 75,
    DataTypeRG11B10Float = 76,
    DataTypeRGB9E5Float = 77,
    DataTypeRenderPipeline = 78,
    DataTypeComputePipeline = 79,
    DataTypeIndirectCommandBuffer = 80,
    DataTypeLong = 81,
    DataTypeLong2 = 82,
    DataTypeLong3 = 83,
    DataTypeLong4 = 84,
    DataTypeULong = 85,
    DataTypeULong2 = 86,
    DataTypeULong3 = 87,
    DataTypeULong4 = 88,
    DataTypeVisibleFunctionTable = 115,
    DataTypeIntersectionFunctionTable = 116,
    DataTypePrimitiveAccelerationStructure = 117,
    DataTypeInstanceAccelerationStructure = 118,
    DataTypeBFloat = 121,
    DataTypeBFloat2 = 122,
    DataTypeBFloat3 = 123,
    DataTypeBFloat4 = 124,
};

// Mirrors MTLBindingType. Note the signed NS::Integer underlying type, unlike the
// NS::UInteger used by the other enums in this section.
_MTL_ENUM(NS::Integer, BindingType) {
    BindingTypeBuffer = 0,
    BindingTypeThreadgroupMemory = 1,
    BindingTypeTexture = 2,
    BindingTypeSampler = 3,
    BindingTypeImageblockData = 16,
    BindingTypeImageblock = 17,
    BindingTypeVisibleFunctionTable = 24,
    BindingTypePrimitiveAccelerationStructure = 25,
    BindingTypeInstanceAccelerationStructure = 26,
    BindingTypeIntersectionFunctionTable = 27,
};

// Mirrors MTLArgumentType; value-for-value parallel of BindingType above.
_MTL_ENUM(NS::UInteger, ArgumentType) {
    ArgumentTypeBuffer = 0,
    ArgumentTypeThreadgroupMemory = 1,
    ArgumentTypeTexture = 2,
    ArgumentTypeSampler = 3,
    ArgumentTypeImageblockData = 16,
    ArgumentTypeImageblock = 17,
    ArgumentTypeVisibleFunctionTable = 24,
    ArgumentTypePrimitiveAccelerationStructure = 25,
    ArgumentTypeInstanceAccelerationStructure = 26,
    ArgumentTypeIntersectionFunctionTable = 27,
};

// Mirrors MTLBindingAccess. The ArgumentAccess* enumerators duplicate the BindingAccess*
// values (0/1/2) — presumably aliases kept for the older MTLArgumentAccess spelling.
_MTL_ENUM(NS::UInteger, BindingAccess) {
    BindingAccessReadOnly = 0,
    BindingAccessReadWrite = 1,
    BindingAccessWriteOnly = 2,
    ArgumentAccessReadOnly = 0,
    ArgumentAccessReadWrite = 1,
    ArgumentAccessWriteOnly = 2,
};

// Reflection wrapper for MTLType: exposes the dataType of a reflected value.
class Type : public NS::Referencing<Type>
{
public:
    static class Type* alloc();

    class Type*        init();

    MTL::DataType      dataType() const;
};

// Reflection wrapper for MTLStructMember.
// NOTE(review): this declaration is truncated at the end of the visible chunk (it continues
// past this region); the text below is preserved exactly as-is.
class StructMember : public NS::Referencing<StructMember>
{
public:
    static class StructMember*  alloc();

    class StructMember*         init();

    NS::String*                 name() const;

    NS::UInteger                offset() 
const;\n\n    MTL::DataType               dataType() const;\n\n    class StructType*           structType();\n\n    class ArrayType*            arrayType();\n\n    class TextureReferenceType* textureReferenceType();\n\n    class PointerType*          pointerType();\n\n    NS::UInteger                argumentIndex() const;\n};\n\n// Reflected struct type: a collection of named StructMember objects.\nclass StructType : public NS::Referencing<StructType, Type>\n{\npublic:\n    static class StructType* alloc();\n\n    class StructType*        init();\n\n    NS::Array*               members() const;\n\n    class StructMember*      memberByName(const NS::String* name);\n};\n\n// Reflected array type: element type, length and stride information.\nclass ArrayType : public NS::Referencing<ArrayType, Type>\n{\npublic:\n    static class ArrayType*     alloc();\n\n    class ArrayType*            init();\n\n    MTL::DataType               elementType() const;\n\n    NS::UInteger                arrayLength() const;\n\n    NS::UInteger                stride() const;\n\n    NS::UInteger                argumentIndexStride() const;\n\n    class StructType*           elementStructType();\n\n    class ArrayType*            elementArrayType();\n\n    class TextureReferenceType* elementTextureReferenceType();\n\n    class PointerType*          elementPointerType();\n};\n\n// Reflected pointer type, including alignment and data size of the pointee.\nclass PointerType : public NS::Referencing<PointerType, Type>\n{\npublic:\n    static class PointerType* alloc();\n\n    class PointerType*        init();\n\n    MTL::DataType             elementType() const;\n\n    MTL::BindingAccess        access() const;\n\n    NS::UInteger              alignment() const;\n\n    NS::UInteger              dataSize() const;\n\n    bool                      elementIsArgumentBuffer() const;\n\n    class StructType*         elementStructType();\n\n    class ArrayType*          elementArrayType();\n};\n\n// Reflected texture reference type.\nclass TextureReferenceType : public NS::Referencing<TextureReferenceType, Type>\n{\npublic:\n    static class TextureReferenceType* alloc();\n\n    class TextureReferenceType*        init();\n\n    MTL::DataType                 
     textureDataType() const;\n\n    MTL::TextureType                   textureType() const;\n\n    MTL::BindingAccess                 access() const;\n\n    bool                               isDepthTexture() const;\n};\n\n// Reflection object for one shader argument (older MTLArgument API; see also Binding).\nclass Argument : public NS::Referencing<Argument>\n{\npublic:\n    static class Argument* alloc();\n\n    class Argument*        init();\n\n    NS::String*            name() const;\n\n    MTL::ArgumentType      type() const;\n\n    MTL::BindingAccess     access() const;\n\n    NS::UInteger           index() const;\n\n    bool                   active() const;\n\n    NS::UInteger           bufferAlignment() const;\n\n    NS::UInteger           bufferDataSize() const;\n\n    MTL::DataType          bufferDataType() const;\n\n    class StructType*      bufferStructType() const;\n\n    class PointerType*     bufferPointerType() const;\n\n    NS::UInteger           threadgroupMemoryAlignment() const;\n\n    NS::UInteger           threadgroupMemoryDataSize() const;\n\n    MTL::TextureType       textureType() const;\n\n    MTL::DataType          textureDataType() const;\n\n    bool                   isDepthTexture() const;\n\n    NS::UInteger           arrayLength() const;\n};\n\n// Reflection object describing one shader binding slot.\nclass Binding : public NS::Referencing<Binding>\n{\npublic:\n    NS::String*        name() const;\n\n    MTL::BindingType   type() const;\n\n    MTL::BindingAccess access() const;\n\n    NS::UInteger       index() const;\n\n    bool               used() const;\n\n    bool               argument() const;\n};\n\n// Binding specialization for buffer arguments.\nclass BufferBinding : public NS::Referencing<BufferBinding, Binding>\n{\npublic:\n    NS::UInteger       bufferAlignment() const;\n\n    NS::UInteger       bufferDataSize() const;\n\n    MTL::DataType      bufferDataType() const;\n\n    class StructType*  bufferStructType() const;\n\n    class PointerType* bufferPointerType() const;\n};\n\n// Binding specialization for threadgroup memory arguments.\nclass ThreadgroupBinding : public NS::Referencing<ThreadgroupBinding, Binding>\n{\npublic:\n    NS::UInteger 
threadgroupMemoryAlignment() const;\n\n    NS::UInteger threadgroupMemoryDataSize() const;\n};\n\n// Binding specialization for texture arguments.\nclass TextureBinding : public NS::Referencing<TextureBinding, Binding>\n{\npublic:\n    MTL::TextureType textureType() const;\n\n    MTL::DataType    textureDataType() const;\n\n    bool             depthTexture() const;\n\n    NS::UInteger     arrayLength() const;\n};\n\n// Binding specialization for object payload arguments.\nclass ObjectPayloadBinding : public NS::Referencing<ObjectPayloadBinding, Binding>\n{\npublic:\n    NS::UInteger objectPayloadAlignment() const;\n\n    NS::UInteger objectPayloadDataSize() const;\n};\n\n}\n\n// ---------------------------------------------------------------------------\n// Inline implementations: each wrapper forwards to the corresponding\n// Objective-C selector via Object::sendMessage / NS::Object::alloc|init.\n// ---------------------------------------------------------------------------\n_MTL_INLINE MTL::Type* MTL::Type::alloc()\n{\n    return NS::Object::alloc<MTL::Type>(_MTL_PRIVATE_CLS(MTLType));\n}\n\n_MTL_INLINE MTL::Type* MTL::Type::init()\n{\n    return NS::Object::init<MTL::Type>();\n}\n\n_MTL_INLINE MTL::DataType MTL::Type::dataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(dataType));\n}\n\n_MTL_INLINE MTL::StructMember* MTL::StructMember::alloc()\n{\n    return NS::Object::alloc<MTL::StructMember>(_MTL_PRIVATE_CLS(MTLStructMember));\n}\n\n_MTL_INLINE MTL::StructMember* MTL::StructMember::init()\n{\n    return NS::Object::init<MTL::StructMember>();\n}\n\n_MTL_INLINE NS::String* MTL::StructMember::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE NS::UInteger MTL::StructMember::offset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(offset));\n}\n\n_MTL_INLINE MTL::DataType MTL::StructMember::dataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(dataType));\n}\n\n_MTL_INLINE MTL::StructType* MTL::StructMember::structType()\n{\n    return Object::sendMessage<MTL::StructType*>(this, _MTL_PRIVATE_SEL(structType));\n}\n\n_MTL_INLINE MTL::ArrayType* MTL::StructMember::arrayType()\n{\n    return Object::sendMessage<MTL::ArrayType*>(this, _MTL_PRIVATE_SEL(arrayType));\n}\n\n_MTL_INLINE MTL::TextureReferenceType* 
MTL::StructMember::textureReferenceType()\n{\n    return Object::sendMessage<MTL::TextureReferenceType*>(this, _MTL_PRIVATE_SEL(textureReferenceType));\n}\n\n_MTL_INLINE MTL::PointerType* MTL::StructMember::pointerType()\n{\n    return Object::sendMessage<MTL::PointerType*>(this, _MTL_PRIVATE_SEL(pointerType));\n}\n\n_MTL_INLINE NS::UInteger MTL::StructMember::argumentIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(argumentIndex));\n}\n\n// MTL::StructType accessors.\n_MTL_INLINE MTL::StructType* MTL::StructType::alloc()\n{\n    return NS::Object::alloc<MTL::StructType>(_MTL_PRIVATE_CLS(MTLStructType));\n}\n\n_MTL_INLINE MTL::StructType* MTL::StructType::init()\n{\n    return NS::Object::init<MTL::StructType>();\n}\n\n_MTL_INLINE NS::Array* MTL::StructType::members() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(members));\n}\n\n_MTL_INLINE MTL::StructMember* MTL::StructType::memberByName(const NS::String* name)\n{\n    return Object::sendMessage<MTL::StructMember*>(this, _MTL_PRIVATE_SEL(memberByName_), name);\n}\n\n// MTL::ArrayType accessors.\n_MTL_INLINE MTL::ArrayType* MTL::ArrayType::alloc()\n{\n    return NS::Object::alloc<MTL::ArrayType>(_MTL_PRIVATE_CLS(MTLArrayType));\n}\n\n_MTL_INLINE MTL::ArrayType* MTL::ArrayType::init()\n{\n    return NS::Object::init<MTL::ArrayType>();\n}\n\n_MTL_INLINE MTL::DataType MTL::ArrayType::elementType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(elementType));\n}\n\n_MTL_INLINE NS::UInteger MTL::ArrayType::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n_MTL_INLINE NS::UInteger MTL::ArrayType::stride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(stride));\n}\n\n_MTL_INLINE NS::UInteger MTL::ArrayType::argumentIndexStride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(argumentIndexStride));\n}\n\n_MTL_INLINE MTL::StructType* 
MTL::ArrayType::elementStructType()\n{\n    return Object::sendMessage<MTL::StructType*>(this, _MTL_PRIVATE_SEL(elementStructType));\n}\n\n_MTL_INLINE MTL::ArrayType* MTL::ArrayType::elementArrayType()\n{\n    return Object::sendMessage<MTL::ArrayType*>(this, _MTL_PRIVATE_SEL(elementArrayType));\n}\n\n_MTL_INLINE MTL::TextureReferenceType* MTL::ArrayType::elementTextureReferenceType()\n{\n    return Object::sendMessage<MTL::TextureReferenceType*>(this, _MTL_PRIVATE_SEL(elementTextureReferenceType));\n}\n\n_MTL_INLINE MTL::PointerType* MTL::ArrayType::elementPointerType()\n{\n    return Object::sendMessage<MTL::PointerType*>(this, _MTL_PRIVATE_SEL(elementPointerType));\n}\n\n// MTL::PointerType accessors.\n_MTL_INLINE MTL::PointerType* MTL::PointerType::alloc()\n{\n    return NS::Object::alloc<MTL::PointerType>(_MTL_PRIVATE_CLS(MTLPointerType));\n}\n\n_MTL_INLINE MTL::PointerType* MTL::PointerType::init()\n{\n    return NS::Object::init<MTL::PointerType>();\n}\n\n_MTL_INLINE MTL::DataType MTL::PointerType::elementType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(elementType));\n}\n\n_MTL_INLINE MTL::BindingAccess MTL::PointerType::access() const\n{\n    return Object::sendMessage<MTL::BindingAccess>(this, _MTL_PRIVATE_SEL(access));\n}\n\n_MTL_INLINE NS::UInteger MTL::PointerType::alignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(alignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::PointerType::dataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(dataSize));\n}\n\n_MTL_INLINE bool MTL::PointerType::elementIsArgumentBuffer() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(elementIsArgumentBuffer));\n}\n\n_MTL_INLINE MTL::StructType* MTL::PointerType::elementStructType()\n{\n    return Object::sendMessage<MTL::StructType*>(this, _MTL_PRIVATE_SEL(elementStructType));\n}\n\n_MTL_INLINE MTL::ArrayType* MTL::PointerType::elementArrayType()\n{\n    return 
Object::sendMessage<MTL::ArrayType*>(this, _MTL_PRIVATE_SEL(elementArrayType));\n}\n\n// MTL::TextureReferenceType accessors.\n_MTL_INLINE MTL::TextureReferenceType* MTL::TextureReferenceType::alloc()\n{\n    return NS::Object::alloc<MTL::TextureReferenceType>(_MTL_PRIVATE_CLS(MTLTextureReferenceType));\n}\n\n_MTL_INLINE MTL::TextureReferenceType* MTL::TextureReferenceType::init()\n{\n    return NS::Object::init<MTL::TextureReferenceType>();\n}\n\n_MTL_INLINE MTL::DataType MTL::TextureReferenceType::textureDataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(textureDataType));\n}\n\n_MTL_INLINE MTL::TextureType MTL::TextureReferenceType::textureType() const\n{\n    return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE MTL::BindingAccess MTL::TextureReferenceType::access() const\n{\n    return Object::sendMessage<MTL::BindingAccess>(this, _MTL_PRIVATE_SEL(access));\n}\n\n_MTL_INLINE bool MTL::TextureReferenceType::isDepthTexture() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isDepthTexture));\n}\n\n// MTL::Argument accessors (older reflection API).\n_MTL_INLINE MTL::Argument* MTL::Argument::alloc()\n{\n    return NS::Object::alloc<MTL::Argument>(_MTL_PRIVATE_CLS(MTLArgument));\n}\n\n_MTL_INLINE MTL::Argument* MTL::Argument::init()\n{\n    return NS::Object::init<MTL::Argument>();\n}\n\n_MTL_INLINE NS::String* MTL::Argument::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE MTL::ArgumentType MTL::Argument::type() const\n{\n    return Object::sendMessage<MTL::ArgumentType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE MTL::BindingAccess MTL::Argument::access() const\n{\n    return Object::sendMessage<MTL::BindingAccess>(this, _MTL_PRIVATE_SEL(access));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::index() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(index));\n}\n\n// Note: active() forwards to the isActive selector.\n_MTL_INLINE bool MTL::Argument::active() const\n{\n    return Object::sendMessage<bool>(this, 
_MTL_PRIVATE_SEL(isActive));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::bufferAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferAlignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::bufferDataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferDataSize));\n}\n\n_MTL_INLINE MTL::DataType MTL::Argument::bufferDataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(bufferDataType));\n}\n\n_MTL_INLINE MTL::StructType* MTL::Argument::bufferStructType() const\n{\n    return Object::sendMessage<MTL::StructType*>(this, _MTL_PRIVATE_SEL(bufferStructType));\n}\n\n_MTL_INLINE MTL::PointerType* MTL::Argument::bufferPointerType() const\n{\n    return Object::sendMessage<MTL::PointerType*>(this, _MTL_PRIVATE_SEL(bufferPointerType));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::threadgroupMemoryAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadgroupMemoryAlignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::threadgroupMemoryDataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadgroupMemoryDataSize));\n}\n\n_MTL_INLINE MTL::TextureType MTL::Argument::textureType() const\n{\n    return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE MTL::DataType MTL::Argument::textureDataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(textureDataType));\n}\n\n_MTL_INLINE bool MTL::Argument::isDepthTexture() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isDepthTexture));\n}\n\n_MTL_INLINE NS::UInteger MTL::Argument::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n// MTL::Binding accessors.\n_MTL_INLINE NS::String* MTL::Binding::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE 
MTL::BindingType MTL::Binding::type() const\n{\n    return Object::sendMessage<MTL::BindingType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE MTL::BindingAccess MTL::Binding::access() const\n{\n    return Object::sendMessage<MTL::BindingAccess>(this, _MTL_PRIVATE_SEL(access));\n}\n\n_MTL_INLINE NS::UInteger MTL::Binding::index() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(index));\n}\n\n// Note: used() forwards to isUsed, argument() to isArgument.\n_MTL_INLINE bool MTL::Binding::used() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isUsed));\n}\n\n_MTL_INLINE bool MTL::Binding::argument() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isArgument));\n}\n\n// MTL::BufferBinding accessors.\n_MTL_INLINE NS::UInteger MTL::BufferBinding::bufferAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferAlignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::BufferBinding::bufferDataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferDataSize));\n}\n\n_MTL_INLINE MTL::DataType MTL::BufferBinding::bufferDataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(bufferDataType));\n}\n\n_MTL_INLINE MTL::StructType* MTL::BufferBinding::bufferStructType() const\n{\n    return Object::sendMessage<MTL::StructType*>(this, _MTL_PRIVATE_SEL(bufferStructType));\n}\n\n_MTL_INLINE MTL::PointerType* MTL::BufferBinding::bufferPointerType() const\n{\n    return Object::sendMessage<MTL::PointerType*>(this, _MTL_PRIVATE_SEL(bufferPointerType));\n}\n\n// MTL::ThreadgroupBinding accessors.\n_MTL_INLINE NS::UInteger MTL::ThreadgroupBinding::threadgroupMemoryAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadgroupMemoryAlignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::ThreadgroupBinding::threadgroupMemoryDataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadgroupMemoryDataSize));\n}\n\n_MTL_INLINE MTL::TextureType MTL::TextureBinding::textureType() const\n{\n    
return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE MTL::DataType MTL::TextureBinding::textureDataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(textureDataType));\n}\n\n_MTL_INLINE bool MTL::TextureBinding::depthTexture() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isDepthTexture));\n}\n\n_MTL_INLINE NS::UInteger MTL::TextureBinding::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n// MTL::ObjectPayloadBinding accessors.\n_MTL_INLINE NS::UInteger MTL::ObjectPayloadBinding::objectPayloadAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(objectPayloadAlignment));\n}\n\n_MTL_INLINE NS::UInteger MTL::ObjectPayloadBinding::objectPayloadDataSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(objectPayloadDataSize));\n}\n\n// NOTE(review): a second '#pragma once' and a reopened 'namespace MTL' start here;\n// this file appears to concatenate several metal-cpp headers — confirm the generator intends this.\n#pragma once\n\nnamespace MTL\n{\n// Bitmask: how a resource made resident via useResource()/useResources() is accessed.\n_MTL_OPTIONS(NS::UInteger, ResourceUsage) {\n    ResourceUsageRead = 1,\n    ResourceUsageWrite = 2,\n    ResourceUsageSample = 4,\n};\n\n// Bitmask selecting which resource kinds a barrier applies to.\n_MTL_OPTIONS(NS::UInteger, BarrierScope) {\n    BarrierScopeBuffers = 1,\n    BarrierScopeTextures = 2,\n    BarrierScopeRenderTargets = 4,\n};\n\n// Common base interface shared by the Metal command encoders.\nclass CommandEncoder : public NS::Referencing<CommandEncoder>\n{\npublic:\n    class Device* device() const;\n\n    NS::String*   label() const;\n    void          setLabel(const NS::String* label);\n\n    void          endEncoding();\n\n    void          insertDebugSignpost(const NS::String* string);\n\n    void          pushDebugGroup(const NS::String* string);\n\n    void          popDebugGroup();\n};\n\n}\n\n// MTL::CommandEncoder inline implementations.\n_MTL_INLINE MTL::Device* MTL::CommandEncoder::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::CommandEncoder::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void 
MTL::CommandEncoder::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE void MTL::CommandEncoder::endEncoding()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(endEncoding));\n}\n\n_MTL_INLINE void MTL::CommandEncoder::insertDebugSignpost(const NS::String* string)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(insertDebugSignpost_), string);\n}\n\n_MTL_INLINE void MTL::CommandEncoder::pushDebugGroup(const NS::String* string)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string);\n}\n\n_MTL_INLINE void MTL::CommandEncoder::popDebugGroup()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(popDebugGroup));\n}\n\nnamespace MTL\n{\n// Bitmask controlling which inputs a refit operation re-reads.\n_MTL_OPTIONS(NS::UInteger, AccelerationStructureRefitOptions) {\n    AccelerationStructureRefitOptionVertexData = 1,\n    AccelerationStructureRefitOptionPerPrimitiveData = 2,\n};\n\n// Encoder for building, refitting and copying ray-tracing acceleration structures.\nclass AccelerationStructureCommandEncoder : public NS::Referencing<AccelerationStructureCommandEncoder, CommandEncoder>\n{\npublic:\n    void buildAccelerationStructure(const class AccelerationStructure* accelerationStructure, const class AccelerationStructureDescriptor* descriptor, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset);\n\n    void refitAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructureDescriptor* descriptor, const class AccelerationStructure* destinationAccelerationStructure, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset);\n\n    void refitAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructureDescriptor* descriptor, const class AccelerationStructure* destinationAccelerationStructure, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset, MTL::AccelerationStructureRefitOptions options);\n\n    void copyAccelerationStructure(const 
class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructure* destinationAccelerationStructure);\n\n    void writeCompactedAccelerationStructureSize(const class AccelerationStructure* accelerationStructure, const class Buffer* buffer, NS::UInteger offset);\n\n    void writeCompactedAccelerationStructureSize(const class AccelerationStructure* accelerationStructure, const class Buffer* buffer, NS::UInteger offset, MTL::DataType sizeDataType);\n\n    void copyAndCompactAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructure* destinationAccelerationStructure);\n\n    void updateFence(const class Fence* fence);\n\n    void waitForFence(const class Fence* fence);\n\n    void useResource(const class Resource* resource, MTL::ResourceUsage usage);\n\n    void useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage);\n\n    void useHeap(const class Heap* heap);\n\n    void useHeaps(const class Heap* const heaps[], NS::UInteger count);\n\n    void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier);\n};\n\n// Configures one GPU counter sample-buffer attachment for an acceleration-structure pass.\nclass AccelerationStructurePassSampleBufferAttachmentDescriptor : public NS::Copying<AccelerationStructurePassSampleBufferAttachmentDescriptor>\n{\npublic:\n    static class AccelerationStructurePassSampleBufferAttachmentDescriptor* alloc();\n\n    class AccelerationStructurePassSampleBufferAttachmentDescriptor*        init();\n\n    class CounterSampleBuffer*                                              sampleBuffer() const;\n    void                                                                    setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);\n\n    NS::UInteger                                                            startOfEncoderSampleIndex() const;\n    void                                                                    
setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);\n\n    NS::UInteger                                                            endOfEncoderSampleIndex() const;\n    void                                                                    setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);\n};\n\n// Indexed collection of sample-buffer attachment descriptors.\nclass AccelerationStructurePassSampleBufferAttachmentDescriptorArray : public NS::Referencing<AccelerationStructurePassSampleBufferAttachmentDescriptorArray>\n{\npublic:\n    static class AccelerationStructurePassSampleBufferAttachmentDescriptorArray* alloc();\n\n    class AccelerationStructurePassSampleBufferAttachmentDescriptorArray*        init();\n\n    class AccelerationStructurePassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                                         setObject(const class AccelerationStructurePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\n// Descriptor used to create an acceleration-structure pass.\nclass AccelerationStructurePassDescriptor : public NS::Copying<AccelerationStructurePassDescriptor>\n{\npublic:\n    static class AccelerationStructurePassDescriptor*                     alloc();\n\n    class AccelerationStructurePassDescriptor*                            init();\n\n    static class AccelerationStructurePassDescriptor*                     accelerationStructurePassDescriptor();\n\n    class AccelerationStructurePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;\n};\n\n}\n\n// Inline implementations for the acceleration-structure encoder API.\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::buildAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(buildAccelerationStructure_descriptor_scratchBuffer_scratchBufferOffset_), accelerationStructure, descriptor, scratchBuffer, 
scratchBufferOffset);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::refitAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::AccelerationStructure* destinationAccelerationStructure, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_), sourceAccelerationStructure, descriptor, destinationAccelerationStructure, scratchBuffer, scratchBufferOffset);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::refitAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::AccelerationStructure* destinationAccelerationStructure, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset, MTL::AccelerationStructureRefitOptions options)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_options_), sourceAccelerationStructure, descriptor, destinationAccelerationStructure, scratchBuffer, scratchBufferOffset, options);\n}\n\n// Acceleration-structure copy and size-query helpers follow.\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::copyAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructure* destinationAccelerationStructure)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyAccelerationStructure_toAccelerationStructure_), sourceAccelerationStructure, destinationAccelerationStructure);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::writeCompactedAccelerationStructureSize(const MTL::AccelerationStructure* accelerationStructure, const MTL::Buffer* buffer, NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_), accelerationStructure, buffer, offset);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::writeCompactedAccelerationStructureSize(const MTL::AccelerationStructure* accelerationStructure, const MTL::Buffer* buffer, NS::UInteger offset, MTL::DataType sizeDataType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_sizeDataType_), accelerationStructure, buffer, offset, sizeDataType);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::copyAndCompactAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructure* destinationAccelerationStructure)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyAndCompactAccelerationStructure_toAccelerationStructure_), sourceAccelerationStructure, destinationAccelerationStructure);\n}\n\n// Fence and residency helpers.\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::updateFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_), fence);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::waitForFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_), fence);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResources_count_usage_), resources, count, usage);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useHeap(const MTL::Heap* heap)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeap_), 
heap);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count);\n}\n\n_MTL_INLINE void MTL::AccelerationStructureCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier);\n}\n\n// Sample-buffer attachment descriptor accessors.\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::CounterSampleBuffer* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::sampleBuffer() const\n{\n    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(sampleBuffer));\n}\n\n_MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex);\n}\n\n// Attachment-array accessors (indexed subscript forwarding).\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassDescriptor* 
MTL::AccelerationStructurePassDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::AccelerationStructurePassDescriptor>(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassDescriptor* MTL::AccelerationStructurePassDescriptor::init()\n{\n    return NS::Object::init<MTL::AccelerationStructurePassDescriptor>();\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassDescriptor* MTL::AccelerationStructurePassDescriptor::accelerationStructurePassDescriptor()\n{\n    return Object::sendMessage<MTL::AccelerationStructurePassDescriptor*>(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassDescriptor), _MTL_PRIVATE_SEL(accelerationStructurePassDescriptor));\n}\n\n_MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassDescriptor::sampleBufferAttachments() const\n{\n    return Object::sendMessage<MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(sampleBufferAttachments));\n}\n\n#pragma once\n\nnamespace MTL\n{\n\nstatic const NS::UInteger AttributeStrideStatic = NS::UIntegerMax;\n\nclass ArgumentEncoder : public NS::Referencing<ArgumentEncoder>\n{\npublic:\n    class Device*          device() const;\n\n    NS::String*            label() const;\n    void                   setLabel(const NS::String* label);\n\n    NS::UInteger           encodedLength() const;\n\n    NS::UInteger           alignment() const;\n\n    void                   setArgumentBuffer(const class Buffer* argumentBuffer, NS::UInteger offset);\n\n    void                   setArgumentBuffer(const class Buffer* argumentBuffer, NS::UInteger startOffset, NS::UInteger arrayElement);\n\n    void                   setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void                   setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range);\n\n    void                   setTexture(const class 
Texture* texture, NS::UInteger index);\n\n    void                   setTextures(const class Texture* const textures[], NS::Range range);\n\n    void                   setSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void                   setSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void*                  constantData(NS::UInteger index);\n\n    void                   setRenderPipelineState(const class RenderPipelineState* pipeline, NS::UInteger index);\n\n    void                   setRenderPipelineStates(const class RenderPipelineState* const pipelines[], NS::Range range);\n\n    void                   setComputePipelineState(const class ComputePipelineState* pipeline, NS::UInteger index);\n\n    void                   setComputePipelineStates(const class ComputePipelineState* const pipelines[], NS::Range range);\n\n    void                   setIndirectCommandBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::UInteger index);\n\n    void                   setIndirectCommandBuffers(const class IndirectCommandBuffer* const buffers[], NS::Range range);\n\n    void                   setAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger index);\n\n    class ArgumentEncoder* newArgumentEncoder(NS::UInteger index);\n\n    void                   setVisibleFunctionTable(const class VisibleFunctionTable* visibleFunctionTable, NS::UInteger index);\n\n    void                   setVisibleFunctionTables(const class VisibleFunctionTable* const visibleFunctionTables[], NS::Range range);\n\n    void                   setIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger index);\n\n    void                   setIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range);\n};\n\n}\n\n_MTL_INLINE MTL::Device* MTL::ArgumentEncoder::device() 
const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::ArgumentEncoder::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE NS::UInteger MTL::ArgumentEncoder::encodedLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(encodedLength));\n}\n\n_MTL_INLINE NS::UInteger MTL::ArgumentEncoder::alignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(alignment));\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setArgumentBuffer(const MTL::Buffer* argumentBuffer, NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArgumentBuffer_offset_), argumentBuffer, offset);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setArgumentBuffer(const MTL::Buffer* argumentBuffer, NS::UInteger startOffset, NS::UInteger arrayElement)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArgumentBuffer_startOffset_arrayElement_), argumentBuffer, startOffset, arrayElement);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setTextures(const 
MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void* MTL::ArgumentEncoder::constantData(NS::UInteger index)\n{\n    return Object::sendMessage<void*>(this, _MTL_PRIVATE_SEL(constantDataAtIndex_), index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setRenderPipelineState(const MTL::RenderPipelineState* pipeline, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineState_atIndex_), pipeline, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setRenderPipelineStates(const MTL::RenderPipelineState* const pipelines[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineStates_withRange_), pipelines, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setComputePipelineState(const MTL::ComputePipelineState* pipeline, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineState_atIndex_), pipeline, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setComputePipelineStates(const MTL::ComputePipelineState* const pipelines[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineStates_withRange_), pipelines, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setIndirectCommandBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndirectCommandBuffer_atIndex_), 
indirectCommandBuffer, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setIndirectCommandBuffers(const MTL::IndirectCommandBuffer* const buffers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndirectCommandBuffers_withRange_), buffers, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAccelerationStructure_atIndex_), accelerationStructure, index);\n}\n\n_MTL_INLINE MTL::ArgumentEncoder* MTL::ArgumentEncoder::newArgumentEncoder(NS::UInteger index)\n{\n    return Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderForBufferAtIndex_), index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setVisibleFunctionTable(const MTL::VisibleFunctionTable* visibleFunctionTable, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atIndex_), visibleFunctionTable, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const visibleFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withRange_), visibleFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTable_atIndex_), intersectionFunctionTable, index);\n}\n\n_MTL_INLINE void MTL::ArgumentEncoder::setIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTables_withRange_), intersectionFunctionTables, range);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_CONST(NS::ErrorDomain, 
BinaryArchiveDomain);\n\n_MTL_ENUM(NS::UInteger, BinaryArchiveError) {\n    BinaryArchiveErrorNone = 0,\n    BinaryArchiveErrorInvalidFile = 1,\n    BinaryArchiveErrorUnexpectedElement = 2,\n    BinaryArchiveErrorCompilationFailure = 3,\n    BinaryArchiveErrorInternalError = 4,\n};\n\nclass BinaryArchiveDescriptor : public NS::Copying<BinaryArchiveDescriptor>\n{\npublic:\n    static class BinaryArchiveDescriptor* alloc();\n\n    class BinaryArchiveDescriptor*        init();\n\n    NS::URL*                              url() const;\n    void                                  setUrl(const NS::URL* url);\n};\n\nclass BinaryArchive : public NS::Referencing<BinaryArchive>\n{\npublic:\n    NS::String*   label() const;\n    void          setLabel(const NS::String* label);\n\n    class Device* device() const;\n\n    bool          addComputePipelineFunctions(const class ComputePipelineDescriptor* descriptor, NS::Error** error);\n\n    bool          addRenderPipelineFunctions(const class RenderPipelineDescriptor* descriptor, NS::Error** error);\n\n    bool          addTileRenderPipelineFunctions(const class TileRenderPipelineDescriptor* descriptor, NS::Error** error);\n\n    bool          addMeshRenderPipelineFunctions(const class MeshRenderPipelineDescriptor* descriptor, NS::Error** error);\n\n    bool          addLibrary(const class StitchedLibraryDescriptor* descriptor, NS::Error** error);\n\n    bool          serializeToURL(const NS::URL* url, NS::Error** error);\n\n    bool          addFunction(const class FunctionDescriptor* descriptor, const class Library* library, NS::Error** error);\n};\n\n}\n\n_MTL_PRIVATE_DEF_STR(NS::ErrorDomain, BinaryArchiveDomain);\n\n_MTL_INLINE MTL::BinaryArchiveDescriptor* MTL::BinaryArchiveDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::BinaryArchiveDescriptor>(_MTL_PRIVATE_CLS(MTLBinaryArchiveDescriptor));\n}\n\n_MTL_INLINE MTL::BinaryArchiveDescriptor* MTL::BinaryArchiveDescriptor::init()\n{\n    return 
NS::Object::init<MTL::BinaryArchiveDescriptor>();\n}\n\n_MTL_INLINE NS::URL* MTL::BinaryArchiveDescriptor::url() const\n{\n    return Object::sendMessage<NS::URL*>(this, _MTL_PRIVATE_SEL(url));\n}\n\n_MTL_INLINE void MTL::BinaryArchiveDescriptor::setUrl(const NS::URL* url)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setUrl_), url);\n}\n\n_MTL_INLINE NS::String* MTL::BinaryArchive::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::BinaryArchive::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Device* MTL::BinaryArchive::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addComputePipelineFunctions(const MTL::ComputePipelineDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addComputePipelineFunctionsWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addRenderPipelineFunctions(const MTL::RenderPipelineDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addRenderPipelineFunctionsWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addTileRenderPipelineFunctions(const MTL::TileRenderPipelineDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addTileRenderPipelineFunctionsWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addMeshRenderPipelineFunctions(const MTL::MeshRenderPipelineDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addMeshRenderPipelineFunctionsWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addLibrary(const MTL::StitchedLibraryDescriptor* 
descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addLibraryWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::serializeToURL(const NS::URL* url, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(serializeToURL_error_), url, error);\n}\n\n_MTL_INLINE bool MTL::BinaryArchive::addFunction(const MTL::FunctionDescriptor* descriptor, const MTL::Library* library, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addFunctionWithDescriptor_library_error_), descriptor, library, error);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_OPTIONS(NS::UInteger, BlitOption) {\n    BlitOptionNone = 0,\n    BlitOptionDepthFromDepthStencil = 1,\n    BlitOptionStencilFromDepthStencil = 2,\n    BlitOptionRowLinearPVRTC = 4,\n};\n\nclass BlitCommandEncoder : public NS::Referencing<BlitCommandEncoder, CommandEncoder>\n{\npublic:\n    void synchronizeResource(const class Resource* resource);\n\n    void synchronizeTexture(const class Texture* texture, NS::UInteger slice, NS::UInteger level);\n\n    void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin);\n\n    void copyFromBuffer(const class Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin);\n\n    void copyFromBuffer(const class Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger 
destinationLevel, MTL::Origin destinationOrigin, MTL::BlitOption options);\n\n    void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage);\n\n    void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage, MTL::BlitOption options);\n\n    void generateMipmaps(const class Texture* texture);\n\n    void fillBuffer(const class Buffer* buffer, NS::Range range, uint8_t value);\n\n    void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, NS::UInteger sliceCount, NS::UInteger levelCount);\n\n    void copyFromTexture(const class Texture* sourceTexture, const class Texture* destinationTexture);\n\n    void copyFromBuffer(const class Buffer* sourceBuffer, NS::UInteger sourceOffset, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger size);\n\n    void updateFence(const class Fence* fence);\n\n    void waitForFence(const class Fence* fence);\n\n    void getTextureAccessCounters(const class Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice, bool resetCounters, const class Buffer* countersBuffer, NS::UInteger countersBufferOffset);\n\n    void resetTextureAccessCounters(const class Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice);\n\n    void optimizeContentsForGPUAccess(const class Texture* texture);\n\n    void optimizeContentsForGPUAccess(const 
class Texture* texture, NS::UInteger slice, NS::UInteger level);\n\n    void optimizeContentsForCPUAccess(const class Texture* texture);\n\n    void optimizeContentsForCPUAccess(const class Texture* texture, NS::UInteger slice, NS::UInteger level);\n\n    void resetCommandsInBuffer(const class IndirectCommandBuffer* buffer, NS::Range range);\n\n    void copyIndirectCommandBuffer(const class IndirectCommandBuffer* source, NS::Range sourceRange, const class IndirectCommandBuffer* destination, NS::UInteger destinationIndex);\n\n    void optimizeIndirectCommandBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range range);\n\n    void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier);\n\n    void resolveCounters(const class CounterSampleBuffer* sampleBuffer, NS::Range range, const class Buffer* destinationBuffer, NS::UInteger destinationOffset);\n};\n\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::synchronizeResource(const MTL::Resource* resource)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(synchronizeResource_), resource);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::synchronizeTexture(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(synchronizeTexture_slice_level_), texture, slice, level);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationTexture, 
destinationSlice, destinationLevel, destinationOrigin);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceBuffer, sourceOffset, sourceBytesPerRow, sourceBytesPerImage, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin, MTL::BlitOption options)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_options_), sourceBuffer, sourceOffset, sourceBytesPerRow, sourceBytesPerImage, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin, options);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationBuffer, destinationOffset, destinationBytesPerRow, destinationBytesPerImage);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage, MTL::BlitOption options)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_options_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationBuffer, destinationOffset, destinationBytesPerRow, destinationBytesPerImage, options);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::generateMipmaps(const MTL::Texture* texture)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(generateMipmapsForTexture_), texture);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::fillBuffer(const MTL::Buffer* buffer, NS::Range range, uint8_t value)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(fillBuffer_range_value_), buffer, range, value);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, NS::UInteger sliceCount, NS::UInteger levelCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_toTexture_destinationSlice_destinationLevel_sliceCount_levelCount_), sourceTexture, sourceSlice, sourceLevel, destinationTexture, 
destinationSlice, destinationLevel, sliceCount, levelCount);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, const MTL::Texture* destinationTexture)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_toTexture_), sourceTexture, destinationTexture);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger size)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size_), sourceBuffer, sourceOffset, destinationBuffer, destinationOffset, size);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::updateFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_), fence);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::waitForFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_), fence);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::getTextureAccessCounters(const MTL::Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice, bool resetCounters, const MTL::Buffer* countersBuffer, NS::UInteger countersBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(getTextureAccessCounters_region_mipLevel_slice_resetCounters_countersBuffer_countersBufferOffset_), texture, region, mipLevel, slice, resetCounters, countersBuffer, countersBufferOffset);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::resetTextureAccessCounters(const MTL::Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resetTextureAccessCounters_region_mipLevel_slice_), texture, region, mipLevel, slice);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForGPUAccess(const MTL::Texture* texture)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForGPUAccess_), texture);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForGPUAccess(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForGPUAccess_slice_level_), texture, slice, level);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForCPUAccess(const MTL::Texture* texture)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForCPUAccess_), texture);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForCPUAccess(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForCPUAccess_slice_level_), texture, slice, level);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::resetCommandsInBuffer(const MTL::IndirectCommandBuffer* buffer, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resetCommandsInBuffer_withRange_), buffer, range);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::copyIndirectCommandBuffer(const MTL::IndirectCommandBuffer* source, NS::Range sourceRange, const MTL::IndirectCommandBuffer* destination, NS::UInteger destinationIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyIndirectCommandBuffer_sourceRange_destination_destinationIndex_), source, sourceRange, destination, destinationIndex);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::optimizeIndirectCommandBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeIndirectCommandBuffer_withRange_), indirectCommandBuffer, range);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier);\n}\n\n_MTL_INLINE void MTL::BlitCommandEncoder::resolveCounters(const MTL::CounterSampleBuffer* sampleBuffer, NS::Range range, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resolveCounters_inRange_destinationBuffer_destinationOffset_), sampleBuffer, range, destinationBuffer, destinationOffset);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass BlitPassSampleBufferAttachmentDescriptor : public NS::Copying<BlitPassSampleBufferAttachmentDescriptor>\n{\npublic:\n    static class BlitPassSampleBufferAttachmentDescriptor* alloc();\n\n    class BlitPassSampleBufferAttachmentDescriptor*        init();\n\n    class CounterSampleBuffer*                             sampleBuffer() const;\n    void                                                   setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);\n\n    NS::UInteger                                           startOfEncoderSampleIndex() const;\n    void                                                   setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);\n\n    NS::UInteger                                           endOfEncoderSampleIndex() const;\n    void                                                   setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);\n};\n\nclass BlitPassSampleBufferAttachmentDescriptorArray : public NS::Referencing<BlitPassSampleBufferAttachmentDescriptorArray>\n{\npublic:\n    static class BlitPassSampleBufferAttachmentDescriptorArray* alloc();\n\n    class BlitPassSampleBufferAttachmentDescriptorArray*        init();\n\n    class BlitPassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                        setObject(const class BlitPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger 
attachmentIndex);\n};\n\nclass BlitPassDescriptor : public NS::Copying<BlitPassDescriptor>\n{\npublic:\n    static class BlitPassDescriptor*                     alloc();\n\n    class BlitPassDescriptor*                            init();\n\n    static class BlitPassDescriptor*                     blitPassDescriptor();\n\n    class BlitPassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;\n};\n\n}\n\n_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::BlitPassSampleBufferAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLBlitPassSampleBufferAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::BlitPassSampleBufferAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::CounterSampleBuffer* MTL::BlitPassSampleBufferAttachmentDescriptor::sampleBuffer() const\n{\n    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(sampleBuffer));\n}\n\n_MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::BlitPassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::BlitPassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(endOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex);\n}\n\n_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassSampleBufferAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::BlitPassSampleBufferAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLBlitPassSampleBufferAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassSampleBufferAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::BlitPassSampleBufferAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::BlitPassSampleBufferAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptorArray::setObject(const MTL::BlitPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::BlitPassDescriptor>(_MTL_PRIVATE_CLS(MTLBlitPassDescriptor));\n}\n\n_MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::init()\n{\n    return NS::Object::init<MTL::BlitPassDescriptor>();\n}\n\n_MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::blitPassDescriptor()\n{\n    return Object::sendMessage<MTL::BlitPassDescriptor*>(_MTL_PRIVATE_CLS(MTLBlitPassDescriptor), _MTL_PRIVATE_SEL(blitPassDescriptor));\n}\n\n_MTL_INLINE 
MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassDescriptor::sampleBufferAttachments() const\n{\n    return Object::sendMessage<MTL::BlitPassSampleBufferAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(sampleBufferAttachments));\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass Buffer : public NS::Referencing<Buffer, Resource>\n{\npublic:\n    NS::UInteger   length() const;\n\n    void*          contents();\n\n    void           didModifyRange(NS::Range range);\n\n    class Texture* newTexture(const class TextureDescriptor* descriptor, NS::UInteger offset, NS::UInteger bytesPerRow);\n\n    void           addDebugMarker(const NS::String* marker, NS::Range range);\n\n    void           removeAllDebugMarkers();\n\n    class Buffer*  remoteStorageBuffer() const;\n\n    class Buffer*  newRemoteBufferViewForDevice(const class Device* device);\n\n    uint64_t       gpuAddress() const;\n};\n\n}\n\n_MTL_INLINE NS::UInteger MTL::Buffer::length() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(length));\n}\n\n_MTL_INLINE void* MTL::Buffer::contents()\n{\n    return Object::sendMessage<void*>(this, _MTL_PRIVATE_SEL(contents));\n}\n\n_MTL_INLINE void MTL::Buffer::didModifyRange(NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(didModifyRange_), range);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Buffer::newTexture(const MTL::TextureDescriptor* descriptor, NS::UInteger offset, NS::UInteger bytesPerRow)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_offset_bytesPerRow_), descriptor, offset, bytesPerRow);\n}\n\n_MTL_INLINE void MTL::Buffer::addDebugMarker(const NS::String* marker, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addDebugMarker_range_), marker, range);\n}\n\n_MTL_INLINE void MTL::Buffer::removeAllDebugMarkers()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(removeAllDebugMarkers));\n}\n\n_MTL_INLINE MTL::Buffer* 
MTL::Buffer::remoteStorageBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(remoteStorageBuffer));\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Buffer::newRemoteBufferViewForDevice(const MTL::Device* device)\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newRemoteBufferViewForDevice_), device);\n}\n\n_MTL_INLINE uint64_t MTL::Buffer::gpuAddress() const\n{\n    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(gpuAddress));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, CaptureError) {\n    CaptureErrorNotSupported = 1,\n    CaptureErrorAlreadyCapturing = 2,\n    CaptureErrorInvalidDescriptor = 3,\n};\n\n_MTL_ENUM(NS::Integer, CaptureDestination) {\n    CaptureDestinationDeveloperTools = 1,\n    CaptureDestinationGPUTraceDocument = 2,\n};\n\nclass CaptureDescriptor : public NS::Copying<CaptureDescriptor>\n{\npublic:\n    static class CaptureDescriptor* alloc();\n\n    class CaptureDescriptor*        init();\n\n    id                              captureObject() const;\n    void                            setCaptureObject(id captureObject);\n\n    MTL::CaptureDestination         destination() const;\n    void                            setDestination(MTL::CaptureDestination destination);\n\n    NS::URL*                        outputURL() const;\n    void                            setOutputURL(const NS::URL* outputURL);\n};\n\nclass CaptureManager : public NS::Referencing<CaptureManager>\n{\npublic:\n    static class CaptureManager* alloc();\n\n    static class CaptureManager* sharedCaptureManager();\n\n    MTL::CaptureManager*         init();\n\n    class CaptureScope*          newCaptureScope(const class Device* device);\n\n    class CaptureScope*          newCaptureScope(const class CommandQueue* commandQueue);\n\n    bool                         supportsDestination(MTL::CaptureDestination destination);\n\n    bool                         startCapture(const class CaptureDescriptor* 
descriptor, NS::Error** error);\n\n    void                         startCapture(const class Device* device);\n\n    void                         startCapture(const class CommandQueue* commandQueue);\n\n    void                         startCapture(const class CaptureScope* captureScope);\n\n    void                         stopCapture();\n\n    class CaptureScope*          defaultCaptureScope() const;\n    void                         setDefaultCaptureScope(const class CaptureScope* defaultCaptureScope);\n\n    bool                         isCapturing() const;\n};\n\n}\n\n_MTL_INLINE MTL::CaptureDescriptor* MTL::CaptureDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::CaptureDescriptor>(_MTL_PRIVATE_CLS(MTLCaptureDescriptor));\n}\n\n_MTL_INLINE MTL::CaptureDescriptor* MTL::CaptureDescriptor::init()\n{\n    return NS::Object::init<MTL::CaptureDescriptor>();\n}\n\n_MTL_INLINE id MTL::CaptureDescriptor::captureObject() const\n{\n    return Object::sendMessage<id>(this, _MTL_PRIVATE_SEL(captureObject));\n}\n\n_MTL_INLINE void MTL::CaptureDescriptor::setCaptureObject(id captureObject)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCaptureObject_), captureObject);\n}\n\n_MTL_INLINE MTL::CaptureDestination MTL::CaptureDescriptor::destination() const\n{\n    return Object::sendMessage<MTL::CaptureDestination>(this, _MTL_PRIVATE_SEL(destination));\n}\n\n_MTL_INLINE void MTL::CaptureDescriptor::setDestination(MTL::CaptureDestination destination)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDestination_), destination);\n}\n\n_MTL_INLINE NS::URL* MTL::CaptureDescriptor::outputURL() const\n{\n    return Object::sendMessage<NS::URL*>(this, _MTL_PRIVATE_SEL(outputURL));\n}\n\n_MTL_INLINE void MTL::CaptureDescriptor::setOutputURL(const NS::URL* outputURL)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOutputURL_), outputURL);\n}\n\n_MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::alloc()\n{\n    return 
NS::Object::alloc<MTL::CaptureManager>(_MTL_PRIVATE_CLS(MTLCaptureManager));\n}\n\n_MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::sharedCaptureManager()\n{\n    return Object::sendMessage<MTL::CaptureManager*>(_MTL_PRIVATE_CLS(MTLCaptureManager), _MTL_PRIVATE_SEL(sharedCaptureManager));\n}\n\n_MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::init()\n{\n    return NS::Object::init<MTL::CaptureManager>();\n}\n\n_MTL_INLINE MTL::CaptureScope* MTL::CaptureManager::newCaptureScope(const MTL::Device* device)\n{\n    return Object::sendMessage<MTL::CaptureScope*>(this, _MTL_PRIVATE_SEL(newCaptureScopeWithDevice_), device);\n}\n\n_MTL_INLINE MTL::CaptureScope* MTL::CaptureManager::newCaptureScope(const MTL::CommandQueue* commandQueue)\n{\n    return Object::sendMessage<MTL::CaptureScope*>(this, _MTL_PRIVATE_SEL(newCaptureScopeWithCommandQueue_), commandQueue);\n}\n\n_MTL_INLINE bool MTL::CaptureManager::supportsDestination(MTL::CaptureDestination destination)\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsDestination_), destination);\n}\n\n_MTL_INLINE bool MTL::CaptureManager::startCapture(const MTL::CaptureDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(startCaptureWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::Device* device)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(startCaptureWithDevice_), device);\n}\n\n_MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::CommandQueue* commandQueue)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(startCaptureWithCommandQueue_), commandQueue);\n}\n\n_MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::CaptureScope* captureScope)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(startCaptureWithScope_), captureScope);\n}\n\n_MTL_INLINE void MTL::CaptureManager::stopCapture()\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(stopCapture));\n}\n\n_MTL_INLINE MTL::CaptureScope* MTL::CaptureManager::defaultCaptureScope() const\n{\n    return Object::sendMessage<MTL::CaptureScope*>(this, _MTL_PRIVATE_SEL(defaultCaptureScope));\n}\n\n_MTL_INLINE void MTL::CaptureManager::setDefaultCaptureScope(const MTL::CaptureScope* defaultCaptureScope)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDefaultCaptureScope_), defaultCaptureScope);\n}\n\n_MTL_INLINE bool MTL::CaptureManager::isCapturing() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isCapturing));\n}\n\nnamespace MTL\n{\nclass CaptureScope : public NS::Referencing<CaptureScope>\n{\npublic:\n    class Device*       device() const;\n\n    NS::String*         label() const;\n    void                setLabel(const NS::String* pLabel);\n\n    class CommandQueue* commandQueue() const;\n\n    void                beginScope();\n    void                endScope();\n};\n}\n\n_MTL_INLINE MTL::Device* MTL::CaptureScope::device() const\n{\n    return Object::sendMessage<Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::CaptureScope::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::CaptureScope::setLabel(const NS::String* pLabel)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), pLabel);\n}\n\n_MTL_INLINE MTL::CommandQueue* MTL::CaptureScope::commandQueue() const\n{\n    return Object::sendMessage<CommandQueue*>(this, _MTL_PRIVATE_SEL(commandQueue));\n}\n\n_MTL_INLINE void MTL::CaptureScope::beginScope()\n{\n    return Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(beginScope));\n}\n\n_MTL_INLINE void MTL::CaptureScope::endScope()\n{\n    return Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(endScope));\n}\n\n#pragma once\n\n#include <functional>\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, CommandBufferStatus) {\n    CommandBufferStatusNotEnqueued = 0,\n    
CommandBufferStatusEnqueued = 1,\n    CommandBufferStatusCommitted = 2,\n    CommandBufferStatusScheduled = 3,\n    CommandBufferStatusCompleted = 4,\n    CommandBufferStatusError = 5,\n};\n\n_MTL_ENUM(NS::UInteger, CommandBufferError) {\n    CommandBufferErrorNone = 0,\n    CommandBufferErrorInternal = 1,\n    CommandBufferErrorTimeout = 2,\n    CommandBufferErrorPageFault = 3,\n    CommandBufferErrorBlacklisted = 4,\n    CommandBufferErrorAccessRevoked = 4,\n    CommandBufferErrorNotPermitted = 7,\n    CommandBufferErrorOutOfMemory = 8,\n    CommandBufferErrorInvalidResource = 9,\n    CommandBufferErrorMemoryless = 10,\n    CommandBufferErrorDeviceRemoved = 11,\n    CommandBufferErrorStackOverflow = 12,\n};\n\n_MTL_OPTIONS(NS::UInteger, CommandBufferErrorOption) {\n    CommandBufferErrorOptionNone = 0,\n    CommandBufferErrorOptionEncoderExecutionStatus = 1,\n};\n\n_MTL_ENUM(NS::Integer, CommandEncoderErrorState) {\n    CommandEncoderErrorStateUnknown = 0,\n    CommandEncoderErrorStateCompleted = 1,\n    CommandEncoderErrorStateAffected = 2,\n    CommandEncoderErrorStatePending = 3,\n    CommandEncoderErrorStateFaulted = 4,\n};\n\nclass CommandBufferDescriptor : public NS::Copying<CommandBufferDescriptor>\n{\npublic:\n    static class CommandBufferDescriptor* alloc();\n\n    class CommandBufferDescriptor*        init();\n\n    bool                                  retainedReferences() const;\n    void                                  setRetainedReferences(bool retainedReferences);\n\n    MTL::CommandBufferErrorOption         errorOptions() const;\n    void                                  setErrorOptions(MTL::CommandBufferErrorOption errorOptions);\n\n    class LogState*                       logState() const;\n    void                                  setLogState(const class LogState* logState);\n};\n\nclass CommandBufferEncoderInfo : public NS::Referencing<CommandBufferEncoderInfo>\n{\npublic:\n    NS::String*                   label() const;\n\n    NS::Array*  
                  debugSignposts() const;\n\n    MTL::CommandEncoderErrorState errorState() const;\n};\n\n_MTL_ENUM(NS::UInteger, DispatchType) {\n    DispatchTypeSerial = 0,\n    DispatchTypeConcurrent = 1,\n};\n\nclass CommandBuffer;\n\nusing CommandBufferHandler = void (^)(CommandBuffer*);\n\nusing HandlerFunction = std::function<void(CommandBuffer*)>;\n\nclass CommandBuffer : public NS::Referencing<CommandBuffer>\n{\npublic:\n    void                                       addScheduledHandler(const HandlerFunction& function);\n\n    void                                       addCompletedHandler(const HandlerFunction& function);\n\n    class Device*                              device() const;\n\n    class CommandQueue*                        commandQueue() const;\n\n    bool                                       retainedReferences() const;\n\n    MTL::CommandBufferErrorOption              errorOptions() const;\n\n    NS::String*                                label() const;\n    void                                       setLabel(const NS::String* label);\n\n    CFTimeInterval                             kernelStartTime() const;\n\n    CFTimeInterval                             kernelEndTime() const;\n\n    class LogContainer*                        logs() const;\n\n    CFTimeInterval                             GPUStartTime() const;\n\n    CFTimeInterval                             GPUEndTime() const;\n\n    void                                       enqueue();\n\n    void                                       commit();\n\n    void                                       addScheduledHandler(const MTL::CommandBufferHandler block);\n\n    void                                       presentDrawable(const class Drawable* drawable);\n\n    void                                       presentDrawableAtTime(const class Drawable* drawable, CFTimeInterval presentationTime);\n\n    void                                       presentDrawableAfterMinimumDuration(const class 
Drawable* drawable, CFTimeInterval duration);\n\n    void                                       waitUntilScheduled();\n\n    void                                       addCompletedHandler(const MTL::CommandBufferHandler block);\n\n    void                                       waitUntilCompleted();\n\n    MTL::CommandBufferStatus                   status() const;\n\n    NS::Error*                                 error() const;\n\n    class BlitCommandEncoder*                  blitCommandEncoder();\n\n    class RenderCommandEncoder*                renderCommandEncoder(const class RenderPassDescriptor* renderPassDescriptor);\n\n    class ComputeCommandEncoder*               computeCommandEncoder(const class ComputePassDescriptor* computePassDescriptor);\n\n    class BlitCommandEncoder*                  blitCommandEncoder(const class BlitPassDescriptor* blitPassDescriptor);\n\n    class ComputeCommandEncoder*               computeCommandEncoder();\n\n    class ComputeCommandEncoder*               computeCommandEncoder(MTL::DispatchType dispatchType);\n\n    void                                       encodeWait(const class Event* event, uint64_t value);\n\n    void                                       encodeSignalEvent(const class Event* event, uint64_t value);\n\n    class ParallelRenderCommandEncoder*        parallelRenderCommandEncoder(const class RenderPassDescriptor* renderPassDescriptor);\n\n    class ResourceStateCommandEncoder*         resourceStateCommandEncoder();\n\n    class ResourceStateCommandEncoder*         resourceStateCommandEncoder(const class ResourceStatePassDescriptor* resourceStatePassDescriptor);\n\n    class AccelerationStructureCommandEncoder* accelerationStructureCommandEncoder();\n\n    class AccelerationStructureCommandEncoder* accelerationStructureCommandEncoder(const class AccelerationStructurePassDescriptor* descriptor);\n\n    void                                       pushDebugGroup(const NS::String* string);\n\n    void               
                        popDebugGroup();\n\n    void                                       useResidencySet(const class ResidencySet* residencySet);\n\n    void                                       useResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count);\n};\n\n}\n\n_MTL_INLINE MTL::CommandBufferDescriptor* MTL::CommandBufferDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::CommandBufferDescriptor>(_MTL_PRIVATE_CLS(MTLCommandBufferDescriptor));\n}\n\n_MTL_INLINE MTL::CommandBufferDescriptor* MTL::CommandBufferDescriptor::init()\n{\n    return NS::Object::init<MTL::CommandBufferDescriptor>();\n}\n\n_MTL_INLINE bool MTL::CommandBufferDescriptor::retainedReferences() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(retainedReferences));\n}\n\n_MTL_INLINE void MTL::CommandBufferDescriptor::setRetainedReferences(bool retainedReferences)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRetainedReferences_), retainedReferences);\n}\n\n_MTL_INLINE MTL::CommandBufferErrorOption MTL::CommandBufferDescriptor::errorOptions() const\n{\n    return Object::sendMessage<MTL::CommandBufferErrorOption>(this, _MTL_PRIVATE_SEL(errorOptions));\n}\n\n_MTL_INLINE void MTL::CommandBufferDescriptor::setErrorOptions(MTL::CommandBufferErrorOption errorOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setErrorOptions_), errorOptions);\n}\n\n_MTL_INLINE MTL::LogState* MTL::CommandBufferDescriptor::logState() const\n{\n    return Object::sendMessage<MTL::LogState*>(this, _MTL_PRIVATE_SEL(logState));\n}\n\n_MTL_INLINE void MTL::CommandBufferDescriptor::setLogState(const MTL::LogState* logState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLogState_), logState);\n}\n\n_MTL_INLINE NS::String* MTL::CommandBufferEncoderInfo::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE NS::Array* MTL::CommandBufferEncoderInfo::debugSignposts() 
const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(debugSignposts));\n}\n\n_MTL_INLINE MTL::CommandEncoderErrorState MTL::CommandBufferEncoderInfo::errorState() const\n{\n    return Object::sendMessage<MTL::CommandEncoderErrorState>(this, _MTL_PRIVATE_SEL(errorState));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::addScheduledHandler(const HandlerFunction& function)\n{\n    __block HandlerFunction blockFunction = function;\n\n    addScheduledHandler(^(MTL::CommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); });\n}\n\n_MTL_INLINE void MTL::CommandBuffer::addCompletedHandler(const HandlerFunction& function)\n{\n    __block HandlerFunction blockFunction = function;\n\n    addCompletedHandler(^(MTL::CommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); });\n}\n\n_MTL_INLINE MTL::Device* MTL::CommandBuffer::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::CommandQueue* MTL::CommandBuffer::commandQueue() const\n{\n    return Object::sendMessage<MTL::CommandQueue*>(this, _MTL_PRIVATE_SEL(commandQueue));\n}\n\n_MTL_INLINE bool MTL::CommandBuffer::retainedReferences() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(retainedReferences));\n}\n\n_MTL_INLINE MTL::CommandBufferErrorOption MTL::CommandBuffer::errorOptions() const\n{\n    return Object::sendMessage<MTL::CommandBufferErrorOption>(this, _MTL_PRIVATE_SEL(errorOptions));\n}\n\n_MTL_INLINE NS::String* MTL::CommandBuffer::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE CFTimeInterval MTL::CommandBuffer::kernelStartTime() const\n{\n    return Object::sendMessage<CFTimeInterval>(this, _MTL_PRIVATE_SEL(kernelStartTime));\n}\n\n_MTL_INLINE CFTimeInterval 
MTL::CommandBuffer::kernelEndTime() const\n{\n    return Object::sendMessage<CFTimeInterval>(this, _MTL_PRIVATE_SEL(kernelEndTime));\n}\n\n_MTL_INLINE MTL::LogContainer* MTL::CommandBuffer::logs() const\n{\n    return Object::sendMessage<MTL::LogContainer*>(this, _MTL_PRIVATE_SEL(logs));\n}\n\n_MTL_INLINE CFTimeInterval MTL::CommandBuffer::GPUStartTime() const\n{\n    return Object::sendMessage<CFTimeInterval>(this, _MTL_PRIVATE_SEL(GPUStartTime));\n}\n\n_MTL_INLINE CFTimeInterval MTL::CommandBuffer::GPUEndTime() const\n{\n    return Object::sendMessage<CFTimeInterval>(this, _MTL_PRIVATE_SEL(GPUEndTime));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::enqueue()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(enqueue));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::commit()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(commit));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::addScheduledHandler(const MTL::CommandBufferHandler block)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addScheduledHandler_), block);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::presentDrawable(const MTL::Drawable* drawable)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(presentDrawable_), drawable);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::presentDrawableAtTime(const MTL::Drawable* drawable, CFTimeInterval presentationTime)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(presentDrawable_atTime_), drawable, presentationTime);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::presentDrawableAfterMinimumDuration(const MTL::Drawable* drawable, CFTimeInterval duration)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(presentDrawable_afterMinimumDuration_), drawable, duration);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::waitUntilScheduled()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitUntilScheduled));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::addCompletedHandler(const MTL::CommandBufferHandler block)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(addCompletedHandler_), block);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::waitUntilCompleted()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitUntilCompleted));\n}\n\n_MTL_INLINE MTL::CommandBufferStatus MTL::CommandBuffer::status() const\n{\n    return Object::sendMessage<MTL::CommandBufferStatus>(this, _MTL_PRIVATE_SEL(status));\n}\n\n_MTL_INLINE NS::Error* MTL::CommandBuffer::error() const\n{\n    return Object::sendMessage<NS::Error*>(this, _MTL_PRIVATE_SEL(error));\n}\n\n_MTL_INLINE MTL::BlitCommandEncoder* MTL::CommandBuffer::blitCommandEncoder()\n{\n    return Object::sendMessage<MTL::BlitCommandEncoder*>(this, _MTL_PRIVATE_SEL(blitCommandEncoder));\n}\n\n_MTL_INLINE MTL::RenderCommandEncoder* MTL::CommandBuffer::renderCommandEncoder(const MTL::RenderPassDescriptor* renderPassDescriptor)\n{\n    return Object::sendMessage<MTL::RenderCommandEncoder*>(this, _MTL_PRIVATE_SEL(renderCommandEncoderWithDescriptor_), renderPassDescriptor);\n}\n\n_MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder(const MTL::ComputePassDescriptor* computePassDescriptor)\n{\n    return Object::sendMessage<MTL::ComputeCommandEncoder*>(this, _MTL_PRIVATE_SEL(computeCommandEncoderWithDescriptor_), computePassDescriptor);\n}\n\n_MTL_INLINE MTL::BlitCommandEncoder* MTL::CommandBuffer::blitCommandEncoder(const MTL::BlitPassDescriptor* blitPassDescriptor)\n{\n    return Object::sendMessage<MTL::BlitCommandEncoder*>(this, _MTL_PRIVATE_SEL(blitCommandEncoderWithDescriptor_), blitPassDescriptor);\n}\n\n_MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder()\n{\n    return Object::sendMessage<MTL::ComputeCommandEncoder*>(this, _MTL_PRIVATE_SEL(computeCommandEncoder));\n}\n\n_MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder(MTL::DispatchType dispatchType)\n{\n    return Object::sendMessage<MTL::ComputeCommandEncoder*>(this, 
_MTL_PRIVATE_SEL(computeCommandEncoderWithDispatchType_), dispatchType);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::encodeWait(const MTL::Event* event, uint64_t value)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(encodeWaitForEvent_value_), event, value);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::encodeSignalEvent(const MTL::Event* event, uint64_t value)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(encodeSignalEvent_value_), event, value);\n}\n\n_MTL_INLINE MTL::ParallelRenderCommandEncoder* MTL::CommandBuffer::parallelRenderCommandEncoder(const MTL::RenderPassDescriptor* renderPassDescriptor)\n{\n    return Object::sendMessage<MTL::ParallelRenderCommandEncoder*>(this, _MTL_PRIVATE_SEL(parallelRenderCommandEncoderWithDescriptor_), renderPassDescriptor);\n}\n\n_MTL_INLINE MTL::ResourceStateCommandEncoder* MTL::CommandBuffer::resourceStateCommandEncoder()\n{\n    return Object::sendMessage<MTL::ResourceStateCommandEncoder*>(this, _MTL_PRIVATE_SEL(resourceStateCommandEncoder));\n}\n\n_MTL_INLINE MTL::ResourceStateCommandEncoder* MTL::CommandBuffer::resourceStateCommandEncoder(const MTL::ResourceStatePassDescriptor* resourceStatePassDescriptor)\n{\n    return Object::sendMessage<MTL::ResourceStateCommandEncoder*>(this, _MTL_PRIVATE_SEL(resourceStateCommandEncoderWithDescriptor_), resourceStatePassDescriptor);\n}\n\n_MTL_INLINE MTL::AccelerationStructureCommandEncoder* MTL::CommandBuffer::accelerationStructureCommandEncoder()\n{\n    return Object::sendMessage<MTL::AccelerationStructureCommandEncoder*>(this, _MTL_PRIVATE_SEL(accelerationStructureCommandEncoder));\n}\n\n_MTL_INLINE MTL::AccelerationStructureCommandEncoder* MTL::CommandBuffer::accelerationStructureCommandEncoder(const MTL::AccelerationStructurePassDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::AccelerationStructureCommandEncoder*>(this, _MTL_PRIVATE_SEL(accelerationStructureCommandEncoderWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE void 
MTL::CommandBuffer::pushDebugGroup(const NS::String* string)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::popDebugGroup()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(popDebugGroup));\n}\n\n_MTL_INLINE void MTL::CommandBuffer::useResidencySet(const MTL::ResidencySet* residencySet)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResidencySet_), residencySet);\n}\n\n_MTL_INLINE void MTL::CommandBuffer::useResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResidencySets_count_), residencySets, count);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass CommandQueue : public NS::Referencing<CommandQueue>\n{\npublic:\n    NS::String*          label() const;\n    void                 setLabel(const NS::String* label);\n\n    class Device*        device() const;\n\n    class CommandBuffer* commandBuffer();\n\n    class CommandBuffer* commandBuffer(const class CommandBufferDescriptor* descriptor);\n\n    class CommandBuffer* commandBufferWithUnretainedReferences();\n\n    void                 insertDebugCaptureBoundary();\n\n    void                 addResidencySet(const class ResidencySet* residencySet);\n\n    void                 addResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count);\n\n    void                 removeResidencySet(const class ResidencySet* residencySet);\n\n    void                 removeResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count);\n};\n\nclass CommandQueueDescriptor : public NS::Copying<CommandQueueDescriptor>\n{\npublic:\n    static class CommandQueueDescriptor* alloc();\n\n    class CommandQueueDescriptor*        init();\n\n    NS::UInteger                         maxCommandBufferCount() const;\n    void                                 setMaxCommandBufferCount(NS::UInteger 
maxCommandBufferCount);\n\n    class LogState*                      logState() const;\n    void                                 setLogState(const class LogState* logState);\n};\n\n}\n\n_MTL_INLINE NS::String* MTL::CommandQueue::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::CommandQueue::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Device* MTL::CommandQueue::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::CommandBuffer* MTL::CommandQueue::commandBuffer()\n{\n    return Object::sendMessage<MTL::CommandBuffer*>(this, _MTL_PRIVATE_SEL(commandBuffer));\n}\n\n_MTL_INLINE MTL::CommandBuffer* MTL::CommandQueue::commandBuffer(const MTL::CommandBufferDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::CommandBuffer*>(this, _MTL_PRIVATE_SEL(commandBufferWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::CommandBuffer* MTL::CommandQueue::commandBufferWithUnretainedReferences()\n{\n    return Object::sendMessage<MTL::CommandBuffer*>(this, _MTL_PRIVATE_SEL(commandBufferWithUnretainedReferences));\n}\n\n_MTL_INLINE void MTL::CommandQueue::insertDebugCaptureBoundary()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(insertDebugCaptureBoundary));\n}\n\n_MTL_INLINE void MTL::CommandQueue::addResidencySet(const MTL::ResidencySet* residencySet)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addResidencySet_), residencySet);\n}\n\n_MTL_INLINE void MTL::CommandQueue::addResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addResidencySets_count_), residencySets, count);\n}\n\n_MTL_INLINE void MTL::CommandQueue::removeResidencySet(const MTL::ResidencySet* residencySet)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(removeResidencySet_), residencySet);\n}\n\n_MTL_INLINE void MTL::CommandQueue::removeResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(removeResidencySets_count_), residencySets, count);\n}\n\n_MTL_INLINE MTL::CommandQueueDescriptor* MTL::CommandQueueDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::CommandQueueDescriptor>(_MTL_PRIVATE_CLS(MTLCommandQueueDescriptor));\n}\n\n_MTL_INLINE MTL::CommandQueueDescriptor* MTL::CommandQueueDescriptor::init()\n{\n    return NS::Object::init<MTL::CommandQueueDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::CommandQueueDescriptor::maxCommandBufferCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxCommandBufferCount));\n}\n\n_MTL_INLINE void MTL::CommandQueueDescriptor::setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxCommandBufferCount_), maxCommandBufferCount);\n}\n\n_MTL_INLINE MTL::LogState* MTL::CommandQueueDescriptor::logState() const\n{\n    return Object::sendMessage<MTL::LogState*>(this, _MTL_PRIVATE_SEL(logState));\n}\n\n_MTL_INLINE void MTL::CommandQueueDescriptor::setLogState(const MTL::LogState* logState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLogState_), logState);\n}\n\n#pragma once\n\nnamespace MTL\n{\nstruct DispatchThreadgroupsIndirectArguments\n{\n    uint32_t threadgroupsPerGrid[3];\n} _MTL_PACKED;\n\nstruct StageInRegionIndirectArguments\n{\n    uint32_t stageInOrigin[3];\n    uint32_t stageInSize[3];\n} _MTL_PACKED;\n\nclass ComputeCommandEncoder : public NS::Referencing<ComputeCommandEncoder, CommandEncoder>\n{\npublic:\n    MTL::DispatchType dispatchType() const;\n\n    void              setComputePipelineState(const class ComputePipelineState* state);\n\n    void              setBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void   
           setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void              setBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void              setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range);\n\n    void              setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void              setBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range);\n\n    void              setBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void              setBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index);\n\n    void              setVisibleFunctionTable(const class VisibleFunctionTable* visibleFunctionTable, NS::UInteger bufferIndex);\n\n    void              setVisibleFunctionTables(const class VisibleFunctionTable* const visibleFunctionTables[], NS::Range range);\n\n    void              setIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex);\n\n    void              setIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range);\n\n    void              setAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex);\n\n    void              setTexture(const class Texture* texture, NS::UInteger index);\n\n    void              setTextures(const class Texture* const textures[], NS::Range range);\n\n    void              setSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void              setSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void              setSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger 
index);\n\n    void              setSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range);\n\n    void              setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index);\n\n    void              setImageblockWidth(NS::UInteger width, NS::UInteger height);\n\n    void              setStageInRegion(MTL::Region region);\n\n    void              setStageInRegion(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    void              dispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup);\n\n    void              dispatchThreadgroups(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerThreadgroup);\n\n    void              dispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup);\n\n    void              updateFence(const class Fence* fence);\n\n    void              waitForFence(const class Fence* fence);\n\n    void              useResource(const class Resource* resource, MTL::ResourceUsage usage);\n\n    void              useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage);\n\n    void              useHeap(const class Heap* heap);\n\n    void              useHeaps(const class Heap* const heaps[], NS::UInteger count);\n\n    void              executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange);\n\n    void              executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandbuffer, const class Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset);\n\n    void              memoryBarrier(MTL::BarrierScope scope);\n\n    void              memoryBarrier(const class Resource* const resources[], NS::UInteger count);\n\n    void              sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool 
barrier);\n};\n\n}\n\n_MTL_INLINE MTL::DispatchType MTL::ComputeCommandEncoder::dispatchType() const\n{\n    return Object::sendMessage<MTL::DispatchType>(this, _MTL_PRIVATE_SEL(dispatchType));\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setComputePipelineState(const MTL::ComputePipelineState* state)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineState_), state);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffers_offsets_attributeStrides_withRange_), buffers, offsets, strides, range);\n}\n\n_MTL_INLINE void 
MTL::ComputeCommandEncoder::setBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBufferOffset_attributeStride_atIndex_), offset, stride, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBytes_length_attributeStride_atIndex_), bytes, length, stride, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setVisibleFunctionTable(const MTL::VisibleFunctionTable* visibleFunctionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atBufferIndex_), visibleFunctionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const visibleFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withBufferRange_), visibleFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAccelerationStructure_atBufferIndex_), accelerationStructure, 
bufferIndex);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_atIndex_), length, index);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setImageblockWidth(NS::UInteger width, NS::UInteger height)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setImageblockWidth_height_), width, 
height);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setStageInRegion(MTL::Region region)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStageInRegion_), region);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::setStageInRegion(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStageInRegionWithIndirectBuffer_indirectBufferOffset_), indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(dispatchThreadgroups_threadsPerThreadgroup_), threadgroupsPerGrid, threadsPerThreadgroup);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreadgroups(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(dispatchThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerThreadgroup_), indirectBuffer, indirectBufferOffset, threadsPerThreadgroup);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(dispatchThreads_threadsPerThreadgroup_), threadsPerGrid, threadsPerThreadgroup);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::updateFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_), fence);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::waitForFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_), fence);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage);\n}\n\n_MTL_INLINE void 
MTL::ComputeCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResources_count_usage_), resources, count, usage);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::useHeap(const MTL::Heap* heap)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeap_), heap);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_withRange_), indirectCommandBuffer, executionRange);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandbuffer, const MTL::Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_), indirectCommandbuffer, indirectRangeBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::memoryBarrier(MTL::BarrierScope scope)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(memoryBarrierWithScope_), scope);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::memoryBarrier(const MTL::Resource* const resources[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(memoryBarrierWithResources_count_), resources, count);\n}\n\n_MTL_INLINE void MTL::ComputeCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, 
barrier);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass ComputePassSampleBufferAttachmentDescriptor : public NS::Copying<ComputePassSampleBufferAttachmentDescriptor>\n{\npublic:\n    static class ComputePassSampleBufferAttachmentDescriptor* alloc();\n\n    class ComputePassSampleBufferAttachmentDescriptor*        init();\n\n    class CounterSampleBuffer*                                sampleBuffer() const;\n    void                                                      setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);\n\n    NS::UInteger                                              startOfEncoderSampleIndex() const;\n    void                                                      setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);\n\n    NS::UInteger                                              endOfEncoderSampleIndex() const;\n    void                                                      setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);\n};\n\nclass ComputePassSampleBufferAttachmentDescriptorArray : public NS::Referencing<ComputePassSampleBufferAttachmentDescriptorArray>\n{\npublic:\n    static class ComputePassSampleBufferAttachmentDescriptorArray* alloc();\n\n    class ComputePassSampleBufferAttachmentDescriptorArray*        init();\n\n    class ComputePassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                           setObject(const class ComputePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass ComputePassDescriptor : public NS::Copying<ComputePassDescriptor>\n{\npublic:\n    static class ComputePassDescriptor*                     alloc();\n\n    class ComputePassDescriptor*                            init();\n\n    static class ComputePassDescriptor*                     computePassDescriptor();\n\n    MTL::DispatchType                                       dispatchType() const;\n    
void                                                    setDispatchType(MTL::DispatchType dispatchType);\n\n    class ComputePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;\n};\n\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ComputePassSampleBufferAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLComputePassSampleBufferAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::ComputePassSampleBufferAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::CounterSampleBuffer* MTL::ComputePassSampleBufferAttachmentDescriptor::sampleBuffer() const\n{\n    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(sampleBuffer));\n}\n\n_MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex);\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassSampleBufferAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::ComputePassSampleBufferAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLComputePassSampleBufferAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassSampleBufferAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::ComputePassSampleBufferAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::ComputePassSampleBufferAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::ComputePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ComputePassDescriptor>(_MTL_PRIVATE_CLS(MTLComputePassDescriptor));\n}\n\n_MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::init()\n{\n    return NS::Object::init<MTL::ComputePassDescriptor>();\n}\n\n_MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::computePassDescriptor()\n{\n    return Object::sendMessage<MTL::ComputePassDescriptor*>(_MTL_PRIVATE_CLS(MTLComputePassDescriptor), _MTL_PRIVATE_SEL(computePassDescriptor));\n}\n\n_MTL_INLINE MTL::DispatchType MTL::ComputePassDescriptor::dispatchType() const\n{\n    return Object::sendMessage<MTL::DispatchType>(this, _MTL_PRIVATE_SEL(dispatchType));\n}\n\n_MTL_INLINE void 
MTL::ComputePassDescriptor::setDispatchType(MTL::DispatchType dispatchType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDispatchType_), dispatchType);\n}\n\n_MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassDescriptor::sampleBufferAttachments() const\n{\n    return Object::sendMessage<MTL::ComputePassSampleBufferAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(sampleBufferAttachments));\n}\n\n#pragma once\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, Mutability) {\n    MutabilityDefault = 0,\n    MutabilityMutable = 1,\n    MutabilityImmutable = 2,\n};\n\n_MTL_ENUM(NS::Integer, ShaderValidation) {\n    ShaderValidationDefault = 0,\n    ShaderValidationEnabled = 1,\n    ShaderValidationDisabled = 2,\n};\n\nclass PipelineBufferDescriptor : public NS::Copying<PipelineBufferDescriptor>\n{\npublic:\n    static class PipelineBufferDescriptor* alloc();\n\n    class PipelineBufferDescriptor*        init();\n\n    MTL::Mutability                        mutability() const;\n    void                                   setMutability(MTL::Mutability mutability);\n};\n\nclass PipelineBufferDescriptorArray : public NS::Referencing<PipelineBufferDescriptorArray>\n{\npublic:\n    static class PipelineBufferDescriptorArray* alloc();\n\n    class PipelineBufferDescriptorArray*        init();\n\n    class PipelineBufferDescriptor*             object(NS::UInteger bufferIndex);\n\n    void                                        setObject(const class PipelineBufferDescriptor* buffer, NS::UInteger bufferIndex);\n};\n\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::PipelineBufferDescriptor>(_MTL_PRIVATE_CLS(MTLPipelineBufferDescriptor));\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptor::init()\n{\n    return NS::Object::init<MTL::PipelineBufferDescriptor>();\n}\n\n_MTL_INLINE MTL::Mutability 
MTL::PipelineBufferDescriptor::mutability() const\n{\n    return Object::sendMessage<MTL::Mutability>(this, _MTL_PRIVATE_SEL(mutability));\n}\n\n_MTL_INLINE void MTL::PipelineBufferDescriptor::setMutability(MTL::Mutability mutability)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMutability_), mutability);\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::PipelineBufferDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::PipelineBufferDescriptorArray>(_MTL_PRIVATE_CLS(MTLPipelineBufferDescriptorArray));\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::PipelineBufferDescriptorArray::init()\n{\n    return NS::Object::init<MTL::PipelineBufferDescriptorArray>();\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptorArray::object(NS::UInteger bufferIndex)\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), bufferIndex);\n}\n\n_MTL_INLINE void MTL::PipelineBufferDescriptorArray::setObject(const MTL::PipelineBufferDescriptor* buffer, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), buffer, bufferIndex);\n}\n\nnamespace MTL\n{\nclass ComputePipelineReflection : public NS::Referencing<ComputePipelineReflection>\n{\npublic:\n    static class ComputePipelineReflection* alloc();\n\n    class ComputePipelineReflection*        init();\n\n    NS::Array*                              bindings() const;\n\n    NS::Array*                              arguments() const;\n};\n\nclass ComputePipelineDescriptor : public NS::Copying<ComputePipelineDescriptor>\n{\npublic:\n    static class ComputePipelineDescriptor* alloc();\n\n    class ComputePipelineDescriptor*        init();\n\n    NS::String*                             label() const;\n    void                                    setLabel(const NS::String* label);\n\n    class Function*                         computeFunction() const;\n    
void                                    setComputeFunction(const class Function* computeFunction);\n\n    bool                                    threadGroupSizeIsMultipleOfThreadExecutionWidth() const;\n    void                                    setThreadGroupSizeIsMultipleOfThreadExecutionWidth(bool threadGroupSizeIsMultipleOfThreadExecutionWidth);\n\n    NS::UInteger                            maxTotalThreadsPerThreadgroup() const;\n    void                                    setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup);\n\n    class StageInputOutputDescriptor*       stageInputDescriptor() const;\n    void                                    setStageInputDescriptor(const class StageInputOutputDescriptor* stageInputDescriptor);\n\n    class PipelineBufferDescriptorArray*    buffers() const;\n\n    bool                                    supportIndirectCommandBuffers() const;\n    void                                    setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers);\n\n    NS::Array*                              insertLibraries() const;\n    void                                    setInsertLibraries(const NS::Array* insertLibraries);\n\n    NS::Array*                              preloadedLibraries() const;\n    void                                    setPreloadedLibraries(const NS::Array* preloadedLibraries);\n\n    NS::Array*                              binaryArchives() const;\n    void                                    setBinaryArchives(const NS::Array* binaryArchives);\n\n    void                                    reset();\n\n    class LinkedFunctions*                  linkedFunctions() const;\n    void                                    setLinkedFunctions(const class LinkedFunctions* linkedFunctions);\n\n    bool                                    supportAddingBinaryFunctions() const;\n    void                                    setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions);\n\n    
NS::UInteger                            maxCallStackDepth() const;\n    void                                    setMaxCallStackDepth(NS::UInteger maxCallStackDepth);\n\n    MTL::ShaderValidation                   shaderValidation() const;\n    void                                    setShaderValidation(MTL::ShaderValidation shaderValidation);\n};\n\nclass ComputePipelineState : public NS::Referencing<ComputePipelineState>\n{\npublic:\n    NS::String*                      label() const;\n\n    class Device*                    device() const;\n\n    NS::UInteger                     maxTotalThreadsPerThreadgroup() const;\n\n    NS::UInteger                     threadExecutionWidth() const;\n\n    NS::UInteger                     staticThreadgroupMemoryLength() const;\n\n    NS::UInteger                     imageblockMemoryLength(MTL::Size imageblockDimensions);\n\n    bool                             supportIndirectCommandBuffers() const;\n\n    MTL::ResourceID                  gpuResourceID() const;\n\n    class FunctionHandle*            functionHandle(const class Function* function);\n\n    class ComputePipelineState*      newComputePipelineState(const NS::Array* functions, NS::Error** error);\n\n    class VisibleFunctionTable*      newVisibleFunctionTable(const class VisibleFunctionTableDescriptor* descriptor);\n\n    class IntersectionFunctionTable* newIntersectionFunctionTable(const class IntersectionFunctionTableDescriptor* descriptor);\n\n    MTL::ShaderValidation            shaderValidation() const;\n};\n\n}\n\n_MTL_INLINE MTL::ComputePipelineReflection* MTL::ComputePipelineReflection::alloc()\n{\n    return NS::Object::alloc<MTL::ComputePipelineReflection>(_MTL_PRIVATE_CLS(MTLComputePipelineReflection));\n}\n\n_MTL_INLINE MTL::ComputePipelineReflection* MTL::ComputePipelineReflection::init()\n{\n    return NS::Object::init<MTL::ComputePipelineReflection>();\n}\n\n_MTL_INLINE NS::Array* MTL::ComputePipelineReflection::bindings() const\n{\n    return 
Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(bindings));\n}\n\n_MTL_INLINE NS::Array* MTL::ComputePipelineReflection::arguments() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(arguments));\n}\n\n_MTL_INLINE MTL::ComputePipelineDescriptor* MTL::ComputePipelineDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ComputePipelineDescriptor>(_MTL_PRIVATE_CLS(MTLComputePipelineDescriptor));\n}\n\n_MTL_INLINE MTL::ComputePipelineDescriptor* MTL::ComputePipelineDescriptor::init()\n{\n    return NS::Object::init<MTL::ComputePipelineDescriptor>();\n}\n\n_MTL_INLINE NS::String* MTL::ComputePipelineDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Function* MTL::ComputePipelineDescriptor::computeFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(computeFunction));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setComputeFunction(const MTL::Function* computeFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputeFunction_), computeFunction);\n}\n\n_MTL_INLINE bool MTL::ComputePipelineDescriptor::threadGroupSizeIsMultipleOfThreadExecutionWidth() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(threadGroupSizeIsMultipleOfThreadExecutionWidth));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setThreadGroupSizeIsMultipleOfThreadExecutionWidth(bool threadGroupSizeIsMultipleOfThreadExecutionWidth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadGroupSizeIsMultipleOfThreadExecutionWidth_), threadGroupSizeIsMultipleOfThreadExecutionWidth);\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineDescriptor::maxTotalThreadsPerThreadgroup() const\n{\n    return 
Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup);\n}\n\n_MTL_INLINE MTL::StageInputOutputDescriptor* MTL::ComputePipelineDescriptor::stageInputDescriptor() const\n{\n    return Object::sendMessage<MTL::StageInputOutputDescriptor*>(this, _MTL_PRIVATE_SEL(stageInputDescriptor));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setStageInputDescriptor(const MTL::StageInputOutputDescriptor* stageInputDescriptor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStageInputDescriptor_), stageInputDescriptor);\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::ComputePipelineDescriptor::buffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(buffers));\n}\n\n_MTL_INLINE bool MTL::ComputePipelineDescriptor::supportIndirectCommandBuffers() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers);\n}\n\n_MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::insertLibraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(insertLibraries));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setInsertLibraries(const NS::Array* insertLibraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInsertLibraries_), insertLibraries);\n}\n\n_MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::preloadedLibraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, 
_MTL_PRIVATE_SEL(preloadedLibraries));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setPreloadedLibraries(const NS::Array* preloadedLibraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPreloadedLibraries_), preloadedLibraries);\n}\n\n_MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::binaryArchives() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::ComputePipelineDescriptor::linkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(linkedFunctions));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setLinkedFunctions(const MTL::LinkedFunctions* linkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLinkedFunctions_), linkedFunctions);\n}\n\n_MTL_INLINE bool MTL::ComputePipelineDescriptor::supportAddingBinaryFunctions() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportAddingBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportAddingBinaryFunctions_), supportAddingBinaryFunctions);\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineDescriptor::maxCallStackDepth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxCallStackDepth));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setMaxCallStackDepth(NS::UInteger maxCallStackDepth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxCallStackDepth_), 
maxCallStackDepth);\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::ComputePipelineDescriptor::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n_MTL_INLINE void MTL::ComputePipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation);\n}\n\n_MTL_INLINE NS::String* MTL::ComputePipelineState::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::Device* MTL::ComputePipelineState::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineState::maxTotalThreadsPerThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup));\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineState::threadExecutionWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadExecutionWidth));\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineState::staticThreadgroupMemoryLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(staticThreadgroupMemoryLength));\n}\n\n_MTL_INLINE NS::UInteger MTL::ComputePipelineState::imageblockMemoryLength(MTL::Size imageblockDimensions)\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(imageblockMemoryLengthForDimensions_), imageblockDimensions);\n}\n\n_MTL_INLINE bool MTL::ComputePipelineState::supportIndirectCommandBuffers() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers));\n}\n\n_MTL_INLINE MTL::ResourceID MTL::ComputePipelineState::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE MTL::FunctionHandle* 
MTL::ComputePipelineState::functionHandle(const MTL::Function* function)\n{\n    return Object::sendMessage<MTL::FunctionHandle*>(this, _MTL_PRIVATE_SEL(functionHandleWithFunction_), function);\n}\n\n_MTL_INLINE MTL::ComputePipelineState* MTL::ComputePipelineState::newComputePipelineState(const NS::Array* functions, NS::Error** error)\n{\n    return Object::sendMessage<MTL::ComputePipelineState*>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithAdditionalBinaryFunctions_error_), functions, error);\n}\n\n_MTL_INLINE MTL::VisibleFunctionTable* MTL::ComputePipelineState::newVisibleFunctionTable(const MTL::VisibleFunctionTableDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::VisibleFunctionTable*>(this, _MTL_PRIVATE_SEL(newVisibleFunctionTableWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::IntersectionFunctionTable* MTL::ComputePipelineState::newIntersectionFunctionTable(const MTL::IntersectionFunctionTableDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::IntersectionFunctionTable*>(this, _MTL_PRIVATE_SEL(newIntersectionFunctionTableWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::ComputePipelineState::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n#pragma once\n\nnamespace MTL\n{\nstruct CounterResultTimestamp\n{\n    uint64_t timestamp;\n} _MTL_PACKED;\n\nstruct CounterResultStageUtilization\n{\n    uint64_t totalCycles;\n    uint64_t vertexCycles;\n    uint64_t tessellationCycles;\n    uint64_t postTessellationVertexCycles;\n    uint64_t fragmentCycles;\n    uint64_t renderTargetCycles;\n} _MTL_PACKED;\n\nstruct CounterResultStatistic\n{\n    uint64_t tessellationInputPatches;\n    uint64_t vertexInvocations;\n    uint64_t postTessellationVertexInvocations;\n    uint64_t clipperInvocations;\n    uint64_t clipperPrimitivesOut;\n    uint64_t fragmentInvocations;\n    uint64_t fragmentsPassed;\n    uint64_t 
computeKernelInvocations;\n} _MTL_PACKED;\n\n_MTL_CONST(NS::ErrorDomain, CounterErrorDomain);\n\nusing CommonCounter = NS::String*;\n\n_MTL_CONST(CommonCounter, CommonCounterTimestamp);\n_MTL_CONST(CommonCounter, CommonCounterTessellationInputPatches);\n_MTL_CONST(CommonCounter, CommonCounterVertexInvocations);\n_MTL_CONST(CommonCounter, CommonCounterPostTessellationVertexInvocations);\n_MTL_CONST(CommonCounter, CommonCounterClipperInvocations);\n_MTL_CONST(CommonCounter, CommonCounterClipperPrimitivesOut);\n_MTL_CONST(CommonCounter, CommonCounterFragmentInvocations);\n_MTL_CONST(CommonCounter, CommonCounterFragmentsPassed);\n_MTL_CONST(CommonCounter, CommonCounterComputeKernelInvocations);\n_MTL_CONST(CommonCounter, CommonCounterTotalCycles);\n_MTL_CONST(CommonCounter, CommonCounterVertexCycles);\n_MTL_CONST(CommonCounter, CommonCounterTessellationCycles);\n_MTL_CONST(CommonCounter, CommonCounterPostTessellationVertexCycles);\n_MTL_CONST(CommonCounter, CommonCounterFragmentCycles);\n_MTL_CONST(CommonCounter, CommonCounterRenderTargetWriteCycles);\n\nusing CommonCounterSet = NS::String*;\n\n_MTL_CONST(CommonCounterSet, CommonCounterSetTimestamp);\n_MTL_CONST(CommonCounterSet, CommonCounterSetStageUtilization);\n_MTL_CONST(CommonCounterSet, CommonCounterSetStatistic);\n\nclass Counter : public NS::Referencing<Counter>\n{\npublic:\n    NS::String* name() const;\n};\n\nclass CounterSet : public NS::Referencing<CounterSet>\n{\npublic:\n    NS::String* name() const;\n\n    NS::Array*  counters() const;\n};\n\nclass CounterSampleBufferDescriptor : public NS::Copying<CounterSampleBufferDescriptor>\n{\npublic:\n    static class CounterSampleBufferDescriptor* alloc();\n\n    class CounterSampleBufferDescriptor*        init();\n\n    class CounterSet*                           counterSet() const;\n    void                                        setCounterSet(const class CounterSet* counterSet);\n\n    NS::String*                                 label() const;\n    void       
                                 setLabel(const NS::String* label);\n\n    MTL::StorageMode                            storageMode() const;\n    void                                        setStorageMode(MTL::StorageMode storageMode);\n\n    NS::UInteger                                sampleCount() const;\n    void                                        setSampleCount(NS::UInteger sampleCount);\n};\n\nclass CounterSampleBuffer : public NS::Referencing<CounterSampleBuffer>\n{\npublic:\n    class Device* device() const;\n\n    NS::String*   label() const;\n\n    NS::UInteger  sampleCount() const;\n\n    NS::Data*     resolveCounterRange(NS::Range range);\n};\n\n_MTL_ENUM(NS::Integer, CounterSampleBufferError) {\n    CounterSampleBufferErrorOutOfMemory = 0,\n    CounterSampleBufferErrorInvalid = 1,\n    CounterSampleBufferErrorInternal = 2,\n};\n\nstatic const NS::UInteger CounterErrorValue = static_cast<NS::UInteger>(~0ULL);\nstatic const NS::UInteger CounterDontSample = static_cast<NS::UInteger>(-1);\n\n}\n\n_MTL_PRIVATE_DEF_STR(NS::ErrorDomain, CounterErrorDomain);\n\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTimestamp);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTessellationInputPatches);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterVertexInvocations);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterPostTessellationVertexInvocations);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterClipperInvocations);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterClipperPrimitivesOut);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentInvocations);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentsPassed);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterComputeKernelInvocations);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTotalCycles);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterVertexCycles);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, 
CommonCounterTessellationCycles);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterPostTessellationVertexCycles);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentCycles);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterRenderTargetWriteCycles);\n\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetTimestamp);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetStageUtilization);\n_MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetStatistic);\n\n_MTL_INLINE NS::String* MTL::Counter::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE NS::String* MTL::CounterSet::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE NS::Array* MTL::CounterSet::counters() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(counters));\n}\n\n_MTL_INLINE MTL::CounterSampleBufferDescriptor* MTL::CounterSampleBufferDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::CounterSampleBufferDescriptor>(_MTL_PRIVATE_CLS(MTLCounterSampleBufferDescriptor));\n}\n\n_MTL_INLINE MTL::CounterSampleBufferDescriptor* MTL::CounterSampleBufferDescriptor::init()\n{\n    return NS::Object::init<MTL::CounterSampleBufferDescriptor>();\n}\n\n_MTL_INLINE MTL::CounterSet* MTL::CounterSampleBufferDescriptor::counterSet() const\n{\n    return Object::sendMessage<MTL::CounterSet*>(this, _MTL_PRIVATE_SEL(counterSet));\n}\n\n_MTL_INLINE void MTL::CounterSampleBufferDescriptor::setCounterSet(const MTL::CounterSet* counterSet)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCounterSet_), counterSet);\n}\n\n_MTL_INLINE NS::String* MTL::CounterSampleBufferDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::CounterSampleBufferDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::StorageMode MTL::CounterSampleBufferDescriptor::storageMode() const\n{\n    return Object::sendMessage<MTL::StorageMode>(this, _MTL_PRIVATE_SEL(storageMode));\n}\n\n_MTL_INLINE void MTL::CounterSampleBufferDescriptor::setStorageMode(MTL::StorageMode storageMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStorageMode_), storageMode);\n}\n\n_MTL_INLINE NS::UInteger MTL::CounterSampleBufferDescriptor::sampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE void MTL::CounterSampleBufferDescriptor::setSampleCount(NS::UInteger sampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount);\n}\n\n_MTL_INLINE MTL::Device* MTL::CounterSampleBuffer::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::CounterSampleBuffer::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE NS::UInteger MTL::CounterSampleBuffer::sampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE NS::Data* MTL::CounterSampleBuffer::resolveCounterRange(NS::Range range)\n{\n    return Object::sendMessage<NS::Data*>(this, _MTL_PRIVATE_SEL(resolveCounterRange_), range);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, CompareFunction) {\n    CompareFunctionNever = 0,\n    CompareFunctionLess = 1,\n    CompareFunctionEqual = 2,\n    CompareFunctionLessEqual = 3,\n    CompareFunctionGreater = 4,\n    CompareFunctionNotEqual = 5,\n    CompareFunctionGreaterEqual = 6,\n    CompareFunctionAlways = 7,\n};\n\n_MTL_ENUM(NS::UInteger, StencilOperation) {\n    StencilOperationKeep = 0,\n    StencilOperationZero = 1,\n    StencilOperationReplace = 2,\n    StencilOperationIncrementClamp = 3,\n    
StencilOperationDecrementClamp = 4,\n    StencilOperationInvert = 5,\n    StencilOperationIncrementWrap = 6,\n    StencilOperationDecrementWrap = 7,\n};\n\nclass StencilDescriptor : public NS::Copying<StencilDescriptor>\n{\npublic:\n    static class StencilDescriptor* alloc();\n\n    class StencilDescriptor*        init();\n\n    MTL::CompareFunction            stencilCompareFunction() const;\n    void                            setStencilCompareFunction(MTL::CompareFunction stencilCompareFunction);\n\n    MTL::StencilOperation           stencilFailureOperation() const;\n    void                            setStencilFailureOperation(MTL::StencilOperation stencilFailureOperation);\n\n    MTL::StencilOperation           depthFailureOperation() const;\n    void                            setDepthFailureOperation(MTL::StencilOperation depthFailureOperation);\n\n    MTL::StencilOperation           depthStencilPassOperation() const;\n    void                            setDepthStencilPassOperation(MTL::StencilOperation depthStencilPassOperation);\n\n    uint32_t                        readMask() const;\n    void                            setReadMask(uint32_t readMask);\n\n    uint32_t                        writeMask() const;\n    void                            setWriteMask(uint32_t writeMask);\n};\n\nclass DepthStencilDescriptor : public NS::Copying<DepthStencilDescriptor>\n{\npublic:\n    static class DepthStencilDescriptor* alloc();\n\n    class DepthStencilDescriptor*        init();\n\n    MTL::CompareFunction                 depthCompareFunction() const;\n    void                                 setDepthCompareFunction(MTL::CompareFunction depthCompareFunction);\n\n    bool                                 depthWriteEnabled() const;\n    void                                 setDepthWriteEnabled(bool depthWriteEnabled);\n\n    class StencilDescriptor*             frontFaceStencil() const;\n    void                                 setFrontFaceStencil(const class 
StencilDescriptor* frontFaceStencil);\n\n    class StencilDescriptor*             backFaceStencil() const;\n    void                                 setBackFaceStencil(const class StencilDescriptor* backFaceStencil);\n\n    NS::String*                          label() const;\n    void                                 setLabel(const NS::String* label);\n};\n\nclass DepthStencilState : public NS::Referencing<DepthStencilState>\n{\npublic:\n    NS::String*   label() const;\n\n    class Device* device() const;\n};\n\n}\n\n_MTL_INLINE MTL::StencilDescriptor* MTL::StencilDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::StencilDescriptor>(_MTL_PRIVATE_CLS(MTLStencilDescriptor));\n}\n\n_MTL_INLINE MTL::StencilDescriptor* MTL::StencilDescriptor::init()\n{\n    return NS::Object::init<MTL::StencilDescriptor>();\n}\n\n_MTL_INLINE MTL::CompareFunction MTL::StencilDescriptor::stencilCompareFunction() const\n{\n    return Object::sendMessage<MTL::CompareFunction>(this, _MTL_PRIVATE_SEL(stencilCompareFunction));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setStencilCompareFunction(MTL::CompareFunction stencilCompareFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilCompareFunction_), stencilCompareFunction);\n}\n\n_MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::stencilFailureOperation() const\n{\n    return Object::sendMessage<MTL::StencilOperation>(this, _MTL_PRIVATE_SEL(stencilFailureOperation));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setStencilFailureOperation(MTL::StencilOperation stencilFailureOperation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilFailureOperation_), stencilFailureOperation);\n}\n\n_MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::depthFailureOperation() const\n{\n    return Object::sendMessage<MTL::StencilOperation>(this, _MTL_PRIVATE_SEL(depthFailureOperation));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setDepthFailureOperation(MTL::StencilOperation 
depthFailureOperation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthFailureOperation_), depthFailureOperation);\n}\n\n_MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::depthStencilPassOperation() const\n{\n    return Object::sendMessage<MTL::StencilOperation>(this, _MTL_PRIVATE_SEL(depthStencilPassOperation));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setDepthStencilPassOperation(MTL::StencilOperation depthStencilPassOperation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStencilPassOperation_), depthStencilPassOperation);\n}\n\n_MTL_INLINE uint32_t MTL::StencilDescriptor::readMask() const\n{\n    return Object::sendMessage<uint32_t>(this, _MTL_PRIVATE_SEL(readMask));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setReadMask(uint32_t readMask)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setReadMask_), readMask);\n}\n\n_MTL_INLINE uint32_t MTL::StencilDescriptor::writeMask() const\n{\n    return Object::sendMessage<uint32_t>(this, _MTL_PRIVATE_SEL(writeMask));\n}\n\n_MTL_INLINE void MTL::StencilDescriptor::setWriteMask(uint32_t writeMask)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setWriteMask_), writeMask);\n}\n\n_MTL_INLINE MTL::DepthStencilDescriptor* MTL::DepthStencilDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::DepthStencilDescriptor>(_MTL_PRIVATE_CLS(MTLDepthStencilDescriptor));\n}\n\n_MTL_INLINE MTL::DepthStencilDescriptor* MTL::DepthStencilDescriptor::init()\n{\n    return NS::Object::init<MTL::DepthStencilDescriptor>();\n}\n\n_MTL_INLINE MTL::CompareFunction MTL::DepthStencilDescriptor::depthCompareFunction() const\n{\n    return Object::sendMessage<MTL::CompareFunction>(this, _MTL_PRIVATE_SEL(depthCompareFunction));\n}\n\n_MTL_INLINE void MTL::DepthStencilDescriptor::setDepthCompareFunction(MTL::CompareFunction depthCompareFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthCompareFunction_), depthCompareFunction);\n}\n\n_MTL_INLINE bool 
MTL::DepthStencilDescriptor::depthWriteEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isDepthWriteEnabled));\n}\n\n_MTL_INLINE void MTL::DepthStencilDescriptor::setDepthWriteEnabled(bool depthWriteEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthWriteEnabled_), depthWriteEnabled);\n}\n\n_MTL_INLINE MTL::StencilDescriptor* MTL::DepthStencilDescriptor::frontFaceStencil() const\n{\n    return Object::sendMessage<MTL::StencilDescriptor*>(this, _MTL_PRIVATE_SEL(frontFaceStencil));\n}\n\n_MTL_INLINE void MTL::DepthStencilDescriptor::setFrontFaceStencil(const MTL::StencilDescriptor* frontFaceStencil)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFrontFaceStencil_), frontFaceStencil);\n}\n\n_MTL_INLINE MTL::StencilDescriptor* MTL::DepthStencilDescriptor::backFaceStencil() const\n{\n    return Object::sendMessage<MTL::StencilDescriptor*>(this, _MTL_PRIVATE_SEL(backFaceStencil));\n}\n\n_MTL_INLINE void MTL::DepthStencilDescriptor::setBackFaceStencil(const MTL::StencilDescriptor* backFaceStencil)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBackFaceStencil_), backFaceStencil);\n}\n\n_MTL_INLINE NS::String* MTL::DepthStencilDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::DepthStencilDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE NS::String* MTL::DepthStencilState::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::Device* MTL::DepthStencilState::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n#pragma once\n\n#include <IOSurface/IOSurfaceRef.h>\n#include <functional>\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, IOCompressionMethod) {\n    IOCompressionMethodZlib = 0,\n    IOCompressionMethodLZFSE = 
1,\n    IOCompressionMethodLZ4 = 2,\n    IOCompressionMethodLZMA = 3,\n    IOCompressionMethodLZBitmap = 4,\n};\n\n_MTL_ENUM(NS::UInteger, FeatureSet) {\n    FeatureSet_iOS_GPUFamily1_v1 = 0,\n    FeatureSet_iOS_GPUFamily2_v1 = 1,\n    FeatureSet_iOS_GPUFamily1_v2 = 2,\n    FeatureSet_iOS_GPUFamily2_v2 = 3,\n    FeatureSet_iOS_GPUFamily3_v1 = 4,\n    FeatureSet_iOS_GPUFamily1_v3 = 5,\n    FeatureSet_iOS_GPUFamily2_v3 = 6,\n    FeatureSet_iOS_GPUFamily3_v2 = 7,\n    FeatureSet_iOS_GPUFamily1_v4 = 8,\n    FeatureSet_iOS_GPUFamily2_v4 = 9,\n    FeatureSet_iOS_GPUFamily3_v3 = 10,\n    FeatureSet_iOS_GPUFamily4_v1 = 11,\n    FeatureSet_iOS_GPUFamily1_v5 = 12,\n    FeatureSet_iOS_GPUFamily2_v5 = 13,\n    FeatureSet_iOS_GPUFamily3_v4 = 14,\n    FeatureSet_iOS_GPUFamily4_v2 = 15,\n    FeatureSet_iOS_GPUFamily5_v1 = 16,\n    FeatureSet_macOS_GPUFamily1_v1 = 10000,\n    FeatureSet_OSX_GPUFamily1_v1 = 10000,\n    FeatureSet_macOS_GPUFamily1_v2 = 10001,\n    FeatureSet_OSX_GPUFamily1_v2 = 10001,\n    FeatureSet_macOS_ReadWriteTextureTier2 = 10002,\n    FeatureSet_OSX_ReadWriteTextureTier2 = 10002,\n    FeatureSet_macOS_GPUFamily1_v3 = 10003,\n    FeatureSet_macOS_GPUFamily1_v4 = 10004,\n    FeatureSet_macOS_GPUFamily2_v1 = 10005,\n    FeatureSet_watchOS_GPUFamily1_v1 = 20000,\n    FeatureSet_WatchOS_GPUFamily1_v1 = 20000,\n    FeatureSet_watchOS_GPUFamily2_v1 = 20001,\n    FeatureSet_WatchOS_GPUFamily2_v1 = 20001,\n    FeatureSet_tvOS_GPUFamily1_v1 = 30000,\n    FeatureSet_TVOS_GPUFamily1_v1 = 30000,\n    FeatureSet_tvOS_GPUFamily1_v2 = 30001,\n    FeatureSet_tvOS_GPUFamily1_v3 = 30002,\n    FeatureSet_tvOS_GPUFamily2_v1 = 30003,\n    FeatureSet_tvOS_GPUFamily1_v4 = 30004,\n    FeatureSet_tvOS_GPUFamily2_v2 = 30005,\n};\n\n_MTL_ENUM(NS::Integer, GPUFamily) {\n    GPUFamilyApple1 = 1001,\n    GPUFamilyApple2 = 1002,\n    GPUFamilyApple3 = 1003,\n    GPUFamilyApple4 = 1004,\n    GPUFamilyApple5 = 1005,\n    GPUFamilyApple6 = 1006,\n    GPUFamilyApple7 = 1007,\n    
GPUFamilyApple8 = 1008,\n    GPUFamilyApple9 = 1009,\n    GPUFamilyMac1 = 2001,\n    GPUFamilyMac2 = 2002,\n    GPUFamilyCommon1 = 3001,\n    GPUFamilyCommon2 = 3002,\n    GPUFamilyCommon3 = 3003,\n    GPUFamilyMacCatalyst1 = 4001,\n    GPUFamilyMacCatalyst2 = 4002,\n    GPUFamilyMetal3 = 5001,\n};\n\n_MTL_ENUM(NS::UInteger, DeviceLocation) {\n    DeviceLocationBuiltIn = 0,\n    DeviceLocationSlot = 1,\n    DeviceLocationExternal = 2,\n    DeviceLocationUnspecified = NS::UIntegerMax,\n};\n\n_MTL_OPTIONS(NS::UInteger, PipelineOption) {\n    PipelineOptionNone = 0,\n    PipelineOptionArgumentInfo = 1,\n    PipelineOptionBindingInfo = 1,\n    PipelineOptionBufferTypeInfo = 2,\n    PipelineOptionFailOnBinaryArchiveMiss = 4,\n};\n\n_MTL_ENUM(NS::UInteger, ReadWriteTextureTier) {\n    ReadWriteTextureTierNone = 0,\n    ReadWriteTextureTier1 = 1,\n    ReadWriteTextureTier2 = 2,\n};\n\n_MTL_ENUM(NS::UInteger, ArgumentBuffersTier) {\n    ArgumentBuffersTier1 = 0,\n    ArgumentBuffersTier2 = 1,\n};\n\n_MTL_ENUM(NS::UInteger, SparseTextureRegionAlignmentMode) {\n    SparseTextureRegionAlignmentModeOutward = 0,\n    SparseTextureRegionAlignmentModeInward = 1,\n};\n\n_MTL_ENUM(NS::Integer, SparsePageSize) {\n    SparsePageSize16 = 101,\n    SparsePageSize64 = 102,\n    SparsePageSize256 = 103,\n};\n\nstruct AccelerationStructureSizes\n{\n    NS::UInteger accelerationStructureSize;\n    NS::UInteger buildScratchBufferSize;\n    NS::UInteger refitScratchBufferSize;\n} _MTL_PACKED;\n\n_MTL_ENUM(NS::UInteger, CounterSamplingPoint) {\n    CounterSamplingPointAtStageBoundary = 0,\n    CounterSamplingPointAtDrawBoundary = 1,\n    CounterSamplingPointAtDispatchBoundary = 2,\n    CounterSamplingPointAtTileDispatchBoundary = 3,\n    CounterSamplingPointAtBlitBoundary = 4,\n};\n\nstruct SizeAndAlign\n{\n    NS::UInteger size;\n    NS::UInteger align;\n} _MTL_PACKED;\n\nclass ArgumentDescriptor : public NS::Copying<ArgumentDescriptor>\n{\npublic:\n    static class ArgumentDescriptor* 
alloc();\n\n    class ArgumentDescriptor*        init();\n\n    static class ArgumentDescriptor* argumentDescriptor();\n\n    MTL::DataType                    dataType() const;\n    void                             setDataType(MTL::DataType dataType);\n\n    NS::UInteger                     index() const;\n    void                             setIndex(NS::UInteger index);\n\n    NS::UInteger                     arrayLength() const;\n    void                             setArrayLength(NS::UInteger arrayLength);\n\n    MTL::BindingAccess               access() const;\n    void                             setAccess(MTL::BindingAccess access);\n\n    MTL::TextureType                 textureType() const;\n    void                             setTextureType(MTL::TextureType textureType);\n\n    NS::UInteger                     constantBlockAlignment() const;\n    void                             setConstantBlockAlignment(NS::UInteger constantBlockAlignment);\n};\n\nclass Architecture : public NS::Copying<Architecture>\n{\npublic:\n    static class Architecture* alloc();\n\n    class Architecture*        init();\n\n    NS::String*                name() const;\n};\n\nusing DeviceNotificationName = NS::String*;\n_MTL_CONST(DeviceNotificationName, DeviceWasAddedNotification);\n_MTL_CONST(DeviceNotificationName, DeviceRemovalRequestedNotification);\n_MTL_CONST(DeviceNotificationName, DeviceWasRemovedNotification);\n_MTL_CONST(NS::ErrorUserInfoKey, CommandBufferEncoderInfoErrorKey);\n\nusing DeviceNotificationHandlerBlock = void (^)(class Device* pDevice, DeviceNotificationName notifyName);\n\nusing DeviceNotificationHandlerFunction = std::function<void(class Device* pDevice, DeviceNotificationName notifyName)>;\n\nusing AutoreleasedComputePipelineReflection = class ComputePipelineReflection*;\n\nusing AutoreleasedRenderPipelineReflection = class RenderPipelineReflection*;\n\nusing NewLibraryCompletionHandler = void (^)(class Library*, NS::Error*);\n\nusing 
NewLibraryCompletionHandlerFunction = std::function<void(class Library*, NS::Error*)>;\n\nusing NewRenderPipelineStateCompletionHandler = void (^)(class RenderPipelineState*, NS::Error*);\n\nusing NewRenderPipelineStateCompletionHandlerFunction = std::function<void(class RenderPipelineState*, NS::Error*)>;\n\nusing NewRenderPipelineStateWithReflectionCompletionHandler = void (^)(class RenderPipelineState*, class RenderPipelineReflection*, NS::Error*);\n\nusing NewRenderPipelineStateWithReflectionCompletionHandlerFunction = std::function<void(class RenderPipelineState*, class RenderPipelineReflection*, NS::Error*)>;\n\nusing NewComputePipelineStateCompletionHandler = void (^)(class ComputePipelineState*, NS::Error*);\n\nusing NewComputePipelineStateCompletionHandlerFunction = std::function<void(class ComputePipelineState*, NS::Error*)>;\n\nusing NewComputePipelineStateWithReflectionCompletionHandler = void (^)(class ComputePipelineState*, class ComputePipelineReflection*, NS::Error*);\n\nusing NewComputePipelineStateWithReflectionCompletionHandlerFunction = std::function<void(class ComputePipelineState*, class ComputePipelineReflection*, NS::Error*)>;\n\nusing Timestamp = std::uint64_t;\n\nMTL::Device* CreateSystemDefaultDevice();\n\nNS::Array*   CopyAllDevices();\n\nNS::Array*   CopyAllDevicesWithObserver(NS::Object** pOutObserver, DeviceNotificationHandlerBlock handler);\n\nNS::Array*   CopyAllDevicesWithObserver(NS::Object** pOutObserver, const DeviceNotificationHandlerFunction& handler);\n\nvoid         RemoveDeviceObserver(const NS::Object* pObserver);\n\nclass Device : public NS::Referencing<Device>\n{\npublic:\n    void                            newLibrary(const NS::String* pSource, const class CompileOptions* pOptions, const NewLibraryCompletionHandlerFunction& completionHandler);\n\n    void                            newLibrary(const class StitchedLibraryDescriptor* pDescriptor, const MTL::NewLibraryCompletionHandlerFunction& completionHandler);\n\n    
void                            newRenderPipelineState(const class RenderPipelineDescriptor* pDescriptor, const NewRenderPipelineStateCompletionHandlerFunction& completionHandler);\n\n    void                            newRenderPipelineState(const class RenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler);\n\n    void                            newRenderPipelineState(const class TileRenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler);\n\n    void                            newComputePipelineState(const class Function* pFunction, const NewComputePipelineStateCompletionHandlerFunction& completionHandler);\n\n    void                            newComputePipelineState(const class Function* pFunction, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler);\n\n    void                            newComputePipelineState(const class ComputePipelineDescriptor* pDescriptor, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler);\n\n    bool                            isHeadless() const;\n\n    NS::String*                     name() const;\n\n    uint64_t                        registryID() const;\n\n    class Architecture*             architecture() const;\n\n    MTL::Size                       maxThreadsPerThreadgroup() const;\n\n    bool                            lowPower() const;\n\n    bool                            headless() const;\n\n    bool                            removable() const;\n\n    bool                            hasUnifiedMemory() const;\n\n    uint64_t                        recommendedMaxWorkingSetSize() const;\n\n    MTL::DeviceLocation             location() const;\n\n    NS::UInteger                    locationNumber() const;\n\n    
uint64_t                        maxTransferRate() const;\n\n    bool                            depth24Stencil8PixelFormatSupported() const;\n\n    MTL::ReadWriteTextureTier       readWriteTextureSupport() const;\n\n    MTL::ArgumentBuffersTier        argumentBuffersSupport() const;\n\n    bool                            rasterOrderGroupsSupported() const;\n\n    bool                            supports32BitFloatFiltering() const;\n\n    bool                            supports32BitMSAA() const;\n\n    bool                            supportsQueryTextureLOD() const;\n\n    bool                            supportsBCTextureCompression() const;\n\n    bool                            supportsPullModelInterpolation() const;\n\n    bool                            barycentricCoordsSupported() const;\n\n    bool                            supportsShaderBarycentricCoordinates() const;\n\n    NS::UInteger                    currentAllocatedSize() const;\n\n    class LogState*                 newLogState(const class LogStateDescriptor* descriptor, NS::Error** error);\n\n    class CommandQueue*             newCommandQueue();\n\n    class CommandQueue*             newCommandQueue(NS::UInteger maxCommandBufferCount);\n\n    class CommandQueue*             newCommandQueue(const class CommandQueueDescriptor* descriptor);\n\n    MTL::SizeAndAlign               heapTextureSizeAndAlign(const class TextureDescriptor* desc);\n\n    MTL::SizeAndAlign               heapBufferSizeAndAlign(NS::UInteger length, MTL::ResourceOptions options);\n\n    class Heap*                     newHeap(const class HeapDescriptor* descriptor);\n\n    class Buffer*                   newBuffer(NS::UInteger length, MTL::ResourceOptions options);\n\n    class Buffer*                   newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options);\n\n    class Buffer*                   newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options, void 
(^deallocator)(void*, NS::UInteger));\n\n    class DepthStencilState*        newDepthStencilState(const class DepthStencilDescriptor* descriptor);\n\n    class Texture*                  newTexture(const class TextureDescriptor* descriptor);\n\n    class Texture*                  newTexture(const class TextureDescriptor* descriptor, const IOSurfaceRef iosurface, NS::UInteger plane);\n\n    class Texture*                  newSharedTexture(const class TextureDescriptor* descriptor);\n\n    class Texture*                  newSharedTexture(const class SharedTextureHandle* sharedHandle);\n\n    class SamplerState*             newSamplerState(const class SamplerDescriptor* descriptor);\n\n    class Library*                  newDefaultLibrary();\n\n    class Library*                  newDefaultLibrary(const NS::Bundle* bundle, NS::Error** error);\n\n    class Library*                  newLibrary(const NS::String* filepath, NS::Error** error);\n\n    class Library*                  newLibrary(const NS::URL* url, NS::Error** error);\n\n    class Library*                  newLibrary(const dispatch_data_t data, NS::Error** error);\n\n    class Library*                  newLibrary(const NS::String* source, const class CompileOptions* options, NS::Error** error);\n\n    void                            newLibrary(const NS::String* source, const class CompileOptions* options, const MTL::NewLibraryCompletionHandler completionHandler);\n\n    class Library*                  newLibrary(const class StitchedLibraryDescriptor* descriptor, NS::Error** error);\n\n    void                            newLibrary(const class StitchedLibraryDescriptor* descriptor, const MTL::NewLibraryCompletionHandler completionHandler);\n\n    class RenderPipelineState*      newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, NS::Error** error);\n\n    class RenderPipelineState*      newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, 
const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error);\n\n    void                            newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, const MTL::NewRenderPipelineStateCompletionHandler completionHandler);\n\n    void                            newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler);\n\n    class ComputePipelineState*     newComputePipelineState(const class Function* computeFunction, NS::Error** error);\n\n    class ComputePipelineState*     newComputePipelineState(const class Function* computeFunction, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error);\n\n    void                            newComputePipelineState(const class Function* computeFunction, const MTL::NewComputePipelineStateCompletionHandler completionHandler);\n\n    void                            newComputePipelineState(const class Function* computeFunction, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler);\n\n    class ComputePipelineState*     newComputePipelineState(const class ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error);\n\n    void                            newComputePipelineState(const class ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler);\n\n    class Fence*                    newFence();\n\n    bool                            supportsFeatureSet(MTL::FeatureSet featureSet);\n\n    bool                            supportsFamily(MTL::GPUFamily gpuFamily);\n\n    bool                            supportsTextureSampleCount(NS::UInteger sampleCount);\n\n    
NS::UInteger                    minimumLinearTextureAlignmentForPixelFormat(MTL::PixelFormat format);\n\n    NS::UInteger                    minimumTextureBufferAlignmentForPixelFormat(MTL::PixelFormat format);\n\n    class RenderPipelineState*      newRenderPipelineState(const class TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error);\n\n    void                            newRenderPipelineState(const class TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler);\n\n    class RenderPipelineState*      newRenderPipelineState(const class MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error);\n\n    void                            newRenderPipelineState(const class MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler);\n\n    NS::UInteger                    maxThreadgroupMemoryLength() const;\n\n    NS::UInteger                    maxArgumentBufferSamplerCount() const;\n\n    bool                            programmableSamplePositionsSupported() const;\n\n    void                            getDefaultSamplePositions(MTL::SamplePosition* positions, NS::UInteger count);\n\n    class ArgumentEncoder*          newArgumentEncoder(const NS::Array* arguments);\n\n    bool                            supportsRasterizationRateMap(NS::UInteger layerCount);\n\n    class RasterizationRateMap*     newRasterizationRateMap(const class RasterizationRateMapDescriptor* descriptor);\n\n    class IndirectCommandBuffer*    newIndirectCommandBuffer(const class IndirectCommandBufferDescriptor* descriptor, NS::UInteger maxCount, MTL::ResourceOptions options);\n\n    class Event*                    
newEvent();\n\n    class SharedEvent*              newSharedEvent();\n\n    class SharedEvent*              newSharedEvent(const class SharedEventHandle* sharedEventHandle);\n\n    uint64_t                        peerGroupID() const;\n\n    uint32_t                        peerIndex() const;\n\n    uint32_t                        peerCount() const;\n\n    class IOFileHandle*             newIOHandle(const NS::URL* url, NS::Error** error);\n\n    class IOCommandQueue*           newIOCommandQueue(const class IOCommandQueueDescriptor* descriptor, NS::Error** error);\n\n    class IOFileHandle*             newIOHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error);\n\n    class IOFileHandle*             newIOFileHandle(const NS::URL* url, NS::Error** error);\n\n    class IOFileHandle*             newIOFileHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error);\n\n    MTL::Size                       sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount);\n\n    NS::UInteger                    sparseTileSizeInBytes() const;\n\n    void                            convertSparsePixelRegions(const MTL::Region* pixelRegions, MTL::Region* tileRegions, MTL::Size tileSize, MTL::SparseTextureRegionAlignmentMode mode, NS::UInteger numRegions);\n\n    void                            convertSparseTileRegions(const MTL::Region* tileRegions, MTL::Region* pixelRegions, MTL::Size tileSize, NS::UInteger numRegions);\n\n    NS::UInteger                    sparseTileSizeInBytes(MTL::SparsePageSize sparsePageSize);\n\n    MTL::Size                       sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount, MTL::SparsePageSize sparsePageSize);\n\n    NS::UInteger                    maxBufferLength() const;\n\n    NS::Array*                      counterSets() const;\n\n    class CounterSampleBuffer*      newCounterSampleBuffer(const 
class CounterSampleBufferDescriptor* descriptor, NS::Error** error);\n\n    void                            sampleTimestamps(MTL::Timestamp* cpuTimestamp, MTL::Timestamp* gpuTimestamp);\n\n    class ArgumentEncoder*          newArgumentEncoder(const class BufferBinding* bufferBinding);\n\n    bool                            supportsCounterSampling(MTL::CounterSamplingPoint samplingPoint);\n\n    bool                            supportsVertexAmplificationCount(NS::UInteger count);\n\n    bool                            supportsDynamicLibraries() const;\n\n    bool                            supportsRenderDynamicLibraries() const;\n\n    class DynamicLibrary*           newDynamicLibrary(const class Library* library, NS::Error** error);\n\n    class DynamicLibrary*           newDynamicLibrary(const NS::URL* url, NS::Error** error);\n\n    class BinaryArchive*            newBinaryArchive(const class BinaryArchiveDescriptor* descriptor, NS::Error** error);\n\n    bool                            supportsRaytracing() const;\n\n    MTL::AccelerationStructureSizes accelerationStructureSizes(const class AccelerationStructureDescriptor* descriptor);\n\n    class AccelerationStructure*    newAccelerationStructure(NS::UInteger size);\n\n    class AccelerationStructure*    newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor);\n\n    MTL::SizeAndAlign               heapAccelerationStructureSizeAndAlign(NS::UInteger size);\n\n    MTL::SizeAndAlign               heapAccelerationStructureSizeAndAlign(const class AccelerationStructureDescriptor* descriptor);\n\n    bool                            supportsFunctionPointers() const;\n\n    bool                            supportsFunctionPointersFromRender() const;\n\n    bool                            supportsRaytracingFromRender() const;\n\n    bool                            supportsPrimitiveMotionBlur() const;\n\n    bool                            shouldMaximizeConcurrentCompilation() const;\n    void   
                         setShouldMaximizeConcurrentCompilation(bool shouldMaximizeConcurrentCompilation);\n\n    NS::UInteger                    maximumConcurrentCompilationTaskCount() const;\n\n    class ResidencySet*             newResidencySet(const class ResidencySetDescriptor* desc, NS::Error** error);\n};\n\n}\n\n_MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ArgumentDescriptor>(_MTL_PRIVATE_CLS(MTLArgumentDescriptor));\n}\n\n_MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::init()\n{\n    return NS::Object::init<MTL::ArgumentDescriptor>();\n}\n\n_MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::argumentDescriptor()\n{\n    return Object::sendMessage<MTL::ArgumentDescriptor*>(_MTL_PRIVATE_CLS(MTLArgumentDescriptor), _MTL_PRIVATE_SEL(argumentDescriptor));\n}\n\n_MTL_INLINE MTL::DataType MTL::ArgumentDescriptor::dataType() const\n{\n    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(dataType));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setDataType(MTL::DataType dataType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDataType_), dataType);\n}\n\n_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::index() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(index));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setIndex(NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndex_), index);\n}\n\n_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::arrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(arrayLength));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setArrayLength(NS::UInteger arrayLength)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArrayLength_), arrayLength);\n}\n\n_MTL_INLINE MTL::BindingAccess MTL::ArgumentDescriptor::access() const\n{\n    return Object::sendMessage<MTL::BindingAccess>(this, 
_MTL_PRIVATE_SEL(access));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setAccess(MTL::BindingAccess access)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAccess_), access);\n}\n\n_MTL_INLINE MTL::TextureType MTL::ArgumentDescriptor::textureType() const\n{\n    return Object::sendMessage<MTL::TextureType>(this, _MTL_PRIVATE_SEL(textureType));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setTextureType(MTL::TextureType textureType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTextureType_), textureType);\n}\n\n_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::constantBlockAlignment() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(constantBlockAlignment));\n}\n\n_MTL_INLINE void MTL::ArgumentDescriptor::setConstantBlockAlignment(NS::UInteger constantBlockAlignment)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setConstantBlockAlignment_), constantBlockAlignment);\n}\n\n_MTL_INLINE MTL::Architecture* MTL::Architecture::alloc()\n{\n    return NS::Object::alloc<MTL::Architecture>(_MTL_PRIVATE_CLS(MTLArchitecture));\n}\n\n_MTL_INLINE MTL::Architecture* MTL::Architecture::init()\n{\n    return NS::Object::init<MTL::Architecture>();\n}\n\n_MTL_INLINE NS::String* MTL::Architecture::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceWasAddedNotification);\n_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceRemovalRequestedNotification);\n_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceWasRemovedNotification);\n_MTL_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, CommandBufferEncoderInfoErrorKey);\n\n#if defined(MTL_PRIVATE_IMPLEMENTATION)\n\nextern \"C\" MTL::Device* MTLCreateSystemDefaultDevice();\n\nextern \"C\" NS::Array*   MTLCopyAllDevices();\n\nextern \"C\" NS::Array*   MTLCopyAllDevicesWithObserver(NS::Object**, MTL::DeviceNotificationHandlerBlock);\n\nextern \"C\" 
void         MTLRemoveDeviceObserver(const NS::Object*);\n\n#include <TargetConditionals.h>\n\n_NS_EXPORT MTL::Device* MTL::CreateSystemDefaultDevice()\n{\n    return ::MTLCreateSystemDefaultDevice();\n}\n\n_NS_EXPORT NS::Array* MTL::CopyAllDevices()\n{\n#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 180000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)\n    return ::MTLCopyAllDevices();\n#else\n    return nullptr;\n#endif // __IPHONE_18\n}\n\n_NS_EXPORT NS::Array* MTL::CopyAllDevicesWithObserver(NS::Object** pOutObserver, DeviceNotificationHandlerBlock handler)\n{\n#if TARGET_OS_OSX\n    return ::MTLCopyAllDevicesWithObserver(pOutObserver, handler);\n#else\n    (void)pOutObserver;\n    (void)handler;\n\n    return nullptr;\n#endif // TARGET_OS_OSX\n}\n\n_NS_EXPORT NS::Array* MTL::CopyAllDevicesWithObserver(NS::Object** pOutObserver, const DeviceNotificationHandlerFunction& handler)\n{\n    __block DeviceNotificationHandlerFunction function = handler;\n\n    return CopyAllDevicesWithObserver(pOutObserver, ^(Device* pDevice, DeviceNotificationName pNotificationName) { function(pDevice, pNotificationName); });\n}\n\n_NS_EXPORT void MTL::RemoveDeviceObserver(const NS::Object* pObserver)\n{\n    (void)pObserver;\n#if TARGET_OS_OSX\n    ::MTLRemoveDeviceObserver(pObserver);\n#endif // TARGET_OS_OSX\n}\n\n#endif // MTL_PRIVATE_IMPLEMENTATION\n\n_MTL_INLINE void MTL::Device::newLibrary(const NS::String* pSource, const CompileOptions* pOptions, const NewLibraryCompletionHandlerFunction& completionHandler)\n{\n    __block NewLibraryCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newLibrary(pSource, pOptions, ^(Library* pLibrary, NS::Error* pError) { blockCompletionHandler(pLibrary, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newLibrary(const class StitchedLibraryDescriptor* pDescriptor, const MTL::NewLibraryCompletionHandlerFunction& completionHandler)\n{\n    __block NewLibraryCompletionHandlerFunction blockCompletionHandler = 
completionHandler;\n\n    newLibrary(pDescriptor, ^(Library* pLibrary, NS::Error* pError) { blockCompletionHandler(pLibrary, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const RenderPipelineDescriptor* pDescriptor, const NewRenderPipelineStateCompletionHandlerFunction& completionHandler)\n{\n    __block NewRenderPipelineStateCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newRenderPipelineState(pDescriptor, ^(RenderPipelineState* pPipelineState, NS::Error* pError) { blockCompletionHandler(pPipelineState, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const RenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler)\n{\n    __block NewRenderPipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newRenderPipelineState(pDescriptor, options, ^(RenderPipelineState* pPipelineState, class RenderPipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const TileRenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler)\n{\n    __block NewRenderPipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newRenderPipelineState(pDescriptor, options, ^(RenderPipelineState* pPipelineState, class RenderPipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const class Function* pFunction, const NewComputePipelineStateCompletionHandlerFunction& completionHandler)\n{\n    __block NewComputePipelineStateCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    
newComputePipelineState(pFunction, ^(ComputePipelineState* pPipelineState, NS::Error* pError) { blockCompletionHandler(pPipelineState, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const Function* pFunction, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler)\n{\n    __block NewComputePipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newComputePipelineState(pFunction, options, ^(ComputePipelineState* pPipelineState, ComputePipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); });\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const ComputePipelineDescriptor* pDescriptor, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler)\n{\n    __block NewComputePipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler;\n\n    newComputePipelineState(pDescriptor, options, ^(ComputePipelineState* pPipelineState, ComputePipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); });\n}\n\n_MTL_INLINE bool MTL::Device::isHeadless() const\n{\n    return headless();\n}\n\n_MTL_INLINE NS::String* MTL::Device::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE uint64_t MTL::Device::registryID() const\n{\n    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(registryID));\n}\n\n_MTL_INLINE MTL::Architecture* MTL::Device::architecture() const\n{\n    return Object::sendMessage<MTL::Architecture*>(this, _MTL_PRIVATE_SEL(architecture));\n}\n\n_MTL_INLINE MTL::Size MTL::Device::maxThreadsPerThreadgroup() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(maxThreadsPerThreadgroup));\n}\n\n_MTL_INLINE bool MTL::Device::lowPower() 
const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isLowPower));\n}\n\n_MTL_INLINE bool MTL::Device::headless() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isHeadless));\n}\n\n_MTL_INLINE bool MTL::Device::removable() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isRemovable));\n}\n\n_MTL_INLINE bool MTL::Device::hasUnifiedMemory() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(hasUnifiedMemory));\n}\n\n_MTL_INLINE uint64_t MTL::Device::recommendedMaxWorkingSetSize() const\n{\n    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(recommendedMaxWorkingSetSize));\n}\n\n_MTL_INLINE MTL::DeviceLocation MTL::Device::location() const\n{\n    return Object::sendMessage<MTL::DeviceLocation>(this, _MTL_PRIVATE_SEL(location));\n}\n\n_MTL_INLINE NS::UInteger MTL::Device::locationNumber() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(locationNumber));\n}\n\n_MTL_INLINE uint64_t MTL::Device::maxTransferRate() const\n{\n    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(maxTransferRate));\n}\n\n_MTL_INLINE bool MTL::Device::depth24Stencil8PixelFormatSupported() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(isDepth24Stencil8PixelFormatSupported));\n}\n\n_MTL_INLINE MTL::ReadWriteTextureTier MTL::Device::readWriteTextureSupport() const\n{\n    return Object::sendMessage<MTL::ReadWriteTextureTier>(this, _MTL_PRIVATE_SEL(readWriteTextureSupport));\n}\n\n_MTL_INLINE MTL::ArgumentBuffersTier MTL::Device::argumentBuffersSupport() const\n{\n    return Object::sendMessage<MTL::ArgumentBuffersTier>(this, _MTL_PRIVATE_SEL(argumentBuffersSupport));\n}\n\n_MTL_INLINE bool MTL::Device::rasterOrderGroupsSupported() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(areRasterOrderGroupsSupported));\n}\n\n_MTL_INLINE bool MTL::Device::supports32BitFloatFiltering() const\n{\n    return 
Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supports32BitFloatFiltering));\n}\n\n_MTL_INLINE bool MTL::Device::supports32BitMSAA() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supports32BitMSAA));\n}\n\n_MTL_INLINE bool MTL::Device::supportsQueryTextureLOD() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsQueryTextureLOD));\n}\n\n_MTL_INLINE bool MTL::Device::supportsBCTextureCompression() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsBCTextureCompression));\n}\n\n_MTL_INLINE bool MTL::Device::supportsPullModelInterpolation() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsPullModelInterpolation));\n}\n\n_MTL_INLINE bool MTL::Device::barycentricCoordsSupported() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(areBarycentricCoordsSupported));\n}\n\n_MTL_INLINE bool MTL::Device::supportsShaderBarycentricCoordinates() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsShaderBarycentricCoordinates));\n}\n\n_MTL_INLINE NS::UInteger MTL::Device::currentAllocatedSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(currentAllocatedSize));\n}\n\n_MTL_INLINE MTL::LogState* MTL::Device::newLogState(const MTL::LogStateDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<MTL::LogState*>(this, _MTL_PRIVATE_SEL(newLogStateWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE MTL::CommandQueue* MTL::Device::newCommandQueue()\n{\n    return Object::sendMessage<MTL::CommandQueue*>(this, _MTL_PRIVATE_SEL(newCommandQueue));\n}\n\n_MTL_INLINE MTL::CommandQueue* MTL::Device::newCommandQueue(NS::UInteger maxCommandBufferCount)\n{\n    return Object::sendMessage<MTL::CommandQueue*>(this, _MTL_PRIVATE_SEL(newCommandQueueWithMaxCommandBufferCount_), maxCommandBufferCount);\n}\n\n_MTL_INLINE MTL::CommandQueue* 
MTL::Device::newCommandQueue(const MTL::CommandQueueDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::CommandQueue*>(this, _MTL_PRIVATE_SEL(newCommandQueueWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::SizeAndAlign MTL::Device::heapTextureSizeAndAlign(const MTL::TextureDescriptor* desc)\n{\n    return Object::sendMessage<MTL::SizeAndAlign>(this, _MTL_PRIVATE_SEL(heapTextureSizeAndAlignWithDescriptor_), desc);\n}\n\n_MTL_INLINE MTL::SizeAndAlign MTL::Device::heapBufferSizeAndAlign(NS::UInteger length, MTL::ResourceOptions options)\n{\n    return Object::sendMessage<MTL::SizeAndAlign>(this, _MTL_PRIVATE_SEL(heapBufferSizeAndAlignWithLength_options_), length, options);\n}\n\n_MTL_INLINE MTL::Heap* MTL::Device::newHeap(const MTL::HeapDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::Heap*>(this, _MTL_PRIVATE_SEL(newHeapWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(NS::UInteger length, MTL::ResourceOptions options)\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_), length, options);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options)\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newBufferWithBytes_length_options_), pointer, length, options);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options, void (^deallocator)(void*, NS::UInteger))\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newBufferWithBytesNoCopy_length_options_deallocator_), pointer, length, options, deallocator);\n}\n\n_MTL_INLINE MTL::DepthStencilState* MTL::Device::newDepthStencilState(const MTL::DepthStencilDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::DepthStencilState*>(this, _MTL_PRIVATE_SEL(newDepthStencilStateWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE 
MTL::Texture* MTL::Device::newTexture(const MTL::TextureDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Device::newTexture(const MTL::TextureDescriptor* descriptor, const IOSurfaceRef iosurface, NS::UInteger plane)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_iosurface_plane_), descriptor, iosurface, plane);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Device::newSharedTexture(const MTL::TextureDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newSharedTextureWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Device::newSharedTexture(const MTL::SharedTextureHandle* sharedHandle)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newSharedTextureWithHandle_), sharedHandle);\n}\n\n_MTL_INLINE MTL::SamplerState* MTL::Device::newSamplerState(const MTL::SamplerDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::SamplerState*>(this, _MTL_PRIVATE_SEL(newSamplerStateWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newDefaultLibrary()\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newDefaultLibrary));\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newDefaultLibrary(const NS::Bundle* bundle, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newDefaultLibraryWithBundle_error_), bundle, error);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::String* filepath, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newLibraryWithFile_error_), filepath, error);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::URL* url, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newLibraryWithURL_error_), url, 
error);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newLibrary(const dispatch_data_t data, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newLibraryWithData_error_), data, error);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::String* source, const MTL::CompileOptions* options, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newLibraryWithSource_options_error_), source, options, error);\n}\n\n_MTL_INLINE void MTL::Device::newLibrary(const NS::String* source, const MTL::CompileOptions* options, const MTL::NewLibraryCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newLibraryWithSource_options_completionHandler_), source, options, completionHandler);\n}\n\n_MTL_INLINE MTL::Library* MTL::Device::newLibrary(const MTL::StitchedLibraryDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Library*>(this, _MTL_PRIVATE_SEL(newLibraryWithStitchedDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE void MTL::Device::newLibrary(const MTL::StitchedLibraryDescriptor* descriptor, const MTL::NewLibraryCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newLibraryWithStitchedDescriptor_completionHandler_), descriptor, completionHandler);\n}\n\n_MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<MTL::RenderPipelineState*>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error)\n{\n    return Object::sendMessage<MTL::RenderPipelineState*>(this, 
_MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_options_reflection_error_), descriptor, options, reflection, error);\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, const MTL::NewRenderPipelineStateCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_completionHandler_), descriptor, completionHandler);\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_options_completionHandler_), descriptor, options, completionHandler);\n}\n\n_MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, NS::Error** error)\n{\n    return Object::sendMessage<MTL::ComputePipelineState*>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_error_), computeFunction, error);\n}\n\n_MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error)\n{\n    return Object::sendMessage<MTL::ComputePipelineState*>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_options_reflection_error_), computeFunction, options, reflection, error);\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, const MTL::NewComputePipelineStateCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_completionHandler_), computeFunction, completionHandler);\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, MTL::PipelineOption 
options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_options_completionHandler_), computeFunction, options, completionHandler);\n}\n\n_MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error)\n{\n    return Object::sendMessage<MTL::ComputePipelineState*>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithDescriptor_options_reflection_error_), descriptor, options, reflection, error);\n}\n\n_MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithDescriptor_options_completionHandler_), descriptor, options, completionHandler);\n}\n\n_MTL_INLINE MTL::Fence* MTL::Device::newFence()\n{\n    return Object::sendMessage<MTL::Fence*>(this, _MTL_PRIVATE_SEL(newFence));\n}\n\n_MTL_INLINE bool MTL::Device::supportsFeatureSet(MTL::FeatureSet featureSet)\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsFeatureSet_), featureSet);\n}\n\n_MTL_INLINE bool MTL::Device::supportsFamily(MTL::GPUFamily gpuFamily)\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsFamily_), gpuFamily);\n}\n\n_MTL_INLINE bool MTL::Device::supportsTextureSampleCount(NS::UInteger sampleCount)\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsTextureSampleCount_), sampleCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::Device::minimumLinearTextureAlignmentForPixelFormat(MTL::PixelFormat format)\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(minimumLinearTextureAlignmentForPixelFormat_), format);\n}\n\n_MTL_INLINE NS::UInteger MTL::Device::minimumTextureBufferAlignmentForPixelFormat(MTL::PixelFormat format)\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(minimumTextureBufferAlignmentForPixelFormat_), format);\n}\n\n_MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error)\n{\n    return Object::sendMessage<MTL::RenderPipelineState*>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithTileDescriptor_options_reflection_error_), descriptor, options, reflection, error);\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithTileDescriptor_options_completionHandler_), descriptor, options, completionHandler);\n}\n\n_MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error)\n{\n    return Object::sendMessage<MTL::RenderPipelineState*>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithMeshDescriptor_options_reflection_error_), descriptor, options, reflection, error);\n}\n\n_MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithMeshDescriptor_options_completionHandler_), descriptor, options, completionHandler);\n}\n\n_MTL_INLINE 
// MTL::Device — device limits, sample positions, argument encoders,
// rasterization-rate maps, indirect command buffers, events, peer-group
// queries and IO handles.  All are thin selector-forwarding wrappers.
NS::UInteger MTL::Device::maxThreadgroupMemoryLength() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxThreadgroupMemoryLength));
}

_MTL_INLINE NS::UInteger MTL::Device::maxArgumentBufferSamplerCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxArgumentBufferSamplerCount));
}

// C++ getter name drops the Objective-C "are" prefix of the underlying
// areProgrammableSamplePositionsSupported selector.
_MTL_INLINE bool MTL::Device::programmableSamplePositionsSupported() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(areProgrammableSamplePositionsSupported));
}

// Fills `positions` (caller-owned array of `count` entries) with the
// device's default sample positions.
_MTL_INLINE void MTL::Device::getDefaultSamplePositions(MTL::SamplePosition* positions, NS::UInteger count)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(getDefaultSamplePositions_count_), positions, count);
}

_MTL_INLINE MTL::ArgumentEncoder* MTL::Device::newArgumentEncoder(const NS::Array* arguments)
{
    return Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithArguments_), arguments);
}

_MTL_INLINE bool MTL::Device::supportsRasterizationRateMap(NS::UInteger layerCount)
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsRasterizationRateMapWithLayerCount_), layerCount);
}

_MTL_INLINE MTL::RasterizationRateMap* MTL::Device::newRasterizationRateMap(const MTL::RasterizationRateMapDescriptor* descriptor)
{
    return Object::sendMessage<MTL::RasterizationRateMap*>(this, _MTL_PRIVATE_SEL(newRasterizationRateMapWithDescriptor_), descriptor);
}

_MTL_INLINE MTL::IndirectCommandBuffer* MTL::Device::newIndirectCommandBuffer(const MTL::IndirectCommandBufferDescriptor* descriptor, NS::UInteger maxCount, MTL::ResourceOptions options)
{
    return Object::sendMessage<MTL::IndirectCommandBuffer*>(this, _MTL_PRIVATE_SEL(newIndirectCommandBufferWithDescriptor_maxCommandCount_options_), descriptor, maxCount, options);
}

_MTL_INLINE MTL::Event* MTL::Device::newEvent()
{
    return Object::sendMessage<MTL::Event*>(this, 
_MTL_PRIVATE_SEL(newEvent));
}

_MTL_INLINE MTL::SharedEvent* MTL::Device::newSharedEvent()
{
    return Object::sendMessage<MTL::SharedEvent*>(this, _MTL_PRIVATE_SEL(newSharedEvent));
}

_MTL_INLINE MTL::SharedEvent* MTL::Device::newSharedEvent(const MTL::SharedEventHandle* sharedEventHandle)
{
    return Object::sendMessage<MTL::SharedEvent*>(this, _MTL_PRIVATE_SEL(newSharedEventWithHandle_), sharedEventHandle);
}

// Multi-GPU peer-group identification.
_MTL_INLINE uint64_t MTL::Device::peerGroupID() const
{
    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(peerGroupID));
}

_MTL_INLINE uint32_t MTL::Device::peerIndex() const
{
    return Object::sendMessage<uint32_t>(this, _MTL_PRIVATE_SEL(peerIndex));
}

_MTL_INLINE uint32_t MTL::Device::peerCount() const
{
    return Object::sendMessage<uint32_t>(this, _MTL_PRIVATE_SEL(peerCount));
}

// Metal fast-resource-loading IO.  Note both newIOHandle (older selector
// names) and newIOFileHandle variants are exposed side by side.
_MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOHandle(const NS::URL* url, NS::Error** error)
{
    return Object::sendMessage<MTL::IOFileHandle*>(this, _MTL_PRIVATE_SEL(newIOHandleWithURL_error_), url, error);
}

_MTL_INLINE MTL::IOCommandQueue* MTL::Device::newIOCommandQueue(const MTL::IOCommandQueueDescriptor* descriptor, NS::Error** error)
{
    return Object::sendMessage<MTL::IOCommandQueue*>(this, _MTL_PRIVATE_SEL(newIOCommandQueueWithDescriptor_error_), descriptor, error);
}

_MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error)
{
    return Object::sendMessage<MTL::IOFileHandle*>(this, _MTL_PRIVATE_SEL(newIOHandleWithURL_compressionMethod_error_), url, compressionMethod, error);
}

_MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOFileHandle(const NS::URL* url, NS::Error** error)
{
    return Object::sendMessage<MTL::IOFileHandle*>(this, _MTL_PRIVATE_SEL(newIOFileHandleWithURL_error_), url, error);
}

_MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOFileHandle(const NS::URL* url, MTL::IOCompressionMethod 
// MTL::Device — sparse-texture tile geometry, GPU counters, and
// capability queries for counter sampling / vertex amplification /
// dynamic libraries.
compressionMethod, NS::Error** error)
{
    return Object::sendMessage<MTL::IOFileHandle*>(this, _MTL_PRIVATE_SEL(newIOFileHandleWithURL_compressionMethod_error_), url, compressionMethod, error);
}

_MTL_INLINE MTL::Size MTL::Device::sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount)
{
    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_), textureType, pixelFormat, sampleCount);
}

_MTL_INLINE NS::UInteger MTL::Device::sparseTileSizeInBytes() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sparseTileSizeInBytes));
}

// Converts `numRegions` pixel-space regions to tile-space (and vice versa
// below); both region arrays are caller-owned.
_MTL_INLINE void MTL::Device::convertSparsePixelRegions(const MTL::Region* pixelRegions, MTL::Region* tileRegions, MTL::Size tileSize, MTL::SparseTextureRegionAlignmentMode mode, NS::UInteger numRegions)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(convertSparsePixelRegions_toTileRegions_withTileSize_alignmentMode_numRegions_), pixelRegions, tileRegions, tileSize, mode, numRegions);
}

_MTL_INLINE void MTL::Device::convertSparseTileRegions(const MTL::Region* tileRegions, MTL::Region* pixelRegions, MTL::Size tileSize, NS::UInteger numRegions)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(convertSparseTileRegions_toPixelRegions_withTileSize_numRegions_), tileRegions, pixelRegions, tileSize, numRegions);
}

// Overloads taking an explicit SparsePageSize.
_MTL_INLINE NS::UInteger MTL::Device::sparseTileSizeInBytes(MTL::SparsePageSize sparsePageSize)
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sparseTileSizeInBytesForSparsePageSize_), sparsePageSize);
}

_MTL_INLINE MTL::Size MTL::Device::sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount, MTL::SparsePageSize sparsePageSize)
{
    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_sparsePageSize_), textureType, pixelFormat, 
sampleCount, sparsePageSize);
}

_MTL_INLINE NS::UInteger MTL::Device::maxBufferLength() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxBufferLength));
}

_MTL_INLINE NS::Array* MTL::Device::counterSets() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(counterSets));
}

_MTL_INLINE MTL::CounterSampleBuffer* MTL::Device::newCounterSampleBuffer(const MTL::CounterSampleBufferDescriptor* descriptor, NS::Error** error)
{
    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(newCounterSampleBufferWithDescriptor_error_), descriptor, error);
}

// Samples CPU and GPU timestamps "simultaneously"; both out-parameters are
// caller-owned.
_MTL_INLINE void MTL::Device::sampleTimestamps(MTL::Timestamp* cpuTimestamp, MTL::Timestamp* gpuTimestamp)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(sampleTimestamps_gpuTimestamp_), cpuTimestamp, gpuTimestamp);
}

_MTL_INLINE MTL::ArgumentEncoder* MTL::Device::newArgumentEncoder(const MTL::BufferBinding* bufferBinding)
{
    return Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferBinding_), bufferBinding);
}

_MTL_INLINE bool MTL::Device::supportsCounterSampling(MTL::CounterSamplingPoint samplingPoint)
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsCounterSampling_), samplingPoint);
}

_MTL_INLINE bool MTL::Device::supportsVertexAmplificationCount(NS::UInteger count)
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsVertexAmplificationCount_), count);
}

_MTL_INLINE bool MTL::Device::supportsDynamicLibraries() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsDynamicLibraries));
}

_MTL_INLINE bool MTL::Device::supportsRenderDynamicLibraries() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsRenderDynamicLibraries));
}

_MTL_INLINE MTL::DynamicLibrary* MTL::Device::newDynamicLibrary(const MTL::Library* library, NS::Error** error)
{
  
// MTL::Device — dynamic libraries, binary archives, raytracing /
// acceleration-structure sizing, compilation-concurrency knobs and
// residency sets.  Factory methods named new* return retained objects
// (Objective-C "new" naming convention — caller releases).
  return Object::sendMessage<MTL::DynamicLibrary*>(this, _MTL_PRIVATE_SEL(newDynamicLibrary_error_), library, error);
}

_MTL_INLINE MTL::DynamicLibrary* MTL::Device::newDynamicLibrary(const NS::URL* url, NS::Error** error)
{
    return Object::sendMessage<MTL::DynamicLibrary*>(this, _MTL_PRIVATE_SEL(newDynamicLibraryWithURL_error_), url, error);
}

_MTL_INLINE MTL::BinaryArchive* MTL::Device::newBinaryArchive(const MTL::BinaryArchiveDescriptor* descriptor, NS::Error** error)
{
    return Object::sendMessage<MTL::BinaryArchive*>(this, _MTL_PRIVATE_SEL(newBinaryArchiveWithDescriptor_error_), descriptor, error);
}

_MTL_INLINE bool MTL::Device::supportsRaytracing() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsRaytracing));
}

_MTL_INLINE MTL::AccelerationStructureSizes MTL::Device::accelerationStructureSizes(const MTL::AccelerationStructureDescriptor* descriptor)
{
    return Object::sendMessage<MTL::AccelerationStructureSizes>(this, _MTL_PRIVATE_SEL(accelerationStructureSizesWithDescriptor_), descriptor);
}

_MTL_INLINE MTL::AccelerationStructure* MTL::Device::newAccelerationStructure(NS::UInteger size)
{
    return Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_), size);
}

_MTL_INLINE MTL::AccelerationStructure* MTL::Device::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor)
{
    return Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_), descriptor);
}

// Size/alignment needed to sub-allocate an acceleration structure in a heap.
_MTL_INLINE MTL::SizeAndAlign MTL::Device::heapAccelerationStructureSizeAndAlign(NS::UInteger size)
{
    return Object::sendMessage<MTL::SizeAndAlign>(this, _MTL_PRIVATE_SEL(heapAccelerationStructureSizeAndAlignWithSize_), size);
}

_MTL_INLINE MTL::SizeAndAlign MTL::Device::heapAccelerationStructureSizeAndAlign(const MTL::AccelerationStructureDescriptor* descriptor)
{
    return 
Object::sendMessage<MTL::SizeAndAlign>(this, _MTL_PRIVATE_SEL(heapAccelerationStructureSizeAndAlignWithDescriptor_), descriptor);
}

_MTL_INLINE bool MTL::Device::supportsFunctionPointers() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsFunctionPointers));
}

_MTL_INLINE bool MTL::Device::supportsFunctionPointersFromRender() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsFunctionPointersFromRender));
}

_MTL_INLINE bool MTL::Device::supportsRaytracingFromRender() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsRaytracingFromRender));
}

_MTL_INLINE bool MTL::Device::supportsPrimitiveMotionBlur() const
{
    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportsPrimitiveMotionBlur));
}

_MTL_INLINE bool MTL::Device::shouldMaximizeConcurrentCompilation() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(shouldMaximizeConcurrentCompilation));
}

_MTL_INLINE void MTL::Device::setShouldMaximizeConcurrentCompilation(bool shouldMaximizeConcurrentCompilation)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setShouldMaximizeConcurrentCompilation_), shouldMaximizeConcurrentCompilation);
}

_MTL_INLINE NS::UInteger MTL::Device::maximumConcurrentCompilationTaskCount() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maximumConcurrentCompilationTaskCount));
}

_MTL_INLINE MTL::ResidencySet* MTL::Device::newResidencySet(const MTL::ResidencySetDescriptor* desc, NS::Error** error)
{
    return Object::sendMessage<MTL::ResidencySet*>(this, _MTL_PRIVATE_SEL(newResidencySetWithDescriptor_error_), desc, error);
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLDynamicLibrary.hpp section.  The mid-file
// `#pragma once` is an artifact of header concatenation and is harmless.
#pragma once

namespace MTL
{
// Mirrors the Objective-C MTLDynamicLibraryError error codes.
_MTL_ENUM(NS::UInteger, DynamicLibraryError) {
    DynamicLibraryErrorNone = 0,
    DynamicLibraryErrorInvalidFile = 1,
    DynamicLibraryErrorCompilationFailure = 2,
    DynamicLibraryErrorUnresolvedInstallName = 3,
    
DynamicLibraryErrorDependencyLoadFailure = 4,
    DynamicLibraryErrorUnsupported = 5,
};

// C++ facade over the MTLDynamicLibrary protocol: a dynamically-linkable
// Metal library that can be serialized to disk.
class DynamicLibrary : public NS::Referencing<DynamicLibrary>
{
public:
    NS::String*   label() const;
    void          setLabel(const NS::String* label);

    class Device* device() const;

    NS::String*   installName() const;

    bool          serializeToURL(const NS::URL* url, NS::Error** error);
};

}

_MTL_INLINE NS::String* MTL::DynamicLibrary::label() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));
}

_MTL_INLINE void MTL::DynamicLibrary::setLabel(const NS::String* label)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);
}

_MTL_INLINE MTL::Device* MTL::DynamicLibrary::device() const
{
    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));
}

_MTL_INLINE NS::String* MTL::DynamicLibrary::installName() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(installName));
}

// Writes the library to `url`; returns false and fills *error on failure.
_MTL_INLINE bool MTL::DynamicLibrary::serializeToURL(const NS::URL* url, NS::Error** error)
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(serializeToURL_error_), url, error);
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLEvent.hpp section.
#pragma once

namespace MTL
{
class Event : public NS::Referencing<Event>
{
public:
    class Device* device() const;

    NS::String*   label() const;
    void          setLabel(const NS::String* label);
};

// Listener object that dispatches shared-event notifications onto a
// dispatch queue.
class SharedEventListener : public NS::Referencing<SharedEventListener>
{
public:
    static class SharedEventListener* alloc();

    MTL::SharedEventListener*         init();

    MTL::SharedEventListener*         init(const dispatch_queue_t dispatchQueue);

    dispatch_queue_t                  dispatchQueue() const;
};

// Two callback flavors: a raw Objective-C block, and a std::function
// convenience overload (adapted to a block in notifyListener below).
using SharedEventNotificationBlock = void (^)(class SharedEvent* pEvent, std::uint64_t value);
using SharedEventNotificationFunction = std::function<void(class SharedEvent* pEvent, std::uint64_t 
value)>;

// Shared event: signalable/waitable across processes via SharedEventHandle.
class SharedEvent : public NS::Referencing<SharedEvent, Event>
{
public:
    void                     notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationBlock block);
    void                     notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationFunction& function);

    class SharedEventHandle* newSharedEventHandle();

    bool                     waitUntilSignaledValue(uint64_t value, uint64_t milliseconds);

    uint64_t                 signaledValue() const;
    void                     setSignaledValue(uint64_t signaledValue);
};

class SharedEventHandle : public NS::SecureCoding<SharedEventHandle>
{
public:
    static class SharedEventHandle* alloc();

    class SharedEventHandle*        init();

    NS::String*                     label() const;
};

}

_MTL_INLINE MTL::Device* MTL::Event::device() const
{
    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));
}

_MTL_INLINE NS::String* MTL::Event::label() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));
}

_MTL_INLINE void MTL::Event::setLabel(const NS::String* label)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);
}

_MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::alloc()
{
    return NS::Object::alloc<MTL::SharedEventListener>(_MTL_PRIVATE_CLS(MTLSharedEventListener));
}

_MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::init()
{
    return NS::Object::init<MTL::SharedEventListener>();
}

_MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::init(const dispatch_queue_t dispatchQueue)
{
    return Object::sendMessage<MTL::SharedEventListener*>(this, _MTL_PRIVATE_SEL(initWithDispatchQueue_), dispatchQueue);
}

_MTL_INLINE dispatch_queue_t MTL::SharedEventListener::dispatchQueue() const
{
    return 
Object::sendMessage<dispatch_queue_t>(this, _MTL_PRIVATE_SEL(dispatchQueue));
}

_MTL_INLINE void MTL::SharedEvent::notifyListener(const MTL::SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationBlock block)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(notifyListener_atValue_block_), listener, value, block);
}

// std::function adapter: the __block-qualified copy lets the Objective-C
// block capture (and outlive this call with) the callback.
_MTL_INLINE void MTL::SharedEvent::notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationFunction& function)
{
    __block MTL::SharedEventNotificationFunction callback = function;
    notifyListener(listener, value, ^void(class SharedEvent* pEvent, std::uint64_t value){
        callback(pEvent, value);
    });
}

_MTL_INLINE MTL::SharedEventHandle* MTL::SharedEvent::newSharedEventHandle()
{
    return Object::sendMessage<MTL::SharedEventHandle*>(this, _MTL_PRIVATE_SEL(newSharedEventHandle));
}

// Blocks the caller up to `milliseconds`; true if the value was reached.
_MTL_INLINE bool MTL::SharedEvent::waitUntilSignaledValue(uint64_t value, uint64_t milliseconds)
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(waitUntilSignaledValue_timeoutMS_), value, milliseconds);
}

_MTL_INLINE uint64_t MTL::SharedEvent::signaledValue() const
{
    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(signaledValue));
}

_MTL_INLINE void MTL::SharedEvent::setSignaledValue(uint64_t signaledValue)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSignaledValue_), signaledValue);
}

_MTL_INLINE MTL::SharedEventHandle* MTL::SharedEventHandle::alloc()
{
    return NS::Object::alloc<MTL::SharedEventHandle>(_MTL_PRIVATE_CLS(MTLSharedEventHandle));
}

_MTL_INLINE MTL::SharedEventHandle* MTL::SharedEventHandle::init()
{
    return NS::Object::init<MTL::SharedEventHandle>();
}

_MTL_INLINE NS::String* MTL::SharedEventHandle::label() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLFence.hpp section.
#pragma once

namespace MTL
{
class Fence : public 
NS::Referencing<Fence>
{
public:
    class Device* device() const;

    NS::String*   label() const;
    void          setLabel(const NS::String* label);
};

}

_MTL_INLINE MTL::Device* MTL::Fence::device() const
{
    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));
}

_MTL_INLINE NS::String* MTL::Fence::label() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));
}

_MTL_INLINE void MTL::Fence::setLabel(const NS::String* label)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLFunctionConstantValues.hpp section.
#pragma once

namespace MTL
{
// Container for function-constant values used to specialize a Metal
// function; values can be addressed by index, index range, or name.
class FunctionConstantValues : public NS::Copying<FunctionConstantValues>
{
public:
    static class FunctionConstantValues* alloc();

    class FunctionConstantValues*        init();

    void                                 setConstantValue(const void* value, MTL::DataType type, NS::UInteger index);

    void                                 setConstantValues(const void* values, MTL::DataType type, NS::Range range);

    void                                 setConstantValue(const void* value, MTL::DataType type, const NS::String* name);

    void                                 reset();
};

}

_MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionConstantValues::alloc()
{
    return NS::Object::alloc<MTL::FunctionConstantValues>(_MTL_PRIVATE_CLS(MTLFunctionConstantValues));
}

_MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionConstantValues::init()
{
    return NS::Object::init<MTL::FunctionConstantValues>();
}

_MTL_INLINE void MTL::FunctionConstantValues::setConstantValue(const void* value, MTL::DataType type, NS::UInteger index)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setConstantValue_type_atIndex_), value, type, index);
}

_MTL_INLINE void MTL::FunctionConstantValues::setConstantValues(const void* values, MTL::DataType type, NS::Range range)
{
    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setConstantValues_type_withRange_), values, type, range);
}

_MTL_INLINE void MTL::FunctionConstantValues::setConstantValue(const void* value, MTL::DataType type, const NS::String* name)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setConstantValue_type_withName_), value, type, name);
}

_MTL_INLINE void MTL::FunctionConstantValues::reset()
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLFunctionDescriptor.hpp section.
#pragma once

namespace MTL
{
// Bit flags for function creation.  Note StoreFunctionInMetalScript and
// StoreFunctionInMetalPipelinesScript intentionally share value 2 — the
// former appears to be the legacy alias of the latter.
_MTL_OPTIONS(NS::UInteger, FunctionOptions) {
    FunctionOptionNone = 0,
    FunctionOptionCompileToBinary = 1,
    FunctionOptionStoreFunctionInMetalScript = 2,
    FunctionOptionStoreFunctionInMetalPipelinesScript = 2,
    FunctionOptionFailOnBinaryArchiveMiss = 4,
};

// Describes a (possibly specialized) function to be created from a library.
class FunctionDescriptor : public NS::Copying<FunctionDescriptor>
{
public:
    static class FunctionDescriptor* alloc();

    class FunctionDescriptor*        init();

    static class FunctionDescriptor* functionDescriptor();

    NS::String*                      name() const;
    void                             setName(const NS::String* name);

    NS::String*                      specializedName() const;
    void                             setSpecializedName(const NS::String* specializedName);

    class FunctionConstantValues*    constantValues() const;
    void                             setConstantValues(const class FunctionConstantValues* constantValues);

    MTL::FunctionOptions             options() const;
    void                             setOptions(MTL::FunctionOptions options);

    NS::Array*                       binaryArchives() const;
    void                             setBinaryArchives(const NS::Array* binaryArchives);
};

class IntersectionFunctionDescriptor : public NS::Copying<IntersectionFunctionDescriptor, MTL::FunctionDescriptor>
{
public:
    static class IntersectionFunctionDescriptor* alloc();

    class IntersectionFunctionDescriptor*        
init();
};

}

_MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::alloc()
{
    return NS::Object::alloc<MTL::FunctionDescriptor>(_MTL_PRIVATE_CLS(MTLFunctionDescriptor));
}

_MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::init()
{
    return NS::Object::init<MTL::FunctionDescriptor>();
}

// Convenience factory that returns an autoreleased descriptor (sent to the
// class object rather than an instance).
_MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::functionDescriptor()
{
    return Object::sendMessage<MTL::FunctionDescriptor*>(_MTL_PRIVATE_CLS(MTLFunctionDescriptor), _MTL_PRIVATE_SEL(functionDescriptor));
}

_MTL_INLINE NS::String* MTL::FunctionDescriptor::name() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));
}

_MTL_INLINE void MTL::FunctionDescriptor::setName(const NS::String* name)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setName_), name);
}

_MTL_INLINE NS::String* MTL::FunctionDescriptor::specializedName() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(specializedName));
}

_MTL_INLINE void MTL::FunctionDescriptor::setSpecializedName(const NS::String* specializedName)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSpecializedName_), specializedName);
}

_MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionDescriptor::constantValues() const
{
    return Object::sendMessage<MTL::FunctionConstantValues*>(this, _MTL_PRIVATE_SEL(constantValues));
}

_MTL_INLINE void MTL::FunctionDescriptor::setConstantValues(const MTL::FunctionConstantValues* constantValues)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setConstantValues_), constantValues);
}

_MTL_INLINE MTL::FunctionOptions MTL::FunctionDescriptor::options() const
{
    return Object::sendMessage<MTL::FunctionOptions>(this, _MTL_PRIVATE_SEL(options));
}

_MTL_INLINE void MTL::FunctionDescriptor::setOptions(MTL::FunctionOptions options)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOptions_), options);
}

_MTL_INLINE 
NS::Array* MTL::FunctionDescriptor::binaryArchives() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));
}

_MTL_INLINE void MTL::FunctionDescriptor::setBinaryArchives(const NS::Array* binaryArchives)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);
}

_MTL_INLINE MTL::IntersectionFunctionDescriptor* MTL::IntersectionFunctionDescriptor::alloc()
{
    return NS::Object::alloc<MTL::IntersectionFunctionDescriptor>(_MTL_PRIVATE_CLS(MTLIntersectionFunctionDescriptor));
}

_MTL_INLINE MTL::IntersectionFunctionDescriptor* MTL::IntersectionFunctionDescriptor::init()
{
    return NS::Object::init<MTL::IntersectionFunctionDescriptor>();
}

// ---------------------------------------------------------------------------
// Start of the (amalgamated) MTLLibrary.hpp section.
// NOTE(review): the `#pragma once` is duplicated here — an artifact of the
// amalgamation (likely an empty header concatenated before this one).
// Harmless mid-file, but worth confirming against the generator.
#pragma once

#pragma once

#include <functional>

namespace MTL
{
_MTL_ENUM(NS::UInteger, PatchType) {
    PatchTypeNone = 0,
    PatchTypeTriangle = 1,
    PatchTypeQuad = 2,
};

// Reflection object describing one vertex-function input attribute.
class VertexAttribute : public NS::Referencing<VertexAttribute>
{
public:
    static class VertexAttribute* alloc();

    class VertexAttribute*        init();

    NS::String*                   name() const;

    NS::UInteger                  attributeIndex() const;

    MTL::DataType                 attributeType() const;

    bool                          active() const;

    bool                          patchData() const;

    bool                          patchControlPointData() const;
};

// Same shape as VertexAttribute, for stage-input attributes.
class Attribute : public NS::Referencing<Attribute>
{
public:
    static class Attribute* alloc();

    class Attribute*        init();

    NS::String*             name() const;

    NS::UInteger            attributeIndex() const;

    MTL::DataType           attributeType() const;

    bool                    active() const;

    bool                    patchData() const;

    bool                    patchControlPointData() const;
};

// Mirrors MTLFunctionType; note there is no value 4 in this sequence.
_MTL_ENUM(NS::UInteger, FunctionType) {
    FunctionTypeVertex = 1,
    
FunctionTypeFragment = 2,
    FunctionTypeKernel = 3,
    FunctionTypeVisible = 5,
    FunctionTypeIntersection = 6,
    FunctionTypeMesh = 7,
    FunctionTypeObject = 8,
};

class FunctionConstant : public NS::Referencing<FunctionConstant>
{
public:
    static class FunctionConstant* alloc();

    class FunctionConstant*        init();

    NS::String*                    name() const;

    MTL::DataType                  type() const;

    NS::UInteger                   index() const;

    bool                           required() const;
};

using AutoreleasedArgument = class Argument*;

// C++ facade over the MTLFunction protocol (a compiled shader function).
class Function : public NS::Referencing<Function>
{
public:
    NS::String*            label() const;
    void                   setLabel(const NS::String* label);

    class Device*          device() const;

    MTL::FunctionType      functionType() const;

    MTL::PatchType         patchType() const;

    NS::Integer            patchControlPointCount() const;

    NS::Array*             vertexAttributes() const;

    NS::Array*             stageInputAttributes() const;

    NS::String*            name() const;

    NS::Dictionary*        functionConstantsDictionary() const;

    class ArgumentEncoder* newArgumentEncoder(NS::UInteger bufferIndex);

    class ArgumentEncoder* newArgumentEncoder(NS::UInteger bufferIndex, const MTL::AutoreleasedArgument* reflection);

    MTL::FunctionOptions   options() const;
};

// Encoded as (major << 16) | minor, matching MTLLanguageVersion.
_MTL_ENUM(NS::UInteger, LanguageVersion) {
    LanguageVersion1_0 = 65536,
    LanguageVersion1_1 = 65537,
    LanguageVersion1_2 = 65538,
    LanguageVersion2_0 = 131072,
    LanguageVersion2_1 = 131073,
    LanguageVersion2_2 = 131074,
    LanguageVersion2_3 = 131075,
    LanguageVersion2_4 = 131076,
    LanguageVersion3_0 = 196608,
    LanguageVersion3_1 = 196609,
    LanguageVersion3_2 = 196610,
};

_MTL_ENUM(NS::Integer, LibraryType) {
    LibraryTypeExecutable = 0,
    LibraryTypeDynamic = 
1,
};

_MTL_ENUM(NS::Integer, LibraryOptimizationLevel) {
    LibraryOptimizationLevelDefault = 0,
    LibraryOptimizationLevelSize = 1,
};

_MTL_ENUM(NS::Integer, CompileSymbolVisibility) {
    CompileSymbolVisibilityDefault = 0,
    CompileSymbolVisibilityHidden = 1,
};

_MTL_ENUM(NS::Integer, MathMode) {
    MathModeSafe = 0,
    MathModeRelaxed = 1,
    MathModeFast = 2,
};

_MTL_ENUM(NS::Integer, MathFloatingPointFunctions) {
    MathFloatingPointFunctionsFast = 0,
    MathFloatingPointFunctionsPrecise = 1,
};

// Compilation settings for MSL source.  fastMathEnabled coexists with the
// newer mathMode/mathFloatingPointFunctions properties (the former looks
// like the legacy knob — see Apple's MTLCompileOptions docs).
class CompileOptions : public NS::Copying<CompileOptions>
{
public:
    static class CompileOptions*    alloc();

    class CompileOptions*           init();

    NS::Dictionary*                 preprocessorMacros() const;
    void                            setPreprocessorMacros(const NS::Dictionary* preprocessorMacros);

    bool                            fastMathEnabled() const;
    void                            setFastMathEnabled(bool fastMathEnabled);

    MTL::MathMode                   mathMode() const;
    void                            setMathMode(MTL::MathMode mathMode);

    MTL::MathFloatingPointFunctions mathFloatingPointFunctions() const;
    void                            setMathFloatingPointFunctions(MTL::MathFloatingPointFunctions mathFloatingPointFunctions);

    MTL::LanguageVersion            languageVersion() const;
    void                            setLanguageVersion(MTL::LanguageVersion languageVersion);

    MTL::LibraryType                libraryType() const;
    void                            setLibraryType(MTL::LibraryType libraryType);

    NS::String*                     installName() const;
    void                            setInstallName(const NS::String* installName);

    NS::Array*                      libraries() const;
    void                            setLibraries(const NS::Array* libraries);

    bool                            preserveInvariance() 
const;
    void                            setPreserveInvariance(bool preserveInvariance);

    MTL::LibraryOptimizationLevel   optimizationLevel() const;
    void                            setOptimizationLevel(MTL::LibraryOptimizationLevel optimizationLevel);

    MTL::CompileSymbolVisibility    compileSymbolVisibility() const;
    void                            setCompileSymbolVisibility(MTL::CompileSymbolVisibility compileSymbolVisibility);

    bool                            allowReferencingUndefinedSymbols() const;
    void                            setAllowReferencingUndefinedSymbols(bool allowReferencingUndefinedSymbols);

    NS::UInteger                    maxTotalThreadsPerThreadgroup() const;
    void                            setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup);

    bool                            enableLogging() const;
    void                            setEnableLogging(bool enableLogging);
};

// Mirrors MTLLibraryError (codes start at 1).
_MTL_ENUM(NS::UInteger, LibraryError) {
    LibraryErrorUnsupported = 1,
    LibraryErrorInternal = 2,
    LibraryErrorCompileFailure = 3,
    LibraryErrorCompileWarning = 4,
    LibraryErrorFunctionNotFound = 5,
    LibraryErrorFileNotFound = 6,
};

// C++ facade over the MTLLibrary protocol.  The std::function overloads
// below are metal-cpp conveniences layered over the block-based ones.
class Library : public NS::Referencing<Library>
{
public:
    void             newFunction(const NS::String* pFunctionName, const class FunctionConstantValues* pConstantValues, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler);

    void             newFunction(const class FunctionDescriptor* pDescriptor, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler);

    void             newIntersectionFunction(const class IntersectionFunctionDescriptor* pDescriptor, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler);

    NS::String*      label() const;
    void             setLabel(const NS::String* label);

    class Device*    
device() const;

    class Function*  newFunction(const NS::String* functionName);

    class Function*  newFunction(const NS::String* name, const class FunctionConstantValues* constantValues, NS::Error** error);

    void             newFunction(const NS::String* name, const class FunctionConstantValues* constantValues, void (^completionHandler)(MTL::Function*, NS::Error*));

    void             newFunction(const class FunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*));

    class Function*  newFunction(const class FunctionDescriptor* descriptor, NS::Error** error);

    void             newIntersectionFunction(const class IntersectionFunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*));

    class Function*  newIntersectionFunction(const class IntersectionFunctionDescriptor* descriptor, NS::Error** error);

    NS::Array*       functionNames() const;

    MTL::LibraryType type() const;

    NS::String*      installName() const;
};

}

// Inline implementations: VertexAttribute, Attribute, FunctionConstant and
// Function.  The boolean getters map to the Objective-C "is"-prefixed
// selectors (isActive, isPatchData, ...).
_MTL_INLINE MTL::VertexAttribute* MTL::VertexAttribute::alloc()
{
    return NS::Object::alloc<MTL::VertexAttribute>(_MTL_PRIVATE_CLS(MTLVertexAttribute));
}

_MTL_INLINE MTL::VertexAttribute* MTL::VertexAttribute::init()
{
    return NS::Object::init<MTL::VertexAttribute>();
}

_MTL_INLINE NS::String* MTL::VertexAttribute::name() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));
}

_MTL_INLINE NS::UInteger MTL::VertexAttribute::attributeIndex() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(attributeIndex));
}

_MTL_INLINE MTL::DataType MTL::VertexAttribute::attributeType() const
{
    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(attributeType));
}

_MTL_INLINE bool MTL::VertexAttribute::active() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isActive));
}

_MTL_INLINE bool MTL::VertexAttribute::patchData() 
const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isPatchData));
}

_MTL_INLINE bool MTL::VertexAttribute::patchControlPointData() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isPatchControlPointData));
}

_MTL_INLINE MTL::Attribute* MTL::Attribute::alloc()
{
    return NS::Object::alloc<MTL::Attribute>(_MTL_PRIVATE_CLS(MTLAttribute));
}

_MTL_INLINE MTL::Attribute* MTL::Attribute::init()
{
    return NS::Object::init<MTL::Attribute>();
}

_MTL_INLINE NS::String* MTL::Attribute::name() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));
}

_MTL_INLINE NS::UInteger MTL::Attribute::attributeIndex() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(attributeIndex));
}

_MTL_INLINE MTL::DataType MTL::Attribute::attributeType() const
{
    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(attributeType));
}

_MTL_INLINE bool MTL::Attribute::active() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isActive));
}

_MTL_INLINE bool MTL::Attribute::patchData() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isPatchData));
}

_MTL_INLINE bool MTL::Attribute::patchControlPointData() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isPatchControlPointData));
}

_MTL_INLINE MTL::FunctionConstant* MTL::FunctionConstant::alloc()
{
    return NS::Object::alloc<MTL::FunctionConstant>(_MTL_PRIVATE_CLS(MTLFunctionConstant));
}

_MTL_INLINE MTL::FunctionConstant* MTL::FunctionConstant::init()
{
    return NS::Object::init<MTL::FunctionConstant>();
}

_MTL_INLINE NS::String* MTL::FunctionConstant::name() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));
}

_MTL_INLINE MTL::DataType MTL::FunctionConstant::type() const
{
    return Object::sendMessage<MTL::DataType>(this, _MTL_PRIVATE_SEL(type));
}

_MTL_INLINE 
NS::UInteger MTL::FunctionConstant::index() const
{
    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(index));
}

_MTL_INLINE bool MTL::FunctionConstant::required() const
{
    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(required));
}

_MTL_INLINE NS::String* MTL::Function::label() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));
}

_MTL_INLINE void MTL::Function::setLabel(const NS::String* label)
{
    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);
}

_MTL_INLINE MTL::Device* MTL::Function::device() const
{
    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));
}

_MTL_INLINE MTL::FunctionType MTL::Function::functionType() const
{
    return Object::sendMessage<MTL::FunctionType>(this, _MTL_PRIVATE_SEL(functionType));
}

_MTL_INLINE MTL::PatchType MTL::Function::patchType() const
{
    return Object::sendMessage<MTL::PatchType>(this, _MTL_PRIVATE_SEL(patchType));
}

_MTL_INLINE NS::Integer MTL::Function::patchControlPointCount() const
{
    return Object::sendMessage<NS::Integer>(this, _MTL_PRIVATE_SEL(patchControlPointCount));
}

_MTL_INLINE NS::Array* MTL::Function::vertexAttributes() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexAttributes));
}

_MTL_INLINE NS::Array* MTL::Function::stageInputAttributes() const
{
    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(stageInputAttributes));
}

_MTL_INLINE NS::String* MTL::Function::name() const
{
    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));
}

_MTL_INLINE NS::Dictionary* MTL::Function::functionConstantsDictionary() const
{
    return Object::sendMessage<NS::Dictionary*>(this, _MTL_PRIVATE_SEL(functionConstantsDictionary));
}

// NOTE(review): this definition continues past the end of this span.
_MTL_INLINE MTL::ArgumentEncoder* MTL::Function::newArgumentEncoder(NS::UInteger bufferIndex)
{
    return 
Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferIndex_), bufferIndex);\n}\n\n_MTL_INLINE MTL::ArgumentEncoder* MTL::Function::newArgumentEncoder(NS::UInteger bufferIndex, const MTL::AutoreleasedArgument* reflection)\n{\n    return Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferIndex_reflection_), bufferIndex, reflection);\n}\n\n_MTL_INLINE MTL::FunctionOptions MTL::Function::options() const\n{\n    return Object::sendMessage<MTL::FunctionOptions>(this, _MTL_PRIVATE_SEL(options));\n}\n\n_MTL_INLINE MTL::CompileOptions* MTL::CompileOptions::alloc()\n{\n    return NS::Object::alloc<MTL::CompileOptions>(_MTL_PRIVATE_CLS(MTLCompileOptions));\n}\n\n_MTL_INLINE MTL::CompileOptions* MTL::CompileOptions::init()\n{\n    return NS::Object::init<MTL::CompileOptions>();\n}\n\n_MTL_INLINE NS::Dictionary* MTL::CompileOptions::preprocessorMacros() const\n{\n    return Object::sendMessage<NS::Dictionary*>(this, _MTL_PRIVATE_SEL(preprocessorMacros));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setPreprocessorMacros(const NS::Dictionary* preprocessorMacros)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPreprocessorMacros_), preprocessorMacros);\n}\n\n_MTL_INLINE bool MTL::CompileOptions::fastMathEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(fastMathEnabled));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setFastMathEnabled(bool fastMathEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFastMathEnabled_), fastMathEnabled);\n}\n\n_MTL_INLINE MTL::MathMode MTL::CompileOptions::mathMode() const\n{\n    return Object::sendMessage<MTL::MathMode>(this, _MTL_PRIVATE_SEL(mathMode));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setMathMode(MTL::MathMode mathMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMathMode_), mathMode);\n}\n\n_MTL_INLINE MTL::MathFloatingPointFunctions 
MTL::CompileOptions::mathFloatingPointFunctions() const\n{\n    return Object::sendMessage<MTL::MathFloatingPointFunctions>(this, _MTL_PRIVATE_SEL(mathFloatingPointFunctions));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setMathFloatingPointFunctions(MTL::MathFloatingPointFunctions mathFloatingPointFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMathFloatingPointFunctions_), mathFloatingPointFunctions);\n}\n\n_MTL_INLINE MTL::LanguageVersion MTL::CompileOptions::languageVersion() const\n{\n    return Object::sendMessage<MTL::LanguageVersion>(this, _MTL_PRIVATE_SEL(languageVersion));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setLanguageVersion(MTL::LanguageVersion languageVersion)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLanguageVersion_), languageVersion);\n}\n\n_MTL_INLINE MTL::LibraryType MTL::CompileOptions::libraryType() const\n{\n    return Object::sendMessage<MTL::LibraryType>(this, _MTL_PRIVATE_SEL(libraryType));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setLibraryType(MTL::LibraryType libraryType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLibraryType_), libraryType);\n}\n\n_MTL_INLINE NS::String* MTL::CompileOptions::installName() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(installName));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setInstallName(const NS::String* installName)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInstallName_), installName);\n}\n\n_MTL_INLINE NS::Array* MTL::CompileOptions::libraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(libraries));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setLibraries(const NS::Array* libraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLibraries_), libraries);\n}\n\n_MTL_INLINE bool MTL::CompileOptions::preserveInvariance() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(preserveInvariance));\n}\n\n_MTL_INLINE void 
MTL::CompileOptions::setPreserveInvariance(bool preserveInvariance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPreserveInvariance_), preserveInvariance);\n}\n\n_MTL_INLINE MTL::LibraryOptimizationLevel MTL::CompileOptions::optimizationLevel() const\n{\n    return Object::sendMessage<MTL::LibraryOptimizationLevel>(this, _MTL_PRIVATE_SEL(optimizationLevel));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setOptimizationLevel(MTL::LibraryOptimizationLevel optimizationLevel)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOptimizationLevel_), optimizationLevel);\n}\n\n_MTL_INLINE MTL::CompileSymbolVisibility MTL::CompileOptions::compileSymbolVisibility() const\n{\n    return Object::sendMessage<MTL::CompileSymbolVisibility>(this, _MTL_PRIVATE_SEL(compileSymbolVisibility));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setCompileSymbolVisibility(MTL::CompileSymbolVisibility compileSymbolVisibility)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCompileSymbolVisibility_), compileSymbolVisibility);\n}\n\n_MTL_INLINE bool MTL::CompileOptions::allowReferencingUndefinedSymbols() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(allowReferencingUndefinedSymbols));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setAllowReferencingUndefinedSymbols(bool allowReferencingUndefinedSymbols)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAllowReferencingUndefinedSymbols_), allowReferencingUndefinedSymbols);\n}\n\n_MTL_INLINE NS::UInteger MTL::CompileOptions::maxTotalThreadsPerThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup);\n}\n\n_MTL_INLINE bool MTL::CompileOptions::enableLogging() const\n{\n    
return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(enableLogging));\n}\n\n_MTL_INLINE void MTL::CompileOptions::setEnableLogging(bool enableLogging)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setEnableLogging_), enableLogging);\n}\n\n_MTL_INLINE void MTL::Library::newFunction(const NS::String* pFunctionName, const FunctionConstantValues* pConstantValues, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler)\n{\n    __block std::function<void(Function * pFunction, NS::Error * pError)> blockCompletionHandler = completionHandler;\n\n    newFunction(pFunctionName, pConstantValues, ^(Function* pFunction, NS::Error* pError) { blockCompletionHandler(pFunction, pError); });\n}\n\n_MTL_INLINE void MTL::Library::newFunction(const FunctionDescriptor* pDescriptor, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler)\n{\n    __block std::function<void(Function * pFunction, NS::Error * pError)> blockCompletionHandler = completionHandler;\n\n    newFunction(pDescriptor, ^(Function* pFunction, NS::Error* pError) { blockCompletionHandler(pFunction, pError); });\n}\n\n_MTL_INLINE void MTL::Library::newIntersectionFunction(const IntersectionFunctionDescriptor* pDescriptor, const std::function<void(Function* pFunction, NS::Error* pError)>& completionHandler)\n{\n    __block std::function<void(Function * pFunction, NS::Error * pError)> blockCompletionHandler = completionHandler;\n\n    newIntersectionFunction(pDescriptor, ^(Function* pFunction, NS::Error* pError) { blockCompletionHandler(pFunction, pError); });\n}\n\n_MTL_INLINE NS::String* MTL::Library::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::Library::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Device* MTL::Library::device() const\n{\n    return 
Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::Function* MTL::Library::newFunction(const NS::String* functionName)\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(newFunctionWithName_), functionName);\n}\n\n_MTL_INLINE MTL::Function* MTL::Library::newFunction(const NS::String* name, const MTL::FunctionConstantValues* constantValues, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(newFunctionWithName_constantValues_error_), name, constantValues, error);\n}\n\n_MTL_INLINE void MTL::Library::newFunction(const NS::String* name, const MTL::FunctionConstantValues* constantValues, void (^completionHandler)(MTL::Function*, NS::Error*))\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newFunctionWithName_constantValues_completionHandler_), name, constantValues, completionHandler);\n}\n\n_MTL_INLINE void MTL::Library::newFunction(const MTL::FunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*))\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newFunctionWithDescriptor_completionHandler_), descriptor, completionHandler);\n}\n\n_MTL_INLINE MTL::Function* MTL::Library::newFunction(const MTL::FunctionDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(newFunctionWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE void MTL::Library::newIntersectionFunction(const MTL::IntersectionFunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*))\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(newIntersectionFunctionWithDescriptor_completionHandler_), descriptor, completionHandler);\n}\n\n_MTL_INLINE MTL::Function* MTL::Library::newIntersectionFunction(const MTL::IntersectionFunctionDescriptor* descriptor, NS::Error** error)\n{\n    return Object::sendMessage<MTL::Function*>(this, 
_MTL_PRIVATE_SEL(newIntersectionFunctionWithDescriptor_error_), descriptor, error);\n}\n\n_MTL_INLINE NS::Array* MTL::Library::functionNames() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(functionNames));\n}\n\n_MTL_INLINE MTL::LibraryType MTL::Library::type() const\n{\n    return Object::sendMessage<MTL::LibraryType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE NS::String* MTL::Library::installName() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(installName));\n}\n\nnamespace MTL\n{\nclass FunctionHandle : public NS::Referencing<FunctionHandle>\n{\npublic:\n    MTL::FunctionType functionType() const;\n\n    NS::String*       name() const;\n\n    class Device*     device() const;\n};\n\n}\n\n_MTL_INLINE MTL::FunctionType MTL::FunctionHandle::functionType() const\n{\n    return Object::sendMessage<MTL::FunctionType>(this, _MTL_PRIVATE_SEL(functionType));\n}\n\n_MTL_INLINE NS::String* MTL::FunctionHandle::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE MTL::Device* MTL::FunctionHandle::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, FunctionLogType) {\n    FunctionLogTypeValidation = 0,\n};\n\nclass LogContainer : public NS::Referencing<LogContainer, NS::FastEnumeration>\n{\npublic:\n};\n\nclass FunctionLogDebugLocation : public NS::Referencing<FunctionLogDebugLocation>\n{\npublic:\n    NS::String*  functionName() const;\n\n    NS::URL*     URL() const;\n\n    NS::UInteger line() const;\n\n    NS::UInteger column() const;\n};\n\nclass FunctionLog : public NS::Referencing<FunctionLog>\n{\npublic:\n    MTL::FunctionLogType            type() const;\n\n    NS::String*                     encoderLabel() const;\n\n    class Function*                 function() const;\n\n    class FunctionLogDebugLocation* debugLocation() 
const;\n};\n\n}\n\n_MTL_INLINE NS::String* MTL::FunctionLogDebugLocation::functionName() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(functionName));\n}\n\n_MTL_INLINE NS::URL* MTL::FunctionLogDebugLocation::URL() const\n{\n    return Object::sendMessage<NS::URL*>(this, _MTL_PRIVATE_SEL(URL));\n}\n\n_MTL_INLINE NS::UInteger MTL::FunctionLogDebugLocation::line() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(line));\n}\n\n_MTL_INLINE NS::UInteger MTL::FunctionLogDebugLocation::column() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(column));\n}\n\n_MTL_INLINE MTL::FunctionLogType MTL::FunctionLog::type() const\n{\n    return Object::sendMessage<MTL::FunctionLogType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE NS::String* MTL::FunctionLog::encoderLabel() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(encoderLabel));\n}\n\n_MTL_INLINE MTL::Function* MTL::FunctionLog::function() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(function));\n}\n\n_MTL_INLINE MTL::FunctionLogDebugLocation* MTL::FunctionLog::debugLocation() const\n{\n    return Object::sendMessage<MTL::FunctionLogDebugLocation*>(this, _MTL_PRIVATE_SEL(debugLocation));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_OPTIONS(NS::UInteger, StitchedLibraryOptions) {\n    StitchedLibraryOptionNone = 0,\n    StitchedLibraryOptionFailOnBinaryArchiveMiss = 1,\n    StitchedLibraryOptionStoreLibraryInMetalScript = 2,\n    StitchedLibraryOptionStoreLibraryInMetalPipelinesScript = 2\n};\n\nclass FunctionStitchingAttribute : public NS::Referencing<FunctionStitchingAttribute>\n{\npublic:\n};\n\nclass FunctionStitchingAttributeAlwaysInline : public NS::Referencing<FunctionStitchingAttributeAlwaysInline, FunctionStitchingAttribute>\n{\npublic:\n    static class FunctionStitchingAttributeAlwaysInline* alloc();\n\n    class FunctionStitchingAttributeAlwaysInline*        
init();\n};\n\nclass FunctionStitchingNode : public NS::Copying<FunctionStitchingNode>\n{\npublic:\n};\n\nclass FunctionStitchingInputNode : public NS::Referencing<FunctionStitchingInputNode, FunctionStitchingNode>\n{\npublic:\n    static class FunctionStitchingInputNode* alloc();\n\n    class FunctionStitchingInputNode*        init();\n\n    NS::UInteger                             argumentIndex() const;\n    void                                     setArgumentIndex(NS::UInteger argumentIndex);\n\n    MTL::FunctionStitchingInputNode*         init(NS::UInteger argument);\n};\n\nclass FunctionStitchingFunctionNode : public NS::Referencing<FunctionStitchingFunctionNode, FunctionStitchingNode>\n{\npublic:\n    static class FunctionStitchingFunctionNode* alloc();\n\n    class FunctionStitchingFunctionNode*        init();\n\n    NS::String*                                 name() const;\n    void                                        setName(const NS::String* name);\n\n    NS::Array*                                  arguments() const;\n    void                                        setArguments(const NS::Array* arguments);\n\n    NS::Array*                                  controlDependencies() const;\n    void                                        setControlDependencies(const NS::Array* controlDependencies);\n\n    MTL::FunctionStitchingFunctionNode*         init(const NS::String* name, const NS::Array* arguments, const NS::Array* controlDependencies);\n};\n\nclass FunctionStitchingGraph : public NS::Copying<FunctionStitchingGraph>\n{\npublic:\n    static class FunctionStitchingGraph* alloc();\n\n    class FunctionStitchingGraph*        init();\n\n    NS::String*                          functionName() const;\n    void                                 setFunctionName(const NS::String* functionName);\n\n    NS::Array*                           nodes() const;\n    void                                 setNodes(const NS::Array* nodes);\n\n    class 
FunctionStitchingFunctionNode* outputNode() const;\n    void                                 setOutputNode(const class FunctionStitchingFunctionNode* outputNode);\n\n    NS::Array*                           attributes() const;\n    void                                 setAttributes(const NS::Array* attributes);\n\n    MTL::FunctionStitchingGraph*         init(const NS::String* functionName, const NS::Array* nodes, const class FunctionStitchingFunctionNode* outputNode, const NS::Array* attributes);\n};\n\nclass StitchedLibraryDescriptor : public NS::Copying<StitchedLibraryDescriptor>\n{\npublic:\n    static class StitchedLibraryDescriptor* alloc();\n\n    class StitchedLibraryDescriptor*        init();\n\n    NS::Array*                              functionGraphs() const;\n    void                                    setFunctionGraphs(const NS::Array* functionGraphs);\n\n    NS::Array*                              functions() const;\n    void                                    setFunctions(const NS::Array* functions);\n\n    NS::Array*                              binaryArchives() const;\n    void                                    setBinaryArchives(const NS::Array* binaryArchives);\n\n    MTL::StitchedLibraryOptions             options() const;\n    void                                    setOptions(MTL::StitchedLibraryOptions options);\n};\n\n}\n\n_MTL_INLINE MTL::FunctionStitchingAttributeAlwaysInline* MTL::FunctionStitchingAttributeAlwaysInline::alloc()\n{\n    return NS::Object::alloc<MTL::FunctionStitchingAttributeAlwaysInline>(_MTL_PRIVATE_CLS(MTLFunctionStitchingAttributeAlwaysInline));\n}\n\n_MTL_INLINE MTL::FunctionStitchingAttributeAlwaysInline* MTL::FunctionStitchingAttributeAlwaysInline::init()\n{\n    return NS::Object::init<MTL::FunctionStitchingAttributeAlwaysInline>();\n}\n\n_MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::alloc()\n{\n    return 
NS::Object::alloc<MTL::FunctionStitchingInputNode>(_MTL_PRIVATE_CLS(MTLFunctionStitchingInputNode));\n}\n\n_MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::init()\n{\n    return NS::Object::init<MTL::FunctionStitchingInputNode>();\n}\n\n_MTL_INLINE NS::UInteger MTL::FunctionStitchingInputNode::argumentIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(argumentIndex));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingInputNode::setArgumentIndex(NS::UInteger argumentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArgumentIndex_), argumentIndex);\n}\n\n_MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::init(NS::UInteger argument)\n{\n    return Object::sendMessage<MTL::FunctionStitchingInputNode*>(this, _MTL_PRIVATE_SEL(initWithArgumentIndex_), argument);\n}\n\n_MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::alloc()\n{\n    return NS::Object::alloc<MTL::FunctionStitchingFunctionNode>(_MTL_PRIVATE_CLS(MTLFunctionStitchingFunctionNode));\n}\n\n_MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::init()\n{\n    return NS::Object::init<MTL::FunctionStitchingFunctionNode>();\n}\n\n_MTL_INLINE NS::String* MTL::FunctionStitchingFunctionNode::name() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(name));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingFunctionNode::setName(const NS::String* name)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setName_), name);\n}\n\n_MTL_INLINE NS::Array* MTL::FunctionStitchingFunctionNode::arguments() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(arguments));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingFunctionNode::setArguments(const NS::Array* arguments)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArguments_), arguments);\n}\n\n_MTL_INLINE NS::Array* 
MTL::FunctionStitchingFunctionNode::controlDependencies() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(controlDependencies));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingFunctionNode::setControlDependencies(const NS::Array* controlDependencies)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setControlDependencies_), controlDependencies);\n}\n\n_MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::init(const NS::String* name, const NS::Array* arguments, const NS::Array* controlDependencies)\n{\n    return Object::sendMessage<MTL::FunctionStitchingFunctionNode*>(this, _MTL_PRIVATE_SEL(initWithName_arguments_controlDependencies_), name, arguments, controlDependencies);\n}\n\n_MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::alloc()\n{\n    return NS::Object::alloc<MTL::FunctionStitchingGraph>(_MTL_PRIVATE_CLS(MTLFunctionStitchingGraph));\n}\n\n_MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::init()\n{\n    return NS::Object::init<MTL::FunctionStitchingGraph>();\n}\n\n_MTL_INLINE NS::String* MTL::FunctionStitchingGraph::functionName() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(functionName));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingGraph::setFunctionName(const NS::String* functionName)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctionName_), functionName);\n}\n\n_MTL_INLINE NS::Array* MTL::FunctionStitchingGraph::nodes() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(nodes));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingGraph::setNodes(const NS::Array* nodes)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setNodes_), nodes);\n}\n\n_MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingGraph::outputNode() const\n{\n    return Object::sendMessage<MTL::FunctionStitchingFunctionNode*>(this, _MTL_PRIVATE_SEL(outputNode));\n}\n\n_MTL_INLINE void 
MTL::FunctionStitchingGraph::setOutputNode(const MTL::FunctionStitchingFunctionNode* outputNode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOutputNode_), outputNode);\n}\n\n_MTL_INLINE NS::Array* MTL::FunctionStitchingGraph::attributes() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(attributes));\n}\n\n_MTL_INLINE void MTL::FunctionStitchingGraph::setAttributes(const NS::Array* attributes)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAttributes_), attributes);\n}\n\n_MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::init(const NS::String* functionName, const NS::Array* nodes, const MTL::FunctionStitchingFunctionNode* outputNode, const NS::Array* attributes)\n{\n    return Object::sendMessage<MTL::FunctionStitchingGraph*>(this, _MTL_PRIVATE_SEL(initWithFunctionName_nodes_outputNode_attributes_), functionName, nodes, outputNode, attributes);\n}\n\n_MTL_INLINE MTL::StitchedLibraryDescriptor* MTL::StitchedLibraryDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::StitchedLibraryDescriptor>(_MTL_PRIVATE_CLS(MTLStitchedLibraryDescriptor));\n}\n\n_MTL_INLINE MTL::StitchedLibraryDescriptor* MTL::StitchedLibraryDescriptor::init()\n{\n    return NS::Object::init<MTL::StitchedLibraryDescriptor>();\n}\n\n_MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::functionGraphs() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(functionGraphs));\n}\n\n_MTL_INLINE void MTL::StitchedLibraryDescriptor::setFunctionGraphs(const NS::Array* functionGraphs)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctionGraphs_), functionGraphs);\n}\n\n_MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::functions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(functions));\n}\n\n_MTL_INLINE void MTL::StitchedLibraryDescriptor::setFunctions(const NS::Array* functions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctions_), 
functions);\n}\n\n_MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::binaryArchives() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));\n}\n\n_MTL_INLINE void MTL::StitchedLibraryDescriptor::setBinaryArchives(const NS::Array* binaryArchives)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);\n}\n\n_MTL_INLINE MTL::StitchedLibraryOptions MTL::StitchedLibraryDescriptor::options() const\n{\n    return Object::sendMessage<MTL::StitchedLibraryOptions>(this, _MTL_PRIVATE_SEL(options));\n}\n\n_MTL_INLINE void MTL::StitchedLibraryDescriptor::setOptions(MTL::StitchedLibraryOptions options)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOptions_), options);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, HeapType) {\n    HeapTypeAutomatic = 0,\n    HeapTypePlacement = 1,\n    HeapTypeSparse = 2,\n};\n\nclass HeapDescriptor : public NS::Copying<HeapDescriptor>\n{\npublic:\n    static class HeapDescriptor* alloc();\n\n    class HeapDescriptor*        init();\n\n    NS::UInteger                 size() const;\n    void                         setSize(NS::UInteger size);\n\n    MTL::StorageMode             storageMode() const;\n    void                         setStorageMode(MTL::StorageMode storageMode);\n\n    MTL::CPUCacheMode            cpuCacheMode() const;\n    void                         setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode);\n\n    MTL::SparsePageSize          sparsePageSize() const;\n    void                         setSparsePageSize(MTL::SparsePageSize sparsePageSize);\n\n    MTL::HazardTrackingMode      hazardTrackingMode() const;\n    void                         setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode);\n\n    MTL::ResourceOptions         resourceOptions() const;\n    void                         setResourceOptions(MTL::ResourceOptions resourceOptions);\n\n    MTL::HeapType                type() const;\n    void   
                      setType(MTL::HeapType type);\n};\n\nclass Heap : public NS::Referencing<Heap, Allocation>\n{\npublic:\n    NS::String*                  label() const;\n    void                         setLabel(const NS::String* label);\n\n    class Device*                device() const;\n\n    MTL::StorageMode             storageMode() const;\n\n    MTL::CPUCacheMode            cpuCacheMode() const;\n\n    MTL::HazardTrackingMode      hazardTrackingMode() const;\n\n    MTL::ResourceOptions         resourceOptions() const;\n\n    NS::UInteger                 size() const;\n\n    NS::UInteger                 usedSize() const;\n\n    NS::UInteger                 currentAllocatedSize() const;\n\n    NS::UInteger                 maxAvailableSize(NS::UInteger alignment);\n\n    class Buffer*                newBuffer(NS::UInteger length, MTL::ResourceOptions options);\n\n    class Texture*               newTexture(const class TextureDescriptor* descriptor);\n\n    MTL::PurgeableState          setPurgeableState(MTL::PurgeableState state);\n\n    MTL::HeapType                type() const;\n\n    class Buffer*                newBuffer(NS::UInteger length, MTL::ResourceOptions options, NS::UInteger offset);\n\n    class Texture*               newTexture(const class TextureDescriptor* descriptor, NS::UInteger offset);\n\n    class AccelerationStructure* newAccelerationStructure(NS::UInteger size);\n\n    class AccelerationStructure* newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor);\n\n    class AccelerationStructure* newAccelerationStructure(NS::UInteger size, NS::UInteger offset);\n\n    class AccelerationStructure* newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor, NS::UInteger offset);\n};\n\n}\n\n_MTL_INLINE MTL::HeapDescriptor* MTL::HeapDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::HeapDescriptor>(_MTL_PRIVATE_CLS(MTLHeapDescriptor));\n}\n\n_MTL_INLINE MTL::HeapDescriptor* 
MTL::HeapDescriptor::init()\n{\n    return NS::Object::init<MTL::HeapDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::HeapDescriptor::size() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(size));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setSize(NS::UInteger size)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSize_), size);\n}\n\n_MTL_INLINE MTL::StorageMode MTL::HeapDescriptor::storageMode() const\n{\n    return Object::sendMessage<MTL::StorageMode>(this, _MTL_PRIVATE_SEL(storageMode));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setStorageMode(MTL::StorageMode storageMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStorageMode_), storageMode);\n}\n\n_MTL_INLINE MTL::CPUCacheMode MTL::HeapDescriptor::cpuCacheMode() const\n{\n    return Object::sendMessage<MTL::CPUCacheMode>(this, _MTL_PRIVATE_SEL(cpuCacheMode));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCpuCacheMode_), cpuCacheMode);\n}\n\n_MTL_INLINE MTL::SparsePageSize MTL::HeapDescriptor::sparsePageSize() const\n{\n    return Object::sendMessage<MTL::SparsePageSize>(this, _MTL_PRIVATE_SEL(sparsePageSize));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setSparsePageSize(MTL::SparsePageSize sparsePageSize)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSparsePageSize_), sparsePageSize);\n}\n\n_MTL_INLINE MTL::HazardTrackingMode MTL::HeapDescriptor::hazardTrackingMode() const\n{\n    return Object::sendMessage<MTL::HazardTrackingMode>(this, _MTL_PRIVATE_SEL(hazardTrackingMode));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setHazardTrackingMode_), hazardTrackingMode);\n}\n\n_MTL_INLINE MTL::ResourceOptions MTL::HeapDescriptor::resourceOptions() const\n{\n    return 
Object::sendMessage<MTL::ResourceOptions>(this, _MTL_PRIVATE_SEL(resourceOptions));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setResourceOptions(MTL::ResourceOptions resourceOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResourceOptions_), resourceOptions);\n}\n\n_MTL_INLINE MTL::HeapType MTL::HeapDescriptor::type() const\n{\n    return Object::sendMessage<MTL::HeapType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE void MTL::HeapDescriptor::setType(MTL::HeapType type)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setType_), type);\n}\n\n_MTL_INLINE NS::String* MTL::Heap::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::Heap::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Device* MTL::Heap::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::StorageMode MTL::Heap::storageMode() const\n{\n    return Object::sendMessage<MTL::StorageMode>(this, _MTL_PRIVATE_SEL(storageMode));\n}\n\n_MTL_INLINE MTL::CPUCacheMode MTL::Heap::cpuCacheMode() const\n{\n    return Object::sendMessage<MTL::CPUCacheMode>(this, _MTL_PRIVATE_SEL(cpuCacheMode));\n}\n\n_MTL_INLINE MTL::HazardTrackingMode MTL::Heap::hazardTrackingMode() const\n{\n    return Object::sendMessage<MTL::HazardTrackingMode>(this, _MTL_PRIVATE_SEL(hazardTrackingMode));\n}\n\n_MTL_INLINE MTL::ResourceOptions MTL::Heap::resourceOptions() const\n{\n    return Object::sendMessage<MTL::ResourceOptions>(this, _MTL_PRIVATE_SEL(resourceOptions));\n}\n\n_MTL_INLINE NS::UInteger MTL::Heap::size() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(size));\n}\n\n_MTL_INLINE NS::UInteger MTL::Heap::usedSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(usedSize));\n}\n\n_MTL_INLINE NS::UInteger 
MTL::Heap::currentAllocatedSize() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(currentAllocatedSize));\n}\n\n_MTL_INLINE NS::UInteger MTL::Heap::maxAvailableSize(NS::UInteger alignment)\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxAvailableSizeWithAlignment_), alignment);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Heap::newBuffer(NS::UInteger length, MTL::ResourceOptions options)\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_), length, options);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Heap::newTexture(const MTL::TextureDescriptor* descriptor)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::PurgeableState MTL::Heap::setPurgeableState(MTL::PurgeableState state)\n{\n    return Object::sendMessage<MTL::PurgeableState>(this, _MTL_PRIVATE_SEL(setPurgeableState_), state);\n}\n\n_MTL_INLINE MTL::HeapType MTL::Heap::type() const\n{\n    return Object::sendMessage<MTL::HeapType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE MTL::Buffer* MTL::Heap::newBuffer(NS::UInteger length, MTL::ResourceOptions options, NS::UInteger offset)\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_offset_), length, options, offset);\n}\n\n_MTL_INLINE MTL::Texture* MTL::Heap::newTexture(const MTL::TextureDescriptor* descriptor, NS::UInteger offset)\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_offset_), descriptor, offset);\n}\n\n_MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(NS::UInteger size)\n{\n    return Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_), size);\n}\n\n_MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor)\n{\n    return 
Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_), descriptor);\n}\n\n_MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(NS::UInteger size, NS::UInteger offset)\n{\n    return Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_offset_), size, offset);\n}\n\n_MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor, NS::UInteger offset)\n{\n    return Object::sendMessage<MTL::AccelerationStructure*>(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_offset_), descriptor, offset);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_OPTIONS(NS::UInteger, IndirectCommandType) {\n    IndirectCommandTypeDraw = 1,\n    IndirectCommandTypeDrawIndexed = 2,\n    IndirectCommandTypeDrawPatches = 4,\n    IndirectCommandTypeDrawIndexedPatches = 8,\n    IndirectCommandTypeConcurrentDispatch = 32,\n    IndirectCommandTypeConcurrentDispatchThreads = 64,\n    IndirectCommandTypeDrawMeshThreadgroups = 128,\n    IndirectCommandTypeDrawMeshThreads = 256,\n};\n\nstruct IndirectCommandBufferExecutionRange\n{\n    uint32_t location;\n    uint32_t length;\n} _MTL_PACKED;\n\nclass IndirectCommandBufferDescriptor : public NS::Copying<IndirectCommandBufferDescriptor>\n{\npublic:\n    static class IndirectCommandBufferDescriptor* alloc();\n\n    class IndirectCommandBufferDescriptor*        init();\n\n    MTL::IndirectCommandType                      commandTypes() const;\n    void                                          setCommandTypes(MTL::IndirectCommandType commandTypes);\n\n    bool                                          inheritPipelineState() const;\n    void                                          setInheritPipelineState(bool inheritPipelineState);\n\n    bool                                          inheritBuffers() const;\n    void                                   
       setInheritBuffers(bool inheritBuffers);\n\n    NS::UInteger                                  maxVertexBufferBindCount() const;\n    void                                          setMaxVertexBufferBindCount(NS::UInteger maxVertexBufferBindCount);\n\n    NS::UInteger                                  maxFragmentBufferBindCount() const;\n    void                                          setMaxFragmentBufferBindCount(NS::UInteger maxFragmentBufferBindCount);\n\n    NS::UInteger                                  maxKernelBufferBindCount() const;\n    void                                          setMaxKernelBufferBindCount(NS::UInteger maxKernelBufferBindCount);\n\n    NS::UInteger                                  maxKernelThreadgroupMemoryBindCount() const;\n    void                                          setMaxKernelThreadgroupMemoryBindCount(NS::UInteger maxKernelThreadgroupMemoryBindCount);\n\n    NS::UInteger                                  maxObjectBufferBindCount() const;\n    void                                          setMaxObjectBufferBindCount(NS::UInteger maxObjectBufferBindCount);\n\n    NS::UInteger                                  maxMeshBufferBindCount() const;\n    void                                          setMaxMeshBufferBindCount(NS::UInteger maxMeshBufferBindCount);\n\n    NS::UInteger                                  maxObjectThreadgroupMemoryBindCount() const;\n    void                                          setMaxObjectThreadgroupMemoryBindCount(NS::UInteger maxObjectThreadgroupMemoryBindCount);\n\n    bool                                          supportRayTracing() const;\n    void                                          setSupportRayTracing(bool supportRayTracing);\n\n    bool                                          supportDynamicAttributeStride() const;\n    void                                          setSupportDynamicAttributeStride(bool supportDynamicAttributeStride);\n};\n\nclass IndirectCommandBuffer : public 
NS::Referencing<IndirectCommandBuffer, Resource>\n{\npublic:\n    NS::UInteger                  size() const;\n\n    MTL::ResourceID               gpuResourceID() const;\n\n    void                          reset(NS::Range range);\n\n    class IndirectRenderCommand*  indirectRenderCommand(NS::UInteger commandIndex);\n\n    class IndirectComputeCommand* indirectComputeCommand(NS::UInteger commandIndex);\n};\n\n}\n\n_MTL_INLINE MTL::IndirectCommandBufferDescriptor* MTL::IndirectCommandBufferDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::IndirectCommandBufferDescriptor>(_MTL_PRIVATE_CLS(MTLIndirectCommandBufferDescriptor));\n}\n\n_MTL_INLINE MTL::IndirectCommandBufferDescriptor* MTL::IndirectCommandBufferDescriptor::init()\n{\n    return NS::Object::init<MTL::IndirectCommandBufferDescriptor>();\n}\n\n_MTL_INLINE MTL::IndirectCommandType MTL::IndirectCommandBufferDescriptor::commandTypes() const\n{\n    return Object::sendMessage<MTL::IndirectCommandType>(this, _MTL_PRIVATE_SEL(commandTypes));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setCommandTypes(MTL::IndirectCommandType commandTypes)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCommandTypes_), commandTypes);\n}\n\n_MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::inheritPipelineState() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(inheritPipelineState));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setInheritPipelineState(bool inheritPipelineState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInheritPipelineState_), inheritPipelineState);\n}\n\n_MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::inheritBuffers() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(inheritBuffers));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setInheritBuffers(bool inheritBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInheritBuffers_), inheritBuffers);\n}\n\n_MTL_INLINE 
NS::UInteger MTL::IndirectCommandBufferDescriptor::maxVertexBufferBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxVertexBufferBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxVertexBufferBindCount(NS::UInteger maxVertexBufferBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxVertexBufferBindCount_), maxVertexBufferBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxFragmentBufferBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxFragmentBufferBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxFragmentBufferBindCount(NS::UInteger maxFragmentBufferBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxFragmentBufferBindCount_), maxFragmentBufferBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxKernelBufferBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxKernelBufferBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxKernelBufferBindCount(NS::UInteger maxKernelBufferBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxKernelBufferBindCount_), maxKernelBufferBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxKernelThreadgroupMemoryBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxKernelThreadgroupMemoryBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxKernelThreadgroupMemoryBindCount(NS::UInteger maxKernelThreadgroupMemoryBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxKernelThreadgroupMemoryBindCount_), maxKernelThreadgroupMemoryBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxObjectBufferBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(maxObjectBufferBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxObjectBufferBindCount(NS::UInteger maxObjectBufferBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxObjectBufferBindCount_), maxObjectBufferBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxMeshBufferBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxMeshBufferBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxMeshBufferBindCount(NS::UInteger maxMeshBufferBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxMeshBufferBindCount_), maxMeshBufferBindCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxObjectThreadgroupMemoryBindCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxObjectThreadgroupMemoryBindCount));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxObjectThreadgroupMemoryBindCount(NS::UInteger maxObjectThreadgroupMemoryBindCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxObjectThreadgroupMemoryBindCount_), maxObjectThreadgroupMemoryBindCount);\n}\n\n_MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::supportRayTracing() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportRayTracing));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setSupportRayTracing(bool supportRayTracing)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportRayTracing_), supportRayTracing);\n}\n\n_MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::supportDynamicAttributeStride() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportDynamicAttributeStride));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setSupportDynamicAttributeStride(bool supportDynamicAttributeStride)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setSupportDynamicAttributeStride_), supportDynamicAttributeStride);\n}\n\n_MTL_INLINE NS::UInteger MTL::IndirectCommandBuffer::size() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(size));\n}\n\n_MTL_INLINE MTL::ResourceID MTL::IndirectCommandBuffer::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE void MTL::IndirectCommandBuffer::reset(NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resetWithRange_), range);\n}\n\n_MTL_INLINE MTL::IndirectRenderCommand* MTL::IndirectCommandBuffer::indirectRenderCommand(NS::UInteger commandIndex)\n{\n    return Object::sendMessage<MTL::IndirectRenderCommand*>(this, _MTL_PRIVATE_SEL(indirectRenderCommandAtIndex_), commandIndex);\n}\n\n_MTL_INLINE MTL::IndirectComputeCommand* MTL::IndirectCommandBuffer::indirectComputeCommand(NS::UInteger commandIndex)\n{\n    return Object::sendMessage<MTL::IndirectComputeCommand*>(this, _MTL_PRIVATE_SEL(indirectComputeCommandAtIndex_), commandIndex);\n}\n\n#pragma once\n\n#pragma once\n\n#pragma once\n\nnamespace MTL\n{\nstruct ClearColor\n{\n    static ClearColor Make(double red, double green, double blue, double alpha);\n\n    ClearColor() = default;\n\n    ClearColor(double red, double green, double blue, double alpha);\n\n    double red;\n    double green;\n    double blue;\n    double alpha;\n} _MTL_PACKED;\n\n_MTL_ENUM(NS::UInteger, LoadAction) {\n    LoadActionDontCare = 0,\n    LoadActionLoad = 1,\n    LoadActionClear = 2,\n};\n\n_MTL_ENUM(NS::UInteger, StoreAction) {\n    StoreActionDontCare = 0,\n    StoreActionStore = 1,\n    StoreActionMultisampleResolve = 2,\n    StoreActionStoreAndMultisampleResolve = 3,\n    StoreActionUnknown = 4,\n    StoreActionCustomSampleDepthStore = 5,\n};\n\n_MTL_OPTIONS(NS::UInteger, StoreActionOptions) {\n    StoreActionOptionNone = 0,\n    StoreActionOptionCustomSamplePositions = 1,\n    
StoreActionOptionValidMask = 1,\n};\n\nclass RenderPassAttachmentDescriptor : public NS::Copying<RenderPassAttachmentDescriptor>\n{\npublic:\n    static class RenderPassAttachmentDescriptor* alloc();\n\n    class RenderPassAttachmentDescriptor*        init();\n\n    class Texture*                               texture() const;\n    void                                         setTexture(const class Texture* texture);\n\n    NS::UInteger                                 level() const;\n    void                                         setLevel(NS::UInteger level);\n\n    NS::UInteger                                 slice() const;\n    void                                         setSlice(NS::UInteger slice);\n\n    NS::UInteger                                 depthPlane() const;\n    void                                         setDepthPlane(NS::UInteger depthPlane);\n\n    class Texture*                               resolveTexture() const;\n    void                                         setResolveTexture(const class Texture* resolveTexture);\n\n    NS::UInteger                                 resolveLevel() const;\n    void                                         setResolveLevel(NS::UInteger resolveLevel);\n\n    NS::UInteger                                 resolveSlice() const;\n    void                                         setResolveSlice(NS::UInteger resolveSlice);\n\n    NS::UInteger                                 resolveDepthPlane() const;\n    void                                         setResolveDepthPlane(NS::UInteger resolveDepthPlane);\n\n    MTL::LoadAction                              loadAction() const;\n    void                                         setLoadAction(MTL::LoadAction loadAction);\n\n    MTL::StoreAction                             storeAction() const;\n    void                                         setStoreAction(MTL::StoreAction storeAction);\n\n    MTL::StoreActionOptions                      storeActionOptions() const;\n    
void                                         setStoreActionOptions(MTL::StoreActionOptions storeActionOptions);\n};\n\nclass RenderPassColorAttachmentDescriptor : public NS::Copying<RenderPassColorAttachmentDescriptor, MTL::RenderPassAttachmentDescriptor>\n{\npublic:\n    static class RenderPassColorAttachmentDescriptor* alloc();\n\n    class RenderPassColorAttachmentDescriptor*        init();\n\n    MTL::ClearColor                                   clearColor() const;\n    void                                              setClearColor(MTL::ClearColor clearColor);\n};\n\n_MTL_ENUM(NS::UInteger, MultisampleDepthResolveFilter) {\n    MultisampleDepthResolveFilterSample0 = 0,\n    MultisampleDepthResolveFilterMin = 1,\n    MultisampleDepthResolveFilterMax = 2,\n};\n\nclass RenderPassDepthAttachmentDescriptor : public NS::Copying<RenderPassDepthAttachmentDescriptor, MTL::RenderPassAttachmentDescriptor>\n{\npublic:\n    static class RenderPassDepthAttachmentDescriptor* alloc();\n\n    class RenderPassDepthAttachmentDescriptor*        init();\n\n    double                                            clearDepth() const;\n    void                                              setClearDepth(double clearDepth);\n\n    MTL::MultisampleDepthResolveFilter                depthResolveFilter() const;\n    void                                              setDepthResolveFilter(MTL::MultisampleDepthResolveFilter depthResolveFilter);\n};\n\n_MTL_ENUM(NS::UInteger, MultisampleStencilResolveFilter) {\n    MultisampleStencilResolveFilterSample0 = 0,\n    MultisampleStencilResolveFilterDepthResolvedSample = 1,\n};\n\nclass RenderPassStencilAttachmentDescriptor : public NS::Copying<RenderPassStencilAttachmentDescriptor, MTL::RenderPassAttachmentDescriptor>\n{\npublic:\n    static class RenderPassStencilAttachmentDescriptor* alloc();\n\n    class RenderPassStencilAttachmentDescriptor*        init();\n\n    uint32_t                                            clearStencil() const;\n    void   
                                             setClearStencil(uint32_t clearStencil);\n\n    MTL::MultisampleStencilResolveFilter                stencilResolveFilter() const;\n    void                                                setStencilResolveFilter(MTL::MultisampleStencilResolveFilter stencilResolveFilter);\n};\n\nclass RenderPassColorAttachmentDescriptorArray : public NS::Referencing<RenderPassColorAttachmentDescriptorArray>\n{\npublic:\n    static class RenderPassColorAttachmentDescriptorArray* alloc();\n\n    class RenderPassColorAttachmentDescriptorArray*        init();\n\n    class RenderPassColorAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                   setObject(const class RenderPassColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass RenderPassSampleBufferAttachmentDescriptor : public NS::Copying<RenderPassSampleBufferAttachmentDescriptor>\n{\npublic:\n    static class RenderPassSampleBufferAttachmentDescriptor* alloc();\n\n    class RenderPassSampleBufferAttachmentDescriptor*        init();\n\n    class CounterSampleBuffer*                               sampleBuffer() const;\n    void                                                     setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);\n\n    NS::UInteger                                             startOfVertexSampleIndex() const;\n    void                                                     setStartOfVertexSampleIndex(NS::UInteger startOfVertexSampleIndex);\n\n    NS::UInteger                                             endOfVertexSampleIndex() const;\n    void                                                     setEndOfVertexSampleIndex(NS::UInteger endOfVertexSampleIndex);\n\n    NS::UInteger                                             startOfFragmentSampleIndex() const;\n    void                                                     setStartOfFragmentSampleIndex(NS::UInteger 
startOfFragmentSampleIndex);\n\n    NS::UInteger                                             endOfFragmentSampleIndex() const;\n    void                                                     setEndOfFragmentSampleIndex(NS::UInteger endOfFragmentSampleIndex);\n};\n\nclass RenderPassSampleBufferAttachmentDescriptorArray : public NS::Referencing<RenderPassSampleBufferAttachmentDescriptorArray>\n{\npublic:\n    static class RenderPassSampleBufferAttachmentDescriptorArray* alloc();\n\n    class RenderPassSampleBufferAttachmentDescriptorArray*        init();\n\n    class RenderPassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                          setObject(const class RenderPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass RenderPassDescriptor : public NS::Copying<RenderPassDescriptor>\n{\npublic:\n    static class RenderPassDescriptor*                     alloc();\n\n    class RenderPassDescriptor*                            init();\n\n    static class RenderPassDescriptor*                     renderPassDescriptor();\n\n    class RenderPassColorAttachmentDescriptorArray*        colorAttachments() const;\n\n    class RenderPassDepthAttachmentDescriptor*             depthAttachment() const;\n    void                                                   setDepthAttachment(const class RenderPassDepthAttachmentDescriptor* depthAttachment);\n\n    class RenderPassStencilAttachmentDescriptor*           stencilAttachment() const;\n    void                                                   setStencilAttachment(const class RenderPassStencilAttachmentDescriptor* stencilAttachment);\n\n    class Buffer*                                          visibilityResultBuffer() const;\n    void                                                   setVisibilityResultBuffer(const class Buffer* visibilityResultBuffer);\n\n    NS::UInteger                                      
     renderTargetArrayLength() const;\n    void                                                   setRenderTargetArrayLength(NS::UInteger renderTargetArrayLength);\n\n    NS::UInteger                                           imageblockSampleLength() const;\n    void                                                   setImageblockSampleLength(NS::UInteger imageblockSampleLength);\n\n    NS::UInteger                                           threadgroupMemoryLength() const;\n    void                                                   setThreadgroupMemoryLength(NS::UInteger threadgroupMemoryLength);\n\n    NS::UInteger                                           tileWidth() const;\n    void                                                   setTileWidth(NS::UInteger tileWidth);\n\n    NS::UInteger                                           tileHeight() const;\n    void                                                   setTileHeight(NS::UInteger tileHeight);\n\n    NS::UInteger                                           defaultRasterSampleCount() const;\n    void                                                   setDefaultRasterSampleCount(NS::UInteger defaultRasterSampleCount);\n\n    NS::UInteger                                           renderTargetWidth() const;\n    void                                                   setRenderTargetWidth(NS::UInteger renderTargetWidth);\n\n    NS::UInteger                                           renderTargetHeight() const;\n    void                                                   setRenderTargetHeight(NS::UInteger renderTargetHeight);\n\n    void                                                   setSamplePositions(const MTL::SamplePosition* positions, NS::UInteger count);\n\n    NS::UInteger                                           getSamplePositions(MTL::SamplePosition* positions, NS::UInteger count);\n\n    class RasterizationRateMap*                            rasterizationRateMap() const;\n    void                            
                       setRasterizationRateMap(const class RasterizationRateMap* rasterizationRateMap);\n\n    class RenderPassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;\n};\n\n}\n\n_MTL_INLINE MTL::ClearColor MTL::ClearColor::Make(double red, double green, double blue, double alpha)\n{\n    return ClearColor(red, green, blue, alpha);\n}\n\n_MTL_INLINE MTL::ClearColor::ClearColor(double _red, double _green, double _blue, double _alpha)\n    : red(_red)\n    , green(_green)\n    , blue(_blue)\n    , alpha(_alpha)\n{\n}\n\n_MTL_INLINE MTL::RenderPassAttachmentDescriptor* MTL::RenderPassAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassAttachmentDescriptor* MTL::RenderPassAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::Texture* MTL::RenderPassAttachmentDescriptor::texture() const\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(texture));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setTexture(const MTL::Texture* texture)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTexture_), texture);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::level() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(level));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setLevel(NS::UInteger level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLevel_), level);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::slice() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(slice));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setSlice(NS::UInteger slice)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSlice_), slice);\n}\n\n_MTL_INLINE NS::UInteger 
MTL::RenderPassAttachmentDescriptor::depthPlane() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(depthPlane));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setDepthPlane(NS::UInteger depthPlane)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthPlane_), depthPlane);\n}\n\n_MTL_INLINE MTL::Texture* MTL::RenderPassAttachmentDescriptor::resolveTexture() const\n{\n    return Object::sendMessage<MTL::Texture*>(this, _MTL_PRIVATE_SEL(resolveTexture));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveTexture(const MTL::Texture* resolveTexture)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResolveTexture_), resolveTexture);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveLevel() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(resolveLevel));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveLevel(NS::UInteger resolveLevel)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResolveLevel_), resolveLevel);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveSlice() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(resolveSlice));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveSlice(NS::UInteger resolveSlice)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResolveSlice_), resolveSlice);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveDepthPlane() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(resolveDepthPlane));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveDepthPlane(NS::UInteger resolveDepthPlane)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setResolveDepthPlane_), resolveDepthPlane);\n}\n\n_MTL_INLINE MTL::LoadAction MTL::RenderPassAttachmentDescriptor::loadAction() const\n{\n    return 
Object::sendMessage<MTL::LoadAction>(this, _MTL_PRIVATE_SEL(loadAction));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setLoadAction(MTL::LoadAction loadAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLoadAction_), loadAction);\n}\n\n_MTL_INLINE MTL::StoreAction MTL::RenderPassAttachmentDescriptor::storeAction() const\n{\n    return Object::sendMessage<MTL::StoreAction>(this, _MTL_PRIVATE_SEL(storeAction));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setStoreAction(MTL::StoreAction storeAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStoreAction_), storeAction);\n}\n\n_MTL_INLINE MTL::StoreActionOptions MTL::RenderPassAttachmentDescriptor::storeActionOptions() const\n{\n    return Object::sendMessage<MTL::StoreActionOptions>(this, _MTL_PRIVATE_SEL(storeActionOptions));\n}\n\n_MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setStoreActionOptions(MTL::StoreActionOptions storeActionOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStoreActionOptions_), storeActionOptions);\n}\n\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassColorAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassColorAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassColorAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::ClearColor MTL::RenderPassColorAttachmentDescriptor::clearColor() const\n{\n    return Object::sendMessage<MTL::ClearColor>(this, _MTL_PRIVATE_SEL(clearColor));\n}\n\n_MTL_INLINE void MTL::RenderPassColorAttachmentDescriptor::setClearColor(MTL::ClearColor clearColor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setClearColor_), clearColor);\n}\n\n_MTL_INLINE MTL::RenderPassDepthAttachmentDescriptor* 
MTL::RenderPassDepthAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassDepthAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassDepthAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassDepthAttachmentDescriptor* MTL::RenderPassDepthAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassDepthAttachmentDescriptor>();\n}\n\n_MTL_INLINE double MTL::RenderPassDepthAttachmentDescriptor::clearDepth() const\n{\n    return Object::sendMessage<double>(this, _MTL_PRIVATE_SEL(clearDepth));\n}\n\n_MTL_INLINE void MTL::RenderPassDepthAttachmentDescriptor::setClearDepth(double clearDepth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setClearDepth_), clearDepth);\n}\n\n_MTL_INLINE MTL::MultisampleDepthResolveFilter MTL::RenderPassDepthAttachmentDescriptor::depthResolveFilter() const\n{\n    return Object::sendMessage<MTL::MultisampleDepthResolveFilter>(this, _MTL_PRIVATE_SEL(depthResolveFilter));\n}\n\n_MTL_INLINE void MTL::RenderPassDepthAttachmentDescriptor::setDepthResolveFilter(MTL::MultisampleDepthResolveFilter depthResolveFilter)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthResolveFilter_), depthResolveFilter);\n}\n\n_MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor* MTL::RenderPassStencilAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassStencilAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassStencilAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor* MTL::RenderPassStencilAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassStencilAttachmentDescriptor>();\n}\n\n_MTL_INLINE uint32_t MTL::RenderPassStencilAttachmentDescriptor::clearStencil() const\n{\n    return Object::sendMessage<uint32_t>(this, _MTL_PRIVATE_SEL(clearStencil));\n}\n\n_MTL_INLINE void MTL::RenderPassStencilAttachmentDescriptor::setClearStencil(uint32_t clearStencil)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setClearStencil_), clearStencil);\n}\n\n_MTL_INLINE MTL::MultisampleStencilResolveFilter MTL::RenderPassStencilAttachmentDescriptor::stencilResolveFilter() const\n{\n    return Object::sendMessage<MTL::MultisampleStencilResolveFilter>(this, _MTL_PRIVATE_SEL(stencilResolveFilter));\n}\n\n_MTL_INLINE void MTL::RenderPassStencilAttachmentDescriptor::setStencilResolveFilter(MTL::MultisampleStencilResolveFilter stencilResolveFilter)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilResolveFilter_), stencilResolveFilter);\n}\n\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassColorAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassColorAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLRenderPassColorAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassColorAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::RenderPassColorAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::RenderPassColorAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::RenderPassColorAttachmentDescriptorArray::setObject(const MTL::RenderPassColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor* MTL::RenderPassSampleBufferAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassSampleBufferAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassSampleBufferAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor* 
MTL::RenderPassSampleBufferAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassSampleBufferAttachmentDescriptor>();\n}\n\n// Sample-buffer attachment: counter sample buffer plus start/end sample indices for the\n// vertex and fragment stages.\n_MTL_INLINE MTL::CounterSampleBuffer* MTL::RenderPassSampleBufferAttachmentDescriptor::sampleBuffer() const\n{\n    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(sampleBuffer));\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::startOfVertexSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfVertexSampleIndex));\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setStartOfVertexSampleIndex(NS::UInteger startOfVertexSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfVertexSampleIndex_), startOfVertexSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::endOfVertexSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(endOfVertexSampleIndex));\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setEndOfVertexSampleIndex(NS::UInteger endOfVertexSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setEndOfVertexSampleIndex_), endOfVertexSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::startOfFragmentSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfFragmentSampleIndex));\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setStartOfFragmentSampleIndex(NS::UInteger startOfFragmentSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfFragmentSampleIndex_), startOfFragmentSampleIndex);\n}\n\n_MTL_INLINE 
NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::endOfFragmentSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(endOfFragmentSampleIndex));\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setEndOfFragmentSampleIndex(NS::UInteger endOfFragmentSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setEndOfFragmentSampleIndex_), endOfFragmentSampleIndex);\n}\n\n// RenderPassSampleBufferAttachmentDescriptorArray: indexed-subscript access, same pattern\n// as the color-attachment array above.\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* MTL::RenderPassSampleBufferAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassSampleBufferAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLRenderPassSampleBufferAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* MTL::RenderPassSampleBufferAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::RenderPassSampleBufferAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor* MTL::RenderPassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::RenderPassSampleBufferAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptorArray::setObject(const MTL::RenderPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::RenderPassDescriptor* MTL::RenderPassDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPassDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPassDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPassDescriptor* MTL::RenderPassDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPassDescriptor>();\n}\n\n_MTL_INLINE MTL::RenderPassDescriptor* 
MTL::RenderPassDescriptor::renderPassDescriptor()\n{\n    return Object::sendMessage<MTL::RenderPassDescriptor*>(_MTL_PRIVATE_CLS(MTLRenderPassDescriptor), _MTL_PRIVATE_SEL(renderPassDescriptor));\n}\n\n// RenderPassDescriptor: attachment collections and pass-wide configuration, each a\n// message-send wrapper over the corresponding MTLRenderPassDescriptor property.\n_MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassDescriptor::colorAttachments() const\n{\n    return Object::sendMessage<MTL::RenderPassColorAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(colorAttachments));\n}\n\n_MTL_INLINE MTL::RenderPassDepthAttachmentDescriptor* MTL::RenderPassDescriptor::depthAttachment() const\n{\n    return Object::sendMessage<MTL::RenderPassDepthAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(depthAttachment));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setDepthAttachment(const MTL::RenderPassDepthAttachmentDescriptor* depthAttachment)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthAttachment_), depthAttachment);\n}\n\n_MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor* MTL::RenderPassDescriptor::stencilAttachment() const\n{\n    return Object::sendMessage<MTL::RenderPassStencilAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(stencilAttachment));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setStencilAttachment(const MTL::RenderPassStencilAttachmentDescriptor* stencilAttachment)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilAttachment_), stencilAttachment);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::RenderPassDescriptor::visibilityResultBuffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(visibilityResultBuffer));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setVisibilityResultBuffer(const MTL::Buffer* visibilityResultBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibilityResultBuffer_), visibilityResultBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetArrayLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(renderTargetArrayLength));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetArrayLength(NS::UInteger renderTargetArrayLength)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderTargetArrayLength_), renderTargetArrayLength);\n}\n\n// Tile-shading related properties: imageblock/threadgroup memory sizes and tile dimensions.\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::imageblockSampleLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(imageblockSampleLength));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setImageblockSampleLength(NS::UInteger imageblockSampleLength)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setImageblockSampleLength_), imageblockSampleLength);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::threadgroupMemoryLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(threadgroupMemoryLength));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setThreadgroupMemoryLength(NS::UInteger threadgroupMemoryLength)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_), threadgroupMemoryLength);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::tileWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(tileWidth));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setTileWidth(NS::UInteger tileWidth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileWidth_), tileWidth);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::tileHeight() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(tileHeight));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setTileHeight(NS::UInteger tileHeight)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileHeight_), tileHeight);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::defaultRasterSampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(defaultRasterSampleCount));\n}\n\n_MTL_INLINE void 
MTL::RenderPassDescriptor::setDefaultRasterSampleCount(NS::UInteger defaultRasterSampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDefaultRasterSampleCount_), defaultRasterSampleCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(renderTargetWidth));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetWidth(NS::UInteger renderTargetWidth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderTargetWidth_), renderTargetWidth);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetHeight() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(renderTargetHeight));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetHeight(NS::UInteger renderTargetHeight)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderTargetHeight_), renderTargetHeight);\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setSamplePositions(const MTL::SamplePosition* positions, NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplePositions_count_), positions, count);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::getSamplePositions(MTL::SamplePosition* positions, NS::UInteger count)\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(getSamplePositions_count_), positions, count);\n}\n\n_MTL_INLINE MTL::RasterizationRateMap* MTL::RenderPassDescriptor::rasterizationRateMap() const\n{\n    return Object::sendMessage<MTL::RasterizationRateMap*>(this, _MTL_PRIVATE_SEL(rasterizationRateMap));\n}\n\n_MTL_INLINE void MTL::RenderPassDescriptor::setRasterizationRateMap(const MTL::RasterizationRateMap* rasterizationRateMap)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterizationRateMap_), rasterizationRateMap);\n}\n\n_MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* 
MTL::RenderPassDescriptor::sampleBufferAttachments() const\n{\n    return Object::sendMessage<MTL::RenderPassSampleBufferAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(sampleBufferAttachments));\n}\n\nnamespace MTL\n{\n// Render-pipeline enums and plain-data argument structs used by RenderCommandEncoder.\n// The _MTL_PACKED structs mirror the layouts Metal expects in indirect-argument buffers.\n_MTL_ENUM(NS::UInteger, PrimitiveType) {\n    PrimitiveTypePoint = 0,\n    PrimitiveTypeLine = 1,\n    PrimitiveTypeLineStrip = 2,\n    PrimitiveTypeTriangle = 3,\n    PrimitiveTypeTriangleStrip = 4,\n};\n\n_MTL_ENUM(NS::UInteger, VisibilityResultMode) {\n    VisibilityResultModeDisabled = 0,\n    VisibilityResultModeBoolean = 1,\n    VisibilityResultModeCounting = 2,\n};\n\nstruct ScissorRect\n{\n    NS::UInteger x;\n    NS::UInteger y;\n    NS::UInteger width;\n    NS::UInteger height;\n} _MTL_PACKED;\n\nstruct Viewport\n{\n    double originX;\n    double originY;\n    double width;\n    double height;\n    double znear;\n    double zfar;\n} _MTL_PACKED;\n\n_MTL_ENUM(NS::UInteger, CullMode) {\n    CullModeNone = 0,\n    CullModeFront = 1,\n    CullModeBack = 2,\n};\n\n_MTL_ENUM(NS::UInteger, Winding) {\n    WindingClockwise = 0,\n    WindingCounterClockwise = 1,\n};\n\n_MTL_ENUM(NS::UInteger, DepthClipMode) {\n    DepthClipModeClip = 0,\n    DepthClipModeClamp = 1,\n};\n\n_MTL_ENUM(NS::UInteger, TriangleFillMode) {\n    TriangleFillModeFill = 0,\n    TriangleFillModeLines = 1,\n};\n\nstruct DrawPrimitivesIndirectArguments\n{\n    uint32_t vertexCount;\n    uint32_t instanceCount;\n    uint32_t vertexStart;\n    uint32_t baseInstance;\n} _MTL_PACKED;\n\nstruct DrawIndexedPrimitivesIndirectArguments\n{\n    uint32_t indexCount;\n    uint32_t instanceCount;\n    uint32_t indexStart;\n    int32_t  baseVertex;\n    uint32_t baseInstance;\n} _MTL_PACKED;\n\nstruct VertexAmplificationViewMapping\n{\n    uint32_t viewportArrayIndexOffset;\n    uint32_t renderTargetArrayIndexOffset;\n} _MTL_PACKED;\n\nstruct DrawPatchIndirectArguments\n{\n    uint32_t patchCount;\n    uint32_t instanceCount;\n    uint32_t patchStart;\n    uint32_t baseInstance;\n} 
_MTL_PACKED;\n\nstruct QuadTessellationFactorsHalf\n{\n    uint16_t edgeTessellationFactor[4];\n    uint16_t insideTessellationFactor[2];\n} _MTL_PACKED;\n\nstruct TriangleTessellationFactorsHalf\n{\n    uint16_t edgeTessellationFactor[3];\n    uint16_t insideTessellationFactor;\n} _MTL_PACKED;\n\n// Bit flags selecting pipeline stages for fences, barriers and resource-usage calls.\n_MTL_OPTIONS(NS::UInteger, RenderStages) {\n    RenderStageVertex = 1,\n    RenderStageFragment = 2,\n    RenderStageTile = 4,\n    RenderStageObject = 8,\n    RenderStageMesh = 16,\n};\n\n// C++ wrapper for MTLRenderCommandEncoder; declarations only, inline definitions follow\n// after the namespace closes.\nclass RenderCommandEncoder : public NS::Referencing<RenderCommandEncoder, CommandEncoder>\n{\npublic:\n    void         setRenderPipelineState(const class RenderPipelineState* pipelineState);\n\n    void         setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void         setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void         setVertexBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void         setVertexBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range);\n\n    void         setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void         setVertexBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range);\n\n    void         setVertexBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void         setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index);\n\n    void         setVertexTexture(const class Texture* texture, NS::UInteger index);\n\n    void         setVertexTextures(const class Texture* const textures[], NS::Range range);\n\n    void         setVertexSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void         setVertexSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void         
setVertexSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index);\n\n    void         setVertexSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range);\n\n    void         setVertexVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex);\n\n    void         setVertexVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range);\n\n    void         setVertexIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex);\n\n    void         setVertexIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range);\n\n    void         setVertexAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex);\n\n    // Fixed-function rasterizer state.\n    void         setViewport(MTL::Viewport viewport);\n\n    void         setViewports(const MTL::Viewport* viewports, NS::UInteger count);\n\n    void         setFrontFacingWinding(MTL::Winding frontFacingWinding);\n\n    void         setVertexAmplificationCount(NS::UInteger count, const MTL::VertexAmplificationViewMapping* viewMappings);\n\n    void         setCullMode(MTL::CullMode cullMode);\n\n    void         setDepthClipMode(MTL::DepthClipMode depthClipMode);\n\n    void         setDepthBias(float depthBias, float slopeScale, float clamp);\n\n    void         setScissorRect(MTL::ScissorRect rect);\n\n    void         setScissorRects(const MTL::ScissorRect* scissorRects, NS::UInteger count);\n\n    void         setTriangleFillMode(MTL::TriangleFillMode fillMode);\n\n    // Fragment-stage resource binding.\n    void         setFragmentBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void         setFragmentBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void         
setFragmentBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void         setFragmentBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range);\n\n    void         setFragmentTexture(const class Texture* texture, NS::UInteger index);\n\n    void         setFragmentTextures(const class Texture* const textures[], NS::Range range);\n\n    void         setFragmentSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void         setFragmentSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void         setFragmentSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index);\n\n    void         setFragmentSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range);\n\n    void         setFragmentVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex);\n\n    void         setFragmentVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range);\n\n    void         setFragmentIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex);\n\n    void         setFragmentIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range);\n\n    void         setFragmentAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex);\n\n    void         setBlendColor(float red, float green, float blue, float alpha);\n\n    void         setDepthStencilState(const class DepthStencilState* depthStencilState);\n\n    void         setStencilReferenceValue(uint32_t referenceValue);\n\n    void         setStencilReferenceValues(uint32_t frontReferenceValue, uint32_t backReferenceValue);\n\n    void         
setVisibilityResultMode(MTL::VisibilityResultMode mode, NS::UInteger offset);\n\n    // Per-pass store-action overrides (must match the descriptor's deferred store actions).\n    void         setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex);\n\n    void         setDepthStoreAction(MTL::StoreAction storeAction);\n\n    void         setStencilStoreAction(MTL::StoreAction storeAction);\n\n    void         setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex);\n\n    void         setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions);\n\n    void         setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions);\n\n    // Object-stage (mesh pipeline) resource binding.\n    void         setObjectBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void         setObjectBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void         setObjectBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void         setObjectBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range);\n\n    void         setObjectTexture(const class Texture* texture, NS::UInteger index);\n\n    void         setObjectTextures(const class Texture* const textures[], NS::Range range);\n\n    void         setObjectSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void         setObjectSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void         setObjectSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index);\n\n    void         setObjectSamplerStates(const class SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range);\n\n    void         setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index);\n\n    // Mesh-stage resource binding.\n    void         setMeshBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void         setMeshBuffer(const class Buffer* buffer, NS::UInteger 
offset, NS::UInteger index);\n\n    void         setMeshBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void         setMeshBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range);\n\n    void         setMeshTexture(const class Texture* texture, NS::UInteger index);\n\n    void         setMeshTextures(const class Texture* const textures[], NS::Range range);\n\n    void         setMeshSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void         setMeshSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void         setMeshSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index);\n\n    void         setMeshSamplerStates(const class SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range);\n\n    // Draw calls: mesh-shader dispatch and classic (indexed) primitive draws, with\n    // direct and indirect-buffer variants.\n    void         drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup);\n\n    void         drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup);\n\n    void         drawMeshThreadgroups(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup);\n\n    void         drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount);\n\n    void         drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount);\n\n    void         drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount);\n\n    void         drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, 
const class Buffer* indexBuffer, NS::UInteger indexBufferOffset);\n\n    void         drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance);\n\n    void         drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance);\n\n    void         drawPrimitives(MTL::PrimitiveType primitiveType, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    void         drawIndexedPrimitives(MTL::PrimitiveType primitiveType, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    void         textureBarrier();\n\n    // Fence signalling/waiting scoped to specific render stages.\n    void         updateFence(const class Fence* fence, MTL::RenderStages stages);\n\n    void         waitForFence(const class Fence* fence, MTL::RenderStages stages);\n\n    // Tessellation: factor buffer setup plus patch draws (direct, indirect, indexed).\n    void         setTessellationFactorBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride);\n\n    void         setTessellationFactorScale(float scale);\n\n    void         drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance);\n\n    void         drawPatches(NS::UInteger numberOfPatchControlPoints, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    void         drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* 
controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance);\n\n    void         drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    // Tile-stage queries and resource binding.\n    NS::UInteger tileWidth() const;\n\n    NS::UInteger tileHeight() const;\n\n    void         setTileBytes(const void* bytes, NS::UInteger length, NS::UInteger index);\n\n    void         setTileBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void         setTileBufferOffset(NS::UInteger offset, NS::UInteger index);\n\n    void         setTileBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range);\n\n    void         setTileTexture(const class Texture* texture, NS::UInteger index);\n\n    void         setTileTextures(const class Texture* const textures[], NS::Range range);\n\n    void         setTileSamplerState(const class SamplerState* sampler, NS::UInteger index);\n\n    void         setTileSamplerStates(const class SamplerState* const samplers[], NS::Range range);\n\n    void         setTileSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index);\n\n    void         setTileSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range);\n\n    void         setTileVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex);\n\n    void         setTileVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range);\n\n    void         setTileIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex);\n\n    void    
     setTileIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range);\n\n    void         setTileAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex);\n\n    void         dispatchThreadsPerTile(MTL::Size threadsPerTile);\n\n    void         setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger offset, NS::UInteger index);\n\n    // Residency/usage hints for argument-buffer-referenced resources and heaps.\n    void         useResource(const class Resource* resource, MTL::ResourceUsage usage);\n\n    void         useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage);\n\n    void         useResource(const class Resource* resource, MTL::ResourceUsage usage, MTL::RenderStages stages);\n\n    void         useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage, MTL::RenderStages stages);\n\n    void         useHeap(const class Heap* heap);\n\n    void         useHeaps(const class Heap* const heaps[], NS::UInteger count);\n\n    void         useHeap(const class Heap* heap, MTL::RenderStages stages);\n\n    void         useHeaps(const class Heap* const heaps[], NS::UInteger count, MTL::RenderStages stages);\n\n    void         executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange);\n\n    void         executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandbuffer, const class Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset);\n\n    void         memoryBarrier(MTL::BarrierScope scope, MTL::RenderStages after, MTL::RenderStages before);\n\n    void         memoryBarrier(const class Resource* const resources[], NS::UInteger count, MTL::RenderStages after, MTL::RenderStages before);\n\n    void         sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier);\n};\n\n}\n\n_MTL_INLINE void 
MTL::RenderCommandEncoder::setRenderPipelineState(const MTL::RenderPipelineState* pipelineState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineState_), pipelineState);\n}\n\n// RenderCommandEncoder inline definitions: each method forwards its arguments unchanged\n// to the matching Objective-C selector via Object::sendMessage.\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n// Overloads taking an attribute stride (vertex buffers with programmable strides).\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffers_offsets_attributeStrides_withRange_), buffers, offsets, strides, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setVertexBufferOffset_attributeStride_atIndex_), offset, stride, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBytes_length_attributeStride_atIndex_), bytes, length, stride, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void 
MTL::RenderCommandEncoder::setVertexVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexVisibleFunctionTables_withBufferRange_), functionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setViewport(MTL::Viewport viewport)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setViewport_), viewport);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setViewports(const MTL::Viewport* viewports, NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setViewports_count_), viewports, count);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFrontFacingWinding(MTL::Winding frontFacingWinding)\n{\n  
  Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFrontFacingWinding_), frontFacingWinding);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVertexAmplificationCount(NS::UInteger count, const MTL::VertexAmplificationViewMapping* viewMappings)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexAmplificationCount_viewMappings_), count, viewMappings);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setCullMode(MTL::CullMode cullMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCullMode_), cullMode);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setDepthClipMode(MTL::DepthClipMode depthClipMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthClipMode_), depthClipMode);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setDepthBias(float depthBias, float slopeScale, float clamp)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthBias_slopeScale_clamp_), depthBias, slopeScale, clamp);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setScissorRect(MTL::ScissorRect rect)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setScissorRect_), rect);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setScissorRects(const MTL::ScissorRect* scissorRects, NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setScissorRects_count_), scissorRects, count);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTriangleFillMode(MTL::TriangleFillMode fillMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTriangleFillMode_), fillMode);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setFragmentBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float 
lodMaxClamps[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentVisibleFunctionTables_withBufferRange_), functionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setBlendColor(float red, float green, float blue, float alpha)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBlendColorRed_green_blue_alpha_), red, green, blue, 
alpha);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setDepthStencilState(const MTL::DepthStencilState* depthStencilState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStencilState_), depthStencilState);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setStencilReferenceValue(uint32_t referenceValue)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilReferenceValue_), referenceValue);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setStencilReferenceValues(uint32_t frontReferenceValue, uint32_t backReferenceValue)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilFrontReferenceValue_backReferenceValue_), frontReferenceValue, backReferenceValue);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setVisibilityResultMode(MTL::VisibilityResultMode mode, NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibilityResultMode_offset_), mode, offset);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setColorStoreAction_atIndex_), storeAction, colorAttachmentIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setDepthStoreAction(MTL::StoreAction storeAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStoreAction_), storeAction);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setStencilStoreAction(MTL::StoreAction storeAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilStoreAction_), storeAction);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setColorStoreActionOptions_atIndex_), storeActionOptions, colorAttachmentIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setDepthStoreActionOptions(MTL::StoreActionOptions 
storeActionOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStoreActionOptions_), storeActionOptions);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilStoreActionOptions_), storeActionOptions);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectSamplerState_atIndex_), 
sampler, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerStates(const MTL::SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectThreadgroupMemoryLength_atIndex_), length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, 
NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerStates(const MTL::SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadgroupsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreadgroups(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawMeshThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), indirectBuffer, indirectBufferOffset, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_), primitiveType, vertexStart, vertexCount, instanceCount);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_), primitiveType, vertexStart, vertexCount);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_), primitiveType, vertexStart, vertexCount, instanceCount, baseInstance);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount, baseVertex, baseInstance);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPrimitives_indirectBuffer_indirectBufferOffset_), primitiveType, indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void 
MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexType_indexBuffer_indexBufferOffset_indirectBuffer_indirectBufferOffset_), primitiveType, indexType, indexBuffer, indexBufferOffset, indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::textureBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(textureBarrier));\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::updateFence(const MTL::Fence* fence, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_afterStages_), fence, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::waitForFence(const MTL::Fence* fence, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_beforeStages_), fence, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTessellationFactorBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationFactorBuffer_offset_instanceStride_), buffer, offset, instanceStride);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTessellationFactorScale(float scale)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationFactorScale_), scale);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_), 
numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, instanceCount, baseInstance);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawPatches(NS::UInteger numberOfPatchControlPoints, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPatches_patchIndexBuffer_patchIndexBufferOffset_indirectBuffer_indirectBufferOffset_), numberOfPatchControlPoints, patchIndexBuffer, patchIndexBufferOffset, indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, instanceCount, baseInstance);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPatches_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_indirectBuffer_indirectBufferOffset_), numberOfPatchControlPoints, 
patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderCommandEncoder::tileWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(tileWidth));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderCommandEncoder::tileHeight() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(tileHeight));\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileBytes(const void* bytes, NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileBytes_length_atIndex_), bytes, length, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileBufferOffset(NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileBufferOffset_atIndex_), offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileTexture(const MTL::Texture* texture, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileTexture_atIndex_), texture, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileTextures(const MTL::Texture* const textures[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileTextures_withRange_), textures, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileSamplerState_atIndex_), sampler, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileSamplerStates_withRange_), samplers, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileVisibleFunctionTables_withBufferRange_), functionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileIntersectionFunctionTables(const 
MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setTileAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::dispatchThreadsPerTile(MTL::Size threadsPerTile)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(dispatchThreadsPerTile_), threadsPerTile);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_offset_atIndex_), length, offset, index);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResources_count_usage_), resources, count, usage);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResource_usage_stages_), resource, usage, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useResources_count_usage_stages_), 
resources, count, usage, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useHeap(const MTL::Heap* heap)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeap_), heap);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useHeap(const MTL::Heap* heap, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeap_stages_), heap, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count, MTL::RenderStages stages)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(useHeaps_count_stages_), heaps, count, stages);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_withRange_), indirectCommandBuffer, executionRange);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandbuffer, const MTL::Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_), indirectCommandbuffer, indirectRangeBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::memoryBarrier(MTL::BarrierScope scope, MTL::RenderStages after, MTL::RenderStages before)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(memoryBarrierWithScope_afterStages_beforeStages_), scope, after, before);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::memoryBarrier(const MTL::Resource* const resources[], NS::UInteger count, MTL::RenderStages after, MTL::RenderStages before)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(memoryBarrierWithResources_count_afterStages_beforeStages_), resources, count, after, before);\n}\n\n_MTL_INLINE void MTL::RenderCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier);\n}\n\nnamespace MTL\n{\nclass IndirectRenderCommand : public NS::Referencing<IndirectRenderCommand>\n{\npublic:\n    void setRenderPipelineState(const class RenderPipelineState* pipelineState);\n\n    void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void setFragmentBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const class Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride);\n\n    void drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const class Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride);\n\n    void drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance);\n\n    void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, 
NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance);\n\n    void setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index);\n\n    void setObjectBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void setMeshBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup);\n\n    void drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup);\n\n    void setBarrier();\n\n    void clearBarrier();\n\n    void reset();\n};\n\nclass IndirectComputeCommand : public NS::Referencing<IndirectComputeCommand>\n{\npublic:\n    void setComputePipelineState(const class ComputePipelineState* pipelineState);\n\n    void setKernelBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void setKernelBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index);\n\n    void concurrentDispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup);\n\n    void concurrentDispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup);\n\n    void setBarrier();\n\n    void clearBarrier();\n\n    void setImageblockWidth(NS::UInteger width, NS::UInteger height);\n\n    void reset();\n\n    void setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index);\n\n    void setStageInRegion(MTL::Region region);\n};\n\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setRenderPipelineState(const MTL::RenderPipelineState* pipelineState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineState_), pipelineState);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setFragmentBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, instanceCount, baseInstance, buffer, offset, instanceStride);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, instanceCount, baseInstance, buffer, offset, instanceStride);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_), primitiveType, vertexStart, vertexCount, instanceCount, baseInstance);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount, baseVertex, baseInstance);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectThreadgroupMemoryLength_atIndex_), length, index);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setObjectBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void 
MTL::IndirectRenderCommand::setMeshBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadgroupsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::setBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBarrier));\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::clearBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(clearBarrier));\n}\n\n_MTL_INLINE void MTL::IndirectRenderCommand::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setComputePipelineState(const MTL::ComputePipelineState* pipelineState)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineState_), pipelineState);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setKernelBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setKernelBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setKernelBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, 
NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setKernelBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::concurrentDispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(concurrentDispatchThreadgroups_threadsPerThreadgroup_), threadgroupsPerGrid, threadsPerThreadgroup);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::concurrentDispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(concurrentDispatchThreads_threadsPerThreadgroup_), threadsPerGrid, threadsPerThreadgroup);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBarrier));\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::clearBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(clearBarrier));\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setImageblockWidth(NS::UInteger width, NS::UInteger height)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setImageblockWidth_height_), width, height);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_atIndex_), length, index);\n}\n\n_MTL_INLINE void MTL::IndirectComputeCommand::setStageInRegion(MTL::Region region)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStageInRegion_), region);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_OPTIONS(NS::UInteger, IntersectionFunctionSignature) {\n    IntersectionFunctionSignatureNone = 0,\n    IntersectionFunctionSignatureInstancing = 1,\n    
IntersectionFunctionSignatureTriangleData = 2,\n    IntersectionFunctionSignatureWorldSpaceData = 4,\n    IntersectionFunctionSignatureInstanceMotion = 8,\n    IntersectionFunctionSignaturePrimitiveMotion = 16,\n    IntersectionFunctionSignatureExtendedLimits = 32,\n    IntersectionFunctionSignatureMaxLevels = 64,\n    IntersectionFunctionSignatureCurveData = 128,\n};\n\nclass IntersectionFunctionTableDescriptor : public NS::Copying<IntersectionFunctionTableDescriptor>\n{\npublic:\n    static class IntersectionFunctionTableDescriptor* alloc();\n\n    class IntersectionFunctionTableDescriptor*        init();\n\n    static class IntersectionFunctionTableDescriptor* intersectionFunctionTableDescriptor();\n\n    NS::UInteger                                      functionCount() const;\n    void                                              setFunctionCount(NS::UInteger functionCount);\n};\n\nclass IntersectionFunctionTable : public NS::Referencing<IntersectionFunctionTable, Resource>\n{\npublic:\n    void            setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index);\n\n    void            setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range);\n\n    MTL::ResourceID gpuResourceID() const;\n\n    void            setFunction(const class FunctionHandle* function, NS::UInteger index);\n\n    void            setFunctions(const class FunctionHandle* const functions[], NS::Range range);\n\n    void            setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index);\n\n    void            setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range);\n\n    void            setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index);\n\n    void            setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range);\n\n    void            
setVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex);\n\n    void            setVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range bufferRange);\n};\n\n}\n\n_MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::IntersectionFunctionTableDescriptor>(_MTL_PRIVATE_CLS(MTLIntersectionFunctionTableDescriptor));\n}\n\n_MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::init()\n{\n    return NS::Object::init<MTL::IntersectionFunctionTableDescriptor>();\n}\n\n_MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::intersectionFunctionTableDescriptor()\n{\n    return Object::sendMessage<MTL::IntersectionFunctionTableDescriptor*>(_MTL_PRIVATE_CLS(MTLIntersectionFunctionTableDescriptor), _MTL_PRIVATE_SEL(intersectionFunctionTableDescriptor));\n}\n\n_MTL_INLINE NS::UInteger MTL::IntersectionFunctionTableDescriptor::functionCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(functionCount));\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTableDescriptor::setFunctionCount(NS::UInteger functionCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctionCount_), functionCount);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range);\n}\n\n_MTL_INLINE MTL::ResourceID MTL::IntersectionFunctionTable::gpuResourceID() const\n{\n    return 
Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setFunction(const MTL::FunctionHandle* function, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunction_atIndex_), function, index);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setFunctions(const MTL::FunctionHandle* const functions[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctions_withRange_), functions, range);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_atIndex_), signature, index);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_withRange_), signature, range);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOpaqueCurveIntersectionFunctionWithSignature_atIndex_), signature, index);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOpaqueCurveIntersectionFunctionWithSignature_withRange_), signature, range);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atBufferIndex_), functionTable, 
bufferIndex);\n}\n\n_MTL_INLINE void MTL::IntersectionFunctionTable::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range bufferRange)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withBufferRange_), functionTables, bufferRange);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, IOStatus) {\n    IOStatusPending = 0,\n    IOStatusCancelled = 1,\n    IOStatusError = 2,\n    IOStatusComplete = 3,\n};\n\nusing IOCommandBufferHandler = void (^)(class IOCommandBuffer*);\n\nusing IOCommandBufferHandlerFunction = std::function<void(class IOCommandBuffer*)>;\n\nclass IOCommandBuffer : public NS::Referencing<IOCommandBuffer>\n{\npublic:\n    void          addCompletedHandler(const MTL::IOCommandBufferHandlerFunction& function);\n\n    void          addCompletedHandler(const MTL::IOCommandBufferHandler block);\n\n    void          loadBytes(const void* pointer, NS::UInteger size, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset);\n\n    void          loadBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger size, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset);\n\n    void          loadTexture(const class Texture* texture, NS::UInteger slice, NS::UInteger level, MTL::Size size, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Origin destinationOrigin, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset);\n\n    void          copyStatusToBuffer(const class Buffer* buffer, NS::UInteger offset);\n\n    void          commit();\n\n    void          waitUntilCompleted();\n\n    void          tryCancel();\n\n    void          addBarrier();\n\n    void          pushDebugGroup(const NS::String* string);\n\n    void          popDebugGroup();\n\n    void          enqueue();\n\n    void          wait(const class SharedEvent* event, uint64_t value);\n\n    void          signalEvent(const class 
SharedEvent* event, uint64_t value);\n\n    NS::String*   label() const;\n    void          setLabel(const NS::String* label);\n\n    MTL::IOStatus status() const;\n\n    NS::Error*    error() const;\n};\n\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::addCompletedHandler(const MTL::IOCommandBufferHandlerFunction& function)\n{\n    __block IOCommandBufferHandlerFunction blockFunction = function;\n\n    addCompletedHandler(^(IOCommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); });\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::addCompletedHandler(const MTL::IOCommandBufferHandler block)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addCompletedHandler_), block);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::loadBytes(const void* pointer, NS::UInteger size, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(loadBytes_size_sourceHandle_sourceHandleOffset_), pointer, size, sourceHandle, sourceHandleOffset);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::loadBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger size, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(loadBuffer_offset_size_sourceHandle_sourceHandleOffset_), buffer, offset, size, sourceHandle, sourceHandleOffset);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::loadTexture(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level, MTL::Size size, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Origin destinationOrigin, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(loadTexture_slice_level_size_sourceBytesPerRow_sourceBytesPerImage_destinationOrigin_sourceHandle_sourceHandleOffset_), texture, slice, level, size, sourceBytesPerRow, sourceBytesPerImage, destinationOrigin, sourceHandle, 
sourceHandleOffset);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::copyStatusToBuffer(const MTL::Buffer* buffer, NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyStatusToBuffer_offset_), buffer, offset);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::commit()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(commit));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::waitUntilCompleted()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitUntilCompleted));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::tryCancel()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(tryCancel));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::addBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addBarrier));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::pushDebugGroup(const NS::String* string)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::popDebugGroup()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(popDebugGroup));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::enqueue()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(enqueue));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::wait(const MTL::SharedEvent* event, uint64_t value)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForEvent_value_), event, value);\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::signalEvent(const MTL::SharedEvent* event, uint64_t value)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(signalEvent_value_), event, value);\n}\n\n_MTL_INLINE NS::String* MTL::IOCommandBuffer::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::IOCommandBuffer::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::IOStatus MTL::IOCommandBuffer::status() const\n{\n    return 
Object::sendMessage<MTL::IOStatus>(this, _MTL_PRIVATE_SEL(status));\n}\n\n_MTL_INLINE NS::Error* MTL::IOCommandBuffer::error() const\n{\n    return Object::sendMessage<NS::Error*>(this, _MTL_PRIVATE_SEL(error));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, IOPriority) {\n    IOPriorityHigh = 0,\n    IOPriorityNormal = 1,\n    IOPriorityLow = 2,\n};\n\n_MTL_ENUM(NS::Integer, IOCommandQueueType) {\n    IOCommandQueueTypeConcurrent = 0,\n    IOCommandQueueTypeSerial = 1,\n};\n\n_MTL_CONST(NS::ErrorDomain, IOErrorDomain);\n\n_MTL_ENUM(NS::Integer, IOError) {\n    IOErrorURLInvalid = 1,\n    IOErrorInternal = 2,\n};\n\nclass IOCommandQueue : public NS::Referencing<IOCommandQueue>\n{\npublic:\n    void                   enqueueBarrier();\n\n    class IOCommandBuffer* commandBuffer();\n\n    class IOCommandBuffer* commandBufferWithUnretainedReferences();\n\n    NS::String*            label() const;\n    void                   setLabel(const NS::String* label);\n};\n\nclass IOScratchBuffer : public NS::Referencing<IOScratchBuffer>\n{\npublic:\n    class Buffer* buffer() const;\n};\n\nclass IOScratchBufferAllocator : public NS::Referencing<IOScratchBufferAllocator>\n{\npublic:\n    class IOScratchBuffer* newScratchBuffer(NS::UInteger minimumSize);\n};\n\nclass IOCommandQueueDescriptor : public NS::Copying<IOCommandQueueDescriptor>\n{\npublic:\n    static class IOCommandQueueDescriptor* alloc();\n\n    class IOCommandQueueDescriptor*        init();\n\n    NS::UInteger                           maxCommandBufferCount() const;\n    void                                   setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount);\n\n    MTL::IOPriority                        priority() const;\n    void                                   setPriority(MTL::IOPriority priority);\n\n    MTL::IOCommandQueueType                type() const;\n    void                                   setType(MTL::IOCommandQueueType type);\n\n    NS::UInteger                           
maxCommandsInFlight() const;\n    void                                   setMaxCommandsInFlight(NS::UInteger maxCommandsInFlight);\n\n    class IOScratchBufferAllocator*        scratchBufferAllocator() const;\n    void                                   setScratchBufferAllocator(const class IOScratchBufferAllocator* scratchBufferAllocator);\n};\n\nclass IOFileHandle : public NS::Referencing<IOFileHandle>\n{\npublic:\n    NS::String* label() const;\n    void        setLabel(const NS::String* label);\n};\n\n}\n\n_MTL_PRIVATE_DEF_WEAK_CONST(NS::ErrorDomain, IOErrorDomain);\n\n_MTL_INLINE void MTL::IOCommandQueue::enqueueBarrier()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(enqueueBarrier));\n}\n\n_MTL_INLINE MTL::IOCommandBuffer* MTL::IOCommandQueue::commandBuffer()\n{\n    return Object::sendMessage<MTL::IOCommandBuffer*>(this, _MTL_PRIVATE_SEL(commandBuffer));\n}\n\n_MTL_INLINE MTL::IOCommandBuffer* MTL::IOCommandQueue::commandBufferWithUnretainedReferences()\n{\n    return Object::sendMessage<MTL::IOCommandBuffer*>(this, _MTL_PRIVATE_SEL(commandBufferWithUnretainedReferences));\n}\n\n_MTL_INLINE NS::String* MTL::IOCommandQueue::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::IOCommandQueue::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Buffer* MTL::IOScratchBuffer::buffer() const\n{\n    return Object::sendMessage<MTL::Buffer*>(this, _MTL_PRIVATE_SEL(buffer));\n}\n\n_MTL_INLINE MTL::IOScratchBuffer* MTL::IOScratchBufferAllocator::newScratchBuffer(NS::UInteger minimumSize)\n{\n    return Object::sendMessage<MTL::IOScratchBuffer*>(this, _MTL_PRIVATE_SEL(newScratchBufferWithMinimumSize_), minimumSize);\n}\n\n_MTL_INLINE MTL::IOCommandQueueDescriptor* MTL::IOCommandQueueDescriptor::alloc()\n{\n    return 
NS::Object::alloc<MTL::IOCommandQueueDescriptor>(_MTL_PRIVATE_CLS(MTLIOCommandQueueDescriptor));\n}\n\n_MTL_INLINE MTL::IOCommandQueueDescriptor* MTL::IOCommandQueueDescriptor::init()\n{\n    return NS::Object::init<MTL::IOCommandQueueDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::IOCommandQueueDescriptor::maxCommandBufferCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxCommandBufferCount));\n}\n\n_MTL_INLINE void MTL::IOCommandQueueDescriptor::setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxCommandBufferCount_), maxCommandBufferCount);\n}\n\n_MTL_INLINE MTL::IOPriority MTL::IOCommandQueueDescriptor::priority() const\n{\n    return Object::sendMessage<MTL::IOPriority>(this, _MTL_PRIVATE_SEL(priority));\n}\n\n_MTL_INLINE void MTL::IOCommandQueueDescriptor::setPriority(MTL::IOPriority priority)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPriority_), priority);\n}\n\n_MTL_INLINE MTL::IOCommandQueueType MTL::IOCommandQueueDescriptor::type() const\n{\n    return Object::sendMessage<MTL::IOCommandQueueType>(this, _MTL_PRIVATE_SEL(type));\n}\n\n_MTL_INLINE void MTL::IOCommandQueueDescriptor::setType(MTL::IOCommandQueueType type)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setType_), type);\n}\n\n_MTL_INLINE NS::UInteger MTL::IOCommandQueueDescriptor::maxCommandsInFlight() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxCommandsInFlight));\n}\n\n_MTL_INLINE void MTL::IOCommandQueueDescriptor::setMaxCommandsInFlight(NS::UInteger maxCommandsInFlight)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxCommandsInFlight_), maxCommandsInFlight);\n}\n\n_MTL_INLINE MTL::IOScratchBufferAllocator* MTL::IOCommandQueueDescriptor::scratchBufferAllocator() const\n{\n    return Object::sendMessage<MTL::IOScratchBufferAllocator*>(this, _MTL_PRIVATE_SEL(scratchBufferAllocator));\n}\n\n_MTL_INLINE 
void MTL::IOCommandQueueDescriptor::setScratchBufferAllocator(const MTL::IOScratchBufferAllocator* scratchBufferAllocator)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setScratchBufferAllocator_), scratchBufferAllocator);\n}\n\n_MTL_INLINE NS::String* MTL::IOFileHandle::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::IOFileHandle::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n#pragma once\n\nnamespace MTL\n{\nusing IOCompresionContext=void*;\n\n_MTL_ENUM(NS::Integer, IOCompressionStatus) {\n    IOCompressionStatusComplete = 0,\n    IOCompressionStatusError = 1,\n};\n\nsize_t IOCompressionContextDefaultChunkSize();\n\nIOCompresionContext IOCreateCompressionContext(const char* path, IOCompressionMethod type, size_t chunkSize);\n\nvoid IOCompressionContextAppendData(IOCompresionContext context, const void* data, size_t size);\n\nIOCompressionStatus IOFlushAndDestroyCompressionContext(IOCompresionContext context);\n\n}\n\n#if defined(MTL_PRIVATE_IMPLEMENTATION)\n\nnamespace MTL::Private {\n\nMTL_DEF_FUNC(MTLIOCompressionContextDefaultChunkSize, size_t (*)(void));\n\nMTL_DEF_FUNC( MTLIOCreateCompressionContext, void* (*)(const char*, MTL::IOCompressionMethod, size_t) );\n\nMTL_DEF_FUNC( MTLIOCompressionContextAppendData, void (*)(void*, const void*, size_t) );\n\nMTL_DEF_FUNC( MTLIOFlushAndDestroyCompressionContext, MTL::IOCompressionStatus (*)(void*) );\n\n}\n\n_NS_EXPORT size_t MTL::IOCompressionContextDefaultChunkSize()\n{\n    return MTL::Private::MTLIOCompressionContextDefaultChunkSize();\n}\n\n_NS_EXPORT void* MTL::IOCreateCompressionContext(const char* path, IOCompressionMethod type, size_t chunkSize)\n{\n    if ( MTL::Private::MTLIOCreateCompressionContext )\n    {\n        return MTL::Private::MTLIOCreateCompressionContext( path, type, chunkSize );\n    }\n    return nullptr;\n}\n\n_NS_EXPORT void 
MTL::IOCompressionContextAppendData(void* context, const void* data, size_t size)\n{\n    if ( MTL::Private::MTLIOCompressionContextAppendData )\n    {\n        MTL::Private::MTLIOCompressionContextAppendData( context, data, size );\n    }\n}\n\n_NS_EXPORT MTL::IOCompressionStatus MTL::IOFlushAndDestroyCompressionContext(void* context)\n{\n    if ( MTL::Private::MTLIOFlushAndDestroyCompressionContext )\n    {\n        return MTL::Private::MTLIOFlushAndDestroyCompressionContext( context );\n    }\n    return MTL::IOCompressionStatusError;\n}\n\n#endif\n\n#pragma once\n\nnamespace MTL\n{\nclass LinkedFunctions : public NS::Copying<LinkedFunctions>\n{\npublic:\n    static class LinkedFunctions* alloc();\n\n    class LinkedFunctions*        init();\n\n    static class LinkedFunctions* linkedFunctions();\n\n    NS::Array*                    functions() const;\n    void                          setFunctions(const NS::Array* functions);\n\n    NS::Array*                    binaryFunctions() const;\n    void                          setBinaryFunctions(const NS::Array* binaryFunctions);\n\n    NS::Dictionary*               groups() const;\n    void                          setGroups(const NS::Dictionary* groups);\n\n    NS::Array*                    privateFunctions() const;\n    void                          setPrivateFunctions(const NS::Array* privateFunctions);\n};\n\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::LinkedFunctions::alloc()\n{\n    return NS::Object::alloc<MTL::LinkedFunctions>(_MTL_PRIVATE_CLS(MTLLinkedFunctions));\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::LinkedFunctions::init()\n{\n    return NS::Object::init<MTL::LinkedFunctions>();\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::LinkedFunctions::linkedFunctions()\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(_MTL_PRIVATE_CLS(MTLLinkedFunctions), _MTL_PRIVATE_SEL(linkedFunctions));\n}\n\n_MTL_INLINE NS::Array* MTL::LinkedFunctions::functions() const\n{\n    return 
Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(functions));\n}\n\n_MTL_INLINE void MTL::LinkedFunctions::setFunctions(const NS::Array* functions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctions_), functions);\n}\n\n_MTL_INLINE NS::Array* MTL::LinkedFunctions::binaryFunctions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryFunctions));\n}\n\n_MTL_INLINE void MTL::LinkedFunctions::setBinaryFunctions(const NS::Array* binaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryFunctions_), binaryFunctions);\n}\n\n_MTL_INLINE NS::Dictionary* MTL::LinkedFunctions::groups() const\n{\n    return Object::sendMessage<NS::Dictionary*>(this, _MTL_PRIVATE_SEL(groups));\n}\n\n_MTL_INLINE void MTL::LinkedFunctions::setGroups(const NS::Dictionary* groups)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setGroups_), groups);\n}\n\n_MTL_INLINE NS::Array* MTL::LinkedFunctions::privateFunctions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(privateFunctions));\n}\n\n_MTL_INLINE void MTL::LinkedFunctions::setPrivateFunctions(const NS::Array* privateFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPrivateFunctions_), privateFunctions);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::Integer, LogLevel) {\n    LogLevelUndefined = 0,\n    LogLevelDebug = 1,\n    LogLevelInfo = 2,\n    LogLevelNotice = 3,\n    LogLevelError = 4,\n    LogLevelFault = 5,\n};\n\nusing LogHandlerFunction = std::function<void(NS::String* subsystem, NS::String* category, MTL::LogLevel logLevel, NS::String* message)>;\n\nclass LogState : public NS::Referencing<LogState>\n{\npublic:\n    void addLogHandler(void (^block)(NS::String*, NS::String*, MTL::LogLevel, NS::String*));\n    void addLogHandler(const LogHandlerFunction& handler);\n};\n\nclass LogStateDescriptor : public NS::Copying<LogStateDescriptor>\n{\npublic:\n    static class LogStateDescriptor* 
alloc();\n\n    class LogStateDescriptor*        init();\n\n    MTL::LogLevel                    level() const;\n    void                             setLevel(MTL::LogLevel level);\n\n    NS::Integer                      bufferSize() const;\n    void                             setBufferSize(NS::Integer bufferSize);\n};\n\n_MTL_CONST(NS::ErrorDomain, LogStateErrorDomain);\n\n_MTL_ENUM(NS::UInteger, LogStateError) {\n    LogStateErrorInvalidSize = 1,\n    LogStateErrorInvalid = 2,\n};\n\n}\n\n_MTL_PRIVATE_DEF_WEAK_CONST(NS::ErrorDomain, LogStateErrorDomain);\n\n_MTL_INLINE void MTL::LogState::addLogHandler(void (^block)(NS::String*, NS::String*, MTL::LogLevel, NS::String*))\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addLogHandler_), block);\n}\n\n_MTL_INLINE void MTL::LogState::addLogHandler(const MTL::LogHandlerFunction& handler)\n{\n    __block LogHandlerFunction function = handler;\n\n    addLogHandler(^void(NS::String* subsystem, NS::String* category, MTL::LogLevel logLevel, NS::String* message){\n        function(subsystem, category, logLevel, message);\n\t});\n}\n\n_MTL_INLINE MTL::LogStateDescriptor* MTL::LogStateDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::LogStateDescriptor>(_MTL_PRIVATE_CLS(MTLLogStateDescriptor));\n}\n\n_MTL_INLINE MTL::LogStateDescriptor* MTL::LogStateDescriptor::init()\n{\n    return NS::Object::init<MTL::LogStateDescriptor>();\n}\n\n_MTL_INLINE MTL::LogLevel MTL::LogStateDescriptor::level() const\n{\n    return Object::sendMessage<MTL::LogLevel>(this, _MTL_PRIVATE_SEL(level));\n}\n\n_MTL_INLINE void MTL::LogStateDescriptor::setLevel(MTL::LogLevel level)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLevel_), level);\n}\n\n_MTL_INLINE NS::Integer MTL::LogStateDescriptor::bufferSize() const\n{\n    return Object::sendMessage<NS::Integer>(this, _MTL_PRIVATE_SEL(bufferSize));\n}\n\n_MTL_INLINE void MTL::LogStateDescriptor::setBufferSize(NS::Integer bufferSize)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setBufferSize_), bufferSize);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass ParallelRenderCommandEncoder : public NS::Referencing<ParallelRenderCommandEncoder, CommandEncoder>\n{\npublic:\n    class RenderCommandEncoder* renderCommandEncoder();\n\n    void                        setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex);\n\n    void                        setDepthStoreAction(MTL::StoreAction storeAction);\n\n    void                        setStencilStoreAction(MTL::StoreAction storeAction);\n\n    void                        setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex);\n\n    void                        setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions);\n\n    void                        setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions);\n};\n\n}\n\n_MTL_INLINE MTL::RenderCommandEncoder* MTL::ParallelRenderCommandEncoder::renderCommandEncoder()\n{\n    return Object::sendMessage<MTL::RenderCommandEncoder*>(this, _MTL_PRIVATE_SEL(renderCommandEncoder));\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setColorStoreAction_atIndex_), storeAction, colorAttachmentIndex);\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setDepthStoreAction(MTL::StoreAction storeAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStoreAction_), storeAction);\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setStencilStoreAction(MTL::StoreAction storeAction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilStoreAction_), storeAction);\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setColorStoreActionOptions_atIndex_), storeActionOptions, colorAttachmentIndex);\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthStoreActionOptions_), storeActionOptions);\n}\n\n_MTL_INLINE void MTL::ParallelRenderCommandEncoder::setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilStoreActionOptions_), storeActionOptions);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass RasterizationRateSampleArray : public NS::Referencing<RasterizationRateSampleArray>\n{\npublic:\n    static class RasterizationRateSampleArray* alloc();\n\n    class RasterizationRateSampleArray*        init();\n\n    NS::Number*                                object(NS::UInteger index);\n\n    void                                       setObject(const NS::Number* value, NS::UInteger index);\n};\n\nclass RasterizationRateLayerDescriptor : public NS::Copying<RasterizationRateLayerDescriptor>\n{\npublic:\n    static class RasterizationRateLayerDescriptor* alloc();\n\n    MTL::RasterizationRateLayerDescriptor*         init();\n\n    MTL::RasterizationRateLayerDescriptor*         init(MTL::Size sampleCount);\n\n    MTL::RasterizationRateLayerDescriptor*         init(MTL::Size sampleCount, const float* horizontal, const float* vertical);\n\n    MTL::Size                                      sampleCount() const;\n\n    MTL::Size                                      maxSampleCount() const;\n\n    float*                                         horizontalSampleStorage() const;\n\n    float*                                         verticalSampleStorage() const;\n\n    class RasterizationRateSampleArray*            horizontal() const;\n\n    class RasterizationRateSampleArray*            vertical() const;\n\n    void                   
                        setSampleCount(MTL::Size sampleCount);\n};\n\nclass RasterizationRateLayerArray : public NS::Referencing<RasterizationRateLayerArray>\n{\npublic:\n    static class RasterizationRateLayerArray* alloc();\n\n    class RasterizationRateLayerArray*        init();\n\n    class RasterizationRateLayerDescriptor*   object(NS::UInteger layerIndex);\n\n    void                                      setObject(const class RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex);\n};\n\nclass RasterizationRateMapDescriptor : public NS::Copying<RasterizationRateMapDescriptor>\n{\npublic:\n    static class RasterizationRateMapDescriptor* alloc();\n\n    class RasterizationRateMapDescriptor*        init();\n\n    static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize);\n\n    static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize, const class RasterizationRateLayerDescriptor* layer);\n\n    static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize, NS::UInteger layerCount, const class RasterizationRateLayerDescriptor* const* layers);\n\n    class RasterizationRateLayerDescriptor*      layer(NS::UInteger layerIndex);\n\n    void                                         setLayer(const class RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex);\n\n    class RasterizationRateLayerArray*           layers() const;\n\n    MTL::Size                                    screenSize() const;\n    void                                         setScreenSize(MTL::Size screenSize);\n\n    NS::String*                                  label() const;\n    void                                         setLabel(const NS::String* label);\n\n    NS::UInteger                                 layerCount() const;\n};\n\nclass RasterizationRateMap : public NS::Referencing<RasterizationRateMap>\n{\npublic:\n    class Device*     device() const;\n\n  
  NS::String*       label() const;\n\n    MTL::Size         screenSize() const;\n\n    MTL::Size         physicalGranularity() const;\n\n    NS::UInteger      layerCount() const;\n\n    MTL::SizeAndAlign parameterBufferSizeAndAlign() const;\n\n    void              copyParameterDataToBuffer(const class Buffer* buffer, NS::UInteger offset);\n\n    MTL::Size         physicalSize(NS::UInteger layerIndex);\n\n    MTL::Coordinate2D mapScreenToPhysicalCoordinates(MTL::Coordinate2D screenCoordinates, NS::UInteger layerIndex);\n\n    MTL::Coordinate2D mapPhysicalToScreenCoordinates(MTL::Coordinate2D physicalCoordinates, NS::UInteger layerIndex);\n};\n\n}\n\n_MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateSampleArray::alloc()\n{\n    return NS::Object::alloc<MTL::RasterizationRateSampleArray>(_MTL_PRIVATE_CLS(MTLRasterizationRateSampleArray));\n}\n\n_MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateSampleArray::init()\n{\n    return NS::Object::init<MTL::RasterizationRateSampleArray>();\n}\n\n_MTL_INLINE NS::Number* MTL::RasterizationRateSampleArray::object(NS::UInteger index)\n{\n    return Object::sendMessage<NS::Number*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index);\n}\n\n_MTL_INLINE void MTL::RasterizationRateSampleArray::setObject(const NS::Number* value, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), value, index);\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RasterizationRateLayerDescriptor>(_MTL_PRIVATE_CLS(MTLRasterizationRateLayerDescriptor));\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init()\n{\n    return NS::Object::init<MTL::RasterizationRateLayerDescriptor>();\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init(MTL::Size sampleCount)\n{\n    return 
Object::sendMessage<MTL::RasterizationRateLayerDescriptor*>(this, _MTL_PRIVATE_SEL(initWithSampleCount_), sampleCount);\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init(MTL::Size sampleCount, const float* horizontal, const float* vertical)\n{\n    return Object::sendMessage<MTL::RasterizationRateLayerDescriptor*>(this, _MTL_PRIVATE_SEL(initWithSampleCount_horizontal_vertical_), sampleCount, horizontal, vertical);\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateLayerDescriptor::sampleCount() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateLayerDescriptor::maxSampleCount() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(maxSampleCount));\n}\n\n_MTL_INLINE float* MTL::RasterizationRateLayerDescriptor::horizontalSampleStorage() const\n{\n    return Object::sendMessage<float*>(this, _MTL_PRIVATE_SEL(horizontalSampleStorage));\n}\n\n_MTL_INLINE float* MTL::RasterizationRateLayerDescriptor::verticalSampleStorage() const\n{\n    return Object::sendMessage<float*>(this, _MTL_PRIVATE_SEL(verticalSampleStorage));\n}\n\n_MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateLayerDescriptor::horizontal() const\n{\n    return Object::sendMessage<MTL::RasterizationRateSampleArray*>(this, _MTL_PRIVATE_SEL(horizontal));\n}\n\n_MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateLayerDescriptor::vertical() const\n{\n    return Object::sendMessage<MTL::RasterizationRateSampleArray*>(this, _MTL_PRIVATE_SEL(vertical));\n}\n\n_MTL_INLINE void MTL::RasterizationRateLayerDescriptor::setSampleCount(MTL::Size sampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount);\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateLayerArray::alloc()\n{\n    return 
NS::Object::alloc<MTL::RasterizationRateLayerArray>(_MTL_PRIVATE_CLS(MTLRasterizationRateLayerArray));\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateLayerArray::init()\n{\n    return NS::Object::init<MTL::RasterizationRateLayerArray>();\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerArray::object(NS::UInteger layerIndex)\n{\n    return Object::sendMessage<MTL::RasterizationRateLayerDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), layerIndex);\n}\n\n_MTL_INLINE void MTL::RasterizationRateLayerArray::setObject(const MTL::RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), layer, layerIndex);\n}\n\n_MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RasterizationRateMapDescriptor>(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor));\n}\n\n_MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::init()\n{\n    return NS::Object::init<MTL::RasterizationRateMapDescriptor>();\n}\n\n_MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize)\n{\n    return Object::sendMessage<MTL::RasterizationRateMapDescriptor*>(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_), screenSize);\n}\n\n_MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize, const MTL::RasterizationRateLayerDescriptor* layer)\n{\n    return Object::sendMessage<MTL::RasterizationRateMapDescriptor*>(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_layer_), screenSize, layer);\n}\n\n_MTL_INLINE MTL::RasterizationRateMapDescriptor* 
MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize, NS::UInteger layerCount, const MTL::RasterizationRateLayerDescriptor* const* layers)\n{\n    return Object::sendMessage<MTL::RasterizationRateMapDescriptor*>(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_layerCount_layers_), screenSize, layerCount, layers);\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateMapDescriptor::layer(NS::UInteger layerIndex)\n{\n    return Object::sendMessage<MTL::RasterizationRateLayerDescriptor*>(this, _MTL_PRIVATE_SEL(layerAtIndex_), layerIndex);\n}\n\n_MTL_INLINE void MTL::RasterizationRateMapDescriptor::setLayer(const MTL::RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLayer_atIndex_), layer, layerIndex);\n}\n\n_MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateMapDescriptor::layers() const\n{\n    return Object::sendMessage<MTL::RasterizationRateLayerArray*>(this, _MTL_PRIVATE_SEL(layers));\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateMapDescriptor::screenSize() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(screenSize));\n}\n\n_MTL_INLINE void MTL::RasterizationRateMapDescriptor::setScreenSize(MTL::Size screenSize)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setScreenSize_), screenSize);\n}\n\n_MTL_INLINE NS::String* MTL::RasterizationRateMapDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::RasterizationRateMapDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE NS::UInteger MTL::RasterizationRateMapDescriptor::layerCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(layerCount));\n}\n\n_MTL_INLINE 
MTL::Device* MTL::RasterizationRateMap::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::RasterizationRateMap::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateMap::screenSize() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(screenSize));\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateMap::physicalGranularity() const\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(physicalGranularity));\n}\n\n_MTL_INLINE NS::UInteger MTL::RasterizationRateMap::layerCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(layerCount));\n}\n\n_MTL_INLINE MTL::SizeAndAlign MTL::RasterizationRateMap::parameterBufferSizeAndAlign() const\n{\n    return Object::sendMessage<MTL::SizeAndAlign>(this, _MTL_PRIVATE_SEL(parameterBufferSizeAndAlign));\n}\n\n_MTL_INLINE void MTL::RasterizationRateMap::copyParameterDataToBuffer(const MTL::Buffer* buffer, NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyParameterDataToBuffer_offset_), buffer, offset);\n}\n\n_MTL_INLINE MTL::Size MTL::RasterizationRateMap::physicalSize(NS::UInteger layerIndex)\n{\n    return Object::sendMessage<MTL::Size>(this, _MTL_PRIVATE_SEL(physicalSizeForLayer_), layerIndex);\n}\n\n_MTL_INLINE MTL::Coordinate2D MTL::RasterizationRateMap::mapScreenToPhysicalCoordinates(MTL::Coordinate2D screenCoordinates, NS::UInteger layerIndex)\n{\n    return Object::sendMessage<MTL::Coordinate2D>(this, _MTL_PRIVATE_SEL(mapScreenToPhysicalCoordinates_forLayer_), screenCoordinates, layerIndex);\n}\n\n_MTL_INLINE MTL::Coordinate2D MTL::RasterizationRateMap::mapPhysicalToScreenCoordinates(MTL::Coordinate2D physicalCoordinates, NS::UInteger layerIndex)\n{\n    return Object::sendMessage<MTL::Coordinate2D>(this, 
_MTL_PRIVATE_SEL(mapPhysicalToScreenCoordinates_forLayer_), physicalCoordinates, layerIndex);\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, BlendFactor) {\n    BlendFactorZero = 0,\n    BlendFactorOne = 1,\n    BlendFactorSourceColor = 2,\n    BlendFactorOneMinusSourceColor = 3,\n    BlendFactorSourceAlpha = 4,\n    BlendFactorOneMinusSourceAlpha = 5,\n    BlendFactorDestinationColor = 6,\n    BlendFactorOneMinusDestinationColor = 7,\n    BlendFactorDestinationAlpha = 8,\n    BlendFactorOneMinusDestinationAlpha = 9,\n    BlendFactorSourceAlphaSaturated = 10,\n    BlendFactorBlendColor = 11,\n    BlendFactorOneMinusBlendColor = 12,\n    BlendFactorBlendAlpha = 13,\n    BlendFactorOneMinusBlendAlpha = 14,\n    BlendFactorSource1Color = 15,\n    BlendFactorOneMinusSource1Color = 16,\n    BlendFactorSource1Alpha = 17,\n    BlendFactorOneMinusSource1Alpha = 18,\n};\n\n_MTL_ENUM(NS::UInteger, BlendOperation) {\n    BlendOperationAdd = 0,\n    BlendOperationSubtract = 1,\n    BlendOperationReverseSubtract = 2,\n    BlendOperationMin = 3,\n    BlendOperationMax = 4,\n};\n\n_MTL_OPTIONS(NS::UInteger, ColorWriteMask) {\n    ColorWriteMaskNone = 0,\n    ColorWriteMaskRed = 8,\n    ColorWriteMaskGreen = 4,\n    ColorWriteMaskBlue = 2,\n    ColorWriteMaskAlpha = 1,\n    ColorWriteMaskAll = 15,\n};\n\n_MTL_ENUM(NS::UInteger, PrimitiveTopologyClass) {\n    PrimitiveTopologyClassUnspecified = 0,\n    PrimitiveTopologyClassPoint = 1,\n    PrimitiveTopologyClassLine = 2,\n    PrimitiveTopologyClassTriangle = 3,\n};\n\n_MTL_ENUM(NS::UInteger, TessellationPartitionMode) {\n    TessellationPartitionModePow2 = 0,\n    TessellationPartitionModeInteger = 1,\n    TessellationPartitionModeFractionalOdd = 2,\n    TessellationPartitionModeFractionalEven = 3,\n};\n\n_MTL_ENUM(NS::UInteger, TessellationFactorStepFunction) {\n    TessellationFactorStepFunctionConstant = 0,\n    TessellationFactorStepFunctionPerPatch = 1,\n    TessellationFactorStepFunctionPerInstance = 2,\n    
TessellationFactorStepFunctionPerPatchAndPerInstance = 3,\n};\n\n_MTL_ENUM(NS::UInteger, TessellationFactorFormat) {\n    TessellationFactorFormatHalf = 0,\n};\n\n_MTL_ENUM(NS::UInteger, TessellationControlPointIndexType) {\n    TessellationControlPointIndexTypeNone = 0,\n    TessellationControlPointIndexTypeUInt16 = 1,\n    TessellationControlPointIndexTypeUInt32 = 2,\n};\n\nclass RenderPipelineColorAttachmentDescriptor : public NS::Copying<RenderPipelineColorAttachmentDescriptor>\n{\npublic:\n    static class RenderPipelineColorAttachmentDescriptor* alloc();\n\n    class RenderPipelineColorAttachmentDescriptor*        init();\n\n    MTL::PixelFormat                                      pixelFormat() const;\n    void                                                  setPixelFormat(MTL::PixelFormat pixelFormat);\n\n    bool                                                  blendingEnabled() const;\n    void                                                  setBlendingEnabled(bool blendingEnabled);\n\n    MTL::BlendFactor                                      sourceRGBBlendFactor() const;\n    void                                                  setSourceRGBBlendFactor(MTL::BlendFactor sourceRGBBlendFactor);\n\n    MTL::BlendFactor                                      destinationRGBBlendFactor() const;\n    void                                                  setDestinationRGBBlendFactor(MTL::BlendFactor destinationRGBBlendFactor);\n\n    MTL::BlendOperation                                   rgbBlendOperation() const;\n    void                                                  setRgbBlendOperation(MTL::BlendOperation rgbBlendOperation);\n\n    MTL::BlendFactor                                      sourceAlphaBlendFactor() const;\n    void                                                  setSourceAlphaBlendFactor(MTL::BlendFactor sourceAlphaBlendFactor);\n\n    MTL::BlendFactor                                      destinationAlphaBlendFactor() const;\n    void            
                                      setDestinationAlphaBlendFactor(MTL::BlendFactor destinationAlphaBlendFactor);\n\n    MTL::BlendOperation                                   alphaBlendOperation() const;\n    void                                                  setAlphaBlendOperation(MTL::BlendOperation alphaBlendOperation);\n\n    MTL::ColorWriteMask                                   writeMask() const;\n    void                                                  setWriteMask(MTL::ColorWriteMask writeMask);\n};\n\nclass RenderPipelineReflection : public NS::Referencing<RenderPipelineReflection>\n{\npublic:\n    static class RenderPipelineReflection* alloc();\n\n    class RenderPipelineReflection*        init();\n\n    NS::Array*                             vertexBindings() const;\n\n    NS::Array*                             fragmentBindings() const;\n\n    NS::Array*                             tileBindings() const;\n\n    NS::Array*                             objectBindings() const;\n\n    NS::Array*                             meshBindings() const;\n\n    NS::Array*                             vertexArguments() const;\n\n    NS::Array*                             fragmentArguments() const;\n\n    NS::Array*                             tileArguments() const;\n};\n\nclass RenderPipelineDescriptor : public NS::Copying<RenderPipelineDescriptor>\n{\npublic:\n    static class RenderPipelineDescriptor*              alloc();\n\n    class RenderPipelineDescriptor*                     init();\n\n    NS::String*                                         label() const;\n    void                                                setLabel(const NS::String* label);\n\n    class Function*                                     vertexFunction() const;\n    void                                                setVertexFunction(const class Function* vertexFunction);\n\n    class Function*                                     fragmentFunction() const;\n    void                              
                  setFragmentFunction(const class Function* fragmentFunction);\n\n    class VertexDescriptor*                             vertexDescriptor() const;\n    void                                                setVertexDescriptor(const class VertexDescriptor* vertexDescriptor);\n\n    NS::UInteger                                        sampleCount() const;\n    void                                                setSampleCount(NS::UInteger sampleCount);\n\n    NS::UInteger                                        rasterSampleCount() const;\n    void                                                setRasterSampleCount(NS::UInteger rasterSampleCount);\n\n    bool                                                alphaToCoverageEnabled() const;\n    void                                                setAlphaToCoverageEnabled(bool alphaToCoverageEnabled);\n\n    bool                                                alphaToOneEnabled() const;\n    void                                                setAlphaToOneEnabled(bool alphaToOneEnabled);\n\n    bool                                                rasterizationEnabled() const;\n    void                                                setRasterizationEnabled(bool rasterizationEnabled);\n\n    NS::UInteger                                        maxVertexAmplificationCount() const;\n    void                                                setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount);\n\n    class RenderPipelineColorAttachmentDescriptorArray* colorAttachments() const;\n\n    MTL::PixelFormat                                    depthAttachmentPixelFormat() const;\n    void                                                setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat);\n\n    MTL::PixelFormat                                    stencilAttachmentPixelFormat() const;\n    void                                                setStencilAttachmentPixelFormat(MTL::PixelFormat 
stencilAttachmentPixelFormat);\n\n    MTL::PrimitiveTopologyClass                         inputPrimitiveTopology() const;\n    void                                                setInputPrimitiveTopology(MTL::PrimitiveTopologyClass inputPrimitiveTopology);\n\n    MTL::TessellationPartitionMode                      tessellationPartitionMode() const;\n    void                                                setTessellationPartitionMode(MTL::TessellationPartitionMode tessellationPartitionMode);\n\n    NS::UInteger                                        maxTessellationFactor() const;\n    void                                                setMaxTessellationFactor(NS::UInteger maxTessellationFactor);\n\n    bool                                                tessellationFactorScaleEnabled() const;\n    void                                                setTessellationFactorScaleEnabled(bool tessellationFactorScaleEnabled);\n\n    MTL::TessellationFactorFormat                       tessellationFactorFormat() const;\n    void                                                setTessellationFactorFormat(MTL::TessellationFactorFormat tessellationFactorFormat);\n\n    MTL::TessellationControlPointIndexType              tessellationControlPointIndexType() const;\n    void                                                setTessellationControlPointIndexType(MTL::TessellationControlPointIndexType tessellationControlPointIndexType);\n\n    MTL::TessellationFactorStepFunction                 tessellationFactorStepFunction() const;\n    void                                                setTessellationFactorStepFunction(MTL::TessellationFactorStepFunction tessellationFactorStepFunction);\n\n    MTL::Winding                                        tessellationOutputWindingOrder() const;\n    void                                                setTessellationOutputWindingOrder(MTL::Winding tessellationOutputWindingOrder);\n\n    class PipelineBufferDescriptorArray*                
vertexBuffers() const;\n\n    class PipelineBufferDescriptorArray*                fragmentBuffers() const;\n\n    bool                                                supportIndirectCommandBuffers() const;\n    void                                                setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers);\n\n    NS::Array*                                          binaryArchives() const;\n    void                                                setBinaryArchives(const NS::Array* binaryArchives);\n\n    NS::Array*                                          vertexPreloadedLibraries() const;\n    void                                                setVertexPreloadedLibraries(const NS::Array* vertexPreloadedLibraries);\n\n    NS::Array*                                          fragmentPreloadedLibraries() const;\n    void                                                setFragmentPreloadedLibraries(const NS::Array* fragmentPreloadedLibraries);\n\n    class LinkedFunctions*                              vertexLinkedFunctions() const;\n    void                                                setVertexLinkedFunctions(const class LinkedFunctions* vertexLinkedFunctions);\n\n    class LinkedFunctions*                              fragmentLinkedFunctions() const;\n    void                                                setFragmentLinkedFunctions(const class LinkedFunctions* fragmentLinkedFunctions);\n\n    bool                                                supportAddingVertexBinaryFunctions() const;\n    void                                                setSupportAddingVertexBinaryFunctions(bool supportAddingVertexBinaryFunctions);\n\n    bool                                                supportAddingFragmentBinaryFunctions() const;\n    void                                                setSupportAddingFragmentBinaryFunctions(bool supportAddingFragmentBinaryFunctions);\n\n    NS::UInteger                                        
maxVertexCallStackDepth() const;\n    void                                                setMaxVertexCallStackDepth(NS::UInteger maxVertexCallStackDepth);\n\n    NS::UInteger                                        maxFragmentCallStackDepth() const;\n    void                                                setMaxFragmentCallStackDepth(NS::UInteger maxFragmentCallStackDepth);\n\n    void                                                reset();\n\n    MTL::ShaderValidation                               shaderValidation() const;\n    void                                                setShaderValidation(MTL::ShaderValidation shaderValidation);\n};\n\nclass RenderPipelineFunctionsDescriptor : public NS::Copying<RenderPipelineFunctionsDescriptor>\n{\npublic:\n    static class RenderPipelineFunctionsDescriptor* alloc();\n\n    class RenderPipelineFunctionsDescriptor*        init();\n\n    NS::Array*                                      vertexAdditionalBinaryFunctions() const;\n    void                                            setVertexAdditionalBinaryFunctions(const NS::Array* vertexAdditionalBinaryFunctions);\n\n    NS::Array*                                      fragmentAdditionalBinaryFunctions() const;\n    void                                            setFragmentAdditionalBinaryFunctions(const NS::Array* fragmentAdditionalBinaryFunctions);\n\n    NS::Array*                                      tileAdditionalBinaryFunctions() const;\n    void                                            setTileAdditionalBinaryFunctions(const NS::Array* tileAdditionalBinaryFunctions);\n};\n\nclass RenderPipelineState : public NS::Referencing<RenderPipelineState>\n{\npublic:\n    NS::String*                      label() const;\n\n    class Device*                    device() const;\n\n    NS::UInteger                     maxTotalThreadsPerThreadgroup() const;\n\n    bool                             threadgroupSizeMatchesTileSize() const;\n\n    NS::UInteger                     
imageblockSampleLength() const;\n\n    NS::UInteger                     imageblockMemoryLength(MTL::Size imageblockDimensions);\n\n    bool                             supportIndirectCommandBuffers() const;\n\n    NS::UInteger                     maxTotalThreadsPerObjectThreadgroup() const;\n\n    NS::UInteger                     maxTotalThreadsPerMeshThreadgroup() const;\n\n    NS::UInteger                     objectThreadExecutionWidth() const;\n\n    NS::UInteger                     meshThreadExecutionWidth() const;\n\n    NS::UInteger                     maxTotalThreadgroupsPerMeshGrid() const;\n\n    MTL::ResourceID                  gpuResourceID() const;\n\n    class FunctionHandle*            functionHandle(const class Function* function, MTL::RenderStages stage);\n\n    class VisibleFunctionTable*      newVisibleFunctionTable(const class VisibleFunctionTableDescriptor* descriptor, MTL::RenderStages stage);\n\n    class IntersectionFunctionTable* newIntersectionFunctionTable(const class IntersectionFunctionTableDescriptor* descriptor, MTL::RenderStages stage);\n\n    class RenderPipelineState*       newRenderPipelineState(const class RenderPipelineFunctionsDescriptor* additionalBinaryFunctions, NS::Error** error);\n\n    MTL::ShaderValidation            shaderValidation() const;\n};\n\nclass RenderPipelineColorAttachmentDescriptorArray : public NS::Referencing<RenderPipelineColorAttachmentDescriptorArray>\n{\npublic:\n    static class RenderPipelineColorAttachmentDescriptorArray* alloc();\n\n    class RenderPipelineColorAttachmentDescriptorArray*        init();\n\n    class RenderPipelineColorAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                       setObject(const class RenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass TileRenderPipelineColorAttachmentDescriptor : public 
NS::Copying<TileRenderPipelineColorAttachmentDescriptor>\n{\npublic:\n    static class TileRenderPipelineColorAttachmentDescriptor* alloc();\n\n    class TileRenderPipelineColorAttachmentDescriptor*        init();\n\n    MTL::PixelFormat                                          pixelFormat() const;\n    void                                                      setPixelFormat(MTL::PixelFormat pixelFormat);\n};\n\nclass TileRenderPipelineColorAttachmentDescriptorArray : public NS::Referencing<TileRenderPipelineColorAttachmentDescriptorArray>\n{\npublic:\n    static class TileRenderPipelineColorAttachmentDescriptorArray* alloc();\n\n    class TileRenderPipelineColorAttachmentDescriptorArray*        init();\n\n    class TileRenderPipelineColorAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                           setObject(const class TileRenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass TileRenderPipelineDescriptor : public NS::Copying<TileRenderPipelineDescriptor>\n{\npublic:\n    static class TileRenderPipelineDescriptor*              alloc();\n\n    class TileRenderPipelineDescriptor*                     init();\n\n    NS::String*                                             label() const;\n    void                                                    setLabel(const NS::String* label);\n\n    class Function*                                         tileFunction() const;\n    void                                                    setTileFunction(const class Function* tileFunction);\n\n    NS::UInteger                                            rasterSampleCount() const;\n    void                                                    setRasterSampleCount(NS::UInteger rasterSampleCount);\n\n    class TileRenderPipelineColorAttachmentDescriptorArray* colorAttachments() const;\n\n    bool                                                    
threadgroupSizeMatchesTileSize() const;\n    void                                                    setThreadgroupSizeMatchesTileSize(bool threadgroupSizeMatchesTileSize);\n\n    class PipelineBufferDescriptorArray*                    tileBuffers() const;\n\n    NS::UInteger                                            maxTotalThreadsPerThreadgroup() const;\n    void                                                    setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup);\n\n    NS::Array*                                              binaryArchives() const;\n    void                                                    setBinaryArchives(const NS::Array* binaryArchives);\n\n    NS::Array*                                              preloadedLibraries() const;\n    void                                                    setPreloadedLibraries(const NS::Array* preloadedLibraries);\n\n    class LinkedFunctions*                                  linkedFunctions() const;\n    void                                                    setLinkedFunctions(const class LinkedFunctions* linkedFunctions);\n\n    bool                                                    supportAddingBinaryFunctions() const;\n    void                                                    setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions);\n\n    NS::UInteger                                            maxCallStackDepth() const;\n    void                                                    setMaxCallStackDepth(NS::UInteger maxCallStackDepth);\n\n    void                                                    reset();\n\n    MTL::ShaderValidation                                   shaderValidation() const;\n    void                                                    setShaderValidation(MTL::ShaderValidation shaderValidation);\n};\n\nclass MeshRenderPipelineDescriptor : public NS::Copying<MeshRenderPipelineDescriptor>\n{\npublic:\n    static class MeshRenderPipelineDescriptor*     
     alloc();\n\n    class MeshRenderPipelineDescriptor*                 init();\n\n    NS::String*                                         label() const;\n    void                                                setLabel(const NS::String* label);\n\n    class Function*                                     objectFunction() const;\n    void                                                setObjectFunction(const class Function* objectFunction);\n\n    class Function*                                     meshFunction() const;\n    void                                                setMeshFunction(const class Function* meshFunction);\n\n    class Function*                                     fragmentFunction() const;\n    void                                                setFragmentFunction(const class Function* fragmentFunction);\n\n    NS::UInteger                                        maxTotalThreadsPerObjectThreadgroup() const;\n    void                                                setMaxTotalThreadsPerObjectThreadgroup(NS::UInteger maxTotalThreadsPerObjectThreadgroup);\n\n    NS::UInteger                                        maxTotalThreadsPerMeshThreadgroup() const;\n    void                                                setMaxTotalThreadsPerMeshThreadgroup(NS::UInteger maxTotalThreadsPerMeshThreadgroup);\n\n    bool                                                objectThreadgroupSizeIsMultipleOfThreadExecutionWidth() const;\n    void                                                setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool objectThreadgroupSizeIsMultipleOfThreadExecutionWidth);\n\n    bool                                                meshThreadgroupSizeIsMultipleOfThreadExecutionWidth() const;\n    void                                                setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool meshThreadgroupSizeIsMultipleOfThreadExecutionWidth);\n\n    NS::UInteger                                        
payloadMemoryLength() const;\n    void                                                setPayloadMemoryLength(NS::UInteger payloadMemoryLength);\n\n    NS::UInteger                                        maxTotalThreadgroupsPerMeshGrid() const;\n    void                                                setMaxTotalThreadgroupsPerMeshGrid(NS::UInteger maxTotalThreadgroupsPerMeshGrid);\n\n    class PipelineBufferDescriptorArray*                objectBuffers() const;\n\n    class PipelineBufferDescriptorArray*                meshBuffers() const;\n\n    class PipelineBufferDescriptorArray*                fragmentBuffers() const;\n\n    NS::UInteger                                        rasterSampleCount() const;\n    void                                                setRasterSampleCount(NS::UInteger rasterSampleCount);\n\n    bool                                                alphaToCoverageEnabled() const;\n    void                                                setAlphaToCoverageEnabled(bool alphaToCoverageEnabled);\n\n    bool                                                alphaToOneEnabled() const;\n    void                                                setAlphaToOneEnabled(bool alphaToOneEnabled);\n\n    bool                                                rasterizationEnabled() const;\n    void                                                setRasterizationEnabled(bool rasterizationEnabled);\n\n    NS::UInteger                                        maxVertexAmplificationCount() const;\n    void                                                setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount);\n\n    class RenderPipelineColorAttachmentDescriptorArray* colorAttachments() const;\n\n    MTL::PixelFormat                                    depthAttachmentPixelFormat() const;\n    void                                                setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat);\n\n    MTL::PixelFormat                    
                stencilAttachmentPixelFormat() const;\n    void                                                setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat);\n\n    bool                                                supportIndirectCommandBuffers() const;\n    void                                                setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers);\n\n    NS::Array*                                          binaryArchives() const;\n    void                                                setBinaryArchives(const NS::Array* binaryArchives);\n\n    class LinkedFunctions*                              objectLinkedFunctions() const;\n    void                                                setObjectLinkedFunctions(const class LinkedFunctions* objectLinkedFunctions);\n\n    class LinkedFunctions*                              meshLinkedFunctions() const;\n    void                                                setMeshLinkedFunctions(const class LinkedFunctions* meshLinkedFunctions);\n\n    class LinkedFunctions*                              fragmentLinkedFunctions() const;\n    void                                                setFragmentLinkedFunctions(const class LinkedFunctions* fragmentLinkedFunctions);\n\n    void                                                reset();\n\n    MTL::ShaderValidation                               shaderValidation() const;\n    void                                                setShaderValidation(MTL::ShaderValidation shaderValidation);\n};\n\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPipelineColorAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPipelineColorAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptor::init()\n{\n    return 
NS::Object::init<MTL::RenderPipelineColorAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::RenderPipelineColorAttachmentDescriptor::pixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(pixelFormat));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineColorAttachmentDescriptor::blendingEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isBlendingEnabled));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setBlendingEnabled(bool blendingEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBlendingEnabled_), blendingEnabled);\n}\n\n_MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::sourceRGBBlendFactor() const\n{\n    return Object::sendMessage<MTL::BlendFactor>(this, _MTL_PRIVATE_SEL(sourceRGBBlendFactor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setSourceRGBBlendFactor(MTL::BlendFactor sourceRGBBlendFactor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSourceRGBBlendFactor_), sourceRGBBlendFactor);\n}\n\n_MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::destinationRGBBlendFactor() const\n{\n    return Object::sendMessage<MTL::BlendFactor>(this, _MTL_PRIVATE_SEL(destinationRGBBlendFactor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setDestinationRGBBlendFactor(MTL::BlendFactor destinationRGBBlendFactor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDestinationRGBBlendFactor_), destinationRGBBlendFactor);\n}\n\n_MTL_INLINE MTL::BlendOperation MTL::RenderPipelineColorAttachmentDescriptor::rgbBlendOperation() const\n{\n    return Object::sendMessage<MTL::BlendOperation>(this, 
_MTL_PRIVATE_SEL(rgbBlendOperation));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setRgbBlendOperation(MTL::BlendOperation rgbBlendOperation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRgbBlendOperation_), rgbBlendOperation);\n}\n\n_MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::sourceAlphaBlendFactor() const\n{\n    return Object::sendMessage<MTL::BlendFactor>(this, _MTL_PRIVATE_SEL(sourceAlphaBlendFactor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setSourceAlphaBlendFactor(MTL::BlendFactor sourceAlphaBlendFactor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSourceAlphaBlendFactor_), sourceAlphaBlendFactor);\n}\n\n_MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::destinationAlphaBlendFactor() const\n{\n    return Object::sendMessage<MTL::BlendFactor>(this, _MTL_PRIVATE_SEL(destinationAlphaBlendFactor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setDestinationAlphaBlendFactor(MTL::BlendFactor destinationAlphaBlendFactor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDestinationAlphaBlendFactor_), destinationAlphaBlendFactor);\n}\n\n_MTL_INLINE MTL::BlendOperation MTL::RenderPipelineColorAttachmentDescriptor::alphaBlendOperation() const\n{\n    return Object::sendMessage<MTL::BlendOperation>(this, _MTL_PRIVATE_SEL(alphaBlendOperation));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setAlphaBlendOperation(MTL::BlendOperation alphaBlendOperation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAlphaBlendOperation_), alphaBlendOperation);\n}\n\n_MTL_INLINE MTL::ColorWriteMask MTL::RenderPipelineColorAttachmentDescriptor::writeMask() const\n{\n    return Object::sendMessage<MTL::ColorWriteMask>(this, _MTL_PRIVATE_SEL(writeMask));\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setWriteMask(MTL::ColorWriteMask writeMask)\n{\n    
Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setWriteMask_), writeMask);\n}\n\n_MTL_INLINE MTL::RenderPipelineReflection* MTL::RenderPipelineReflection::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPipelineReflection>(_MTL_PRIVATE_CLS(MTLRenderPipelineReflection));\n}\n\n_MTL_INLINE MTL::RenderPipelineReflection* MTL::RenderPipelineReflection::init()\n{\n    return NS::Object::init<MTL::RenderPipelineReflection>();\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::vertexBindings() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexBindings));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::fragmentBindings() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(fragmentBindings));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::tileBindings() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(tileBindings));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::objectBindings() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(objectBindings));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::meshBindings() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(meshBindings));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::vertexArguments() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexArguments));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::fragmentArguments() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(fragmentArguments));\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineReflection::tileArguments() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(tileArguments));\n}\n\n_MTL_INLINE MTL::RenderPipelineDescriptor* MTL::RenderPipelineDescriptor::alloc()\n{\n    return 
NS::Object::alloc<MTL::RenderPipelineDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPipelineDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPipelineDescriptor* MTL::RenderPipelineDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPipelineDescriptor>();\n}\n\n_MTL_INLINE NS::String* MTL::RenderPipelineDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Function* MTL::RenderPipelineDescriptor::vertexFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(vertexFunction));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexFunction(const MTL::Function* vertexFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexFunction_), vertexFunction);\n}\n\n_MTL_INLINE MTL::Function* MTL::RenderPipelineDescriptor::fragmentFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(fragmentFunction));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentFunction(const MTL::Function* fragmentFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentFunction_), fragmentFunction);\n}\n\n_MTL_INLINE MTL::VertexDescriptor* MTL::RenderPipelineDescriptor::vertexDescriptor() const\n{\n    return Object::sendMessage<MTL::VertexDescriptor*>(this, _MTL_PRIVATE_SEL(vertexDescriptor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexDescriptor(const MTL::VertexDescriptor* vertexDescriptor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexDescriptor_), vertexDescriptor);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::sampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(sampleCount));\n}\n\n_MTL_INLINE void 
MTL::RenderPipelineDescriptor::setSampleCount(NS::UInteger sampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::rasterSampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(rasterSampleCount));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::alphaToCoverageEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isAlphaToCoverageEnabled));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setAlphaToCoverageEnabled(bool alphaToCoverageEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAlphaToCoverageEnabled_), alphaToCoverageEnabled);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::alphaToOneEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isAlphaToOneEnabled));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setAlphaToOneEnabled(bool alphaToOneEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAlphaToOneEnabled_), alphaToOneEnabled);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::rasterizationEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isRasterizationEnabled));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setRasterizationEnabled(bool rasterizationEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterizationEnabled_), rasterizationEnabled);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxVertexAmplificationCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxVertexAmplificationCount));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxVertexAmplificationCount(NS::UInteger 
maxVertexAmplificationCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxVertexAmplificationCount_), maxVertexAmplificationCount);\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineDescriptor::colorAttachments() const\n{\n    return Object::sendMessage<MTL::RenderPipelineColorAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(colorAttachments));\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::RenderPipelineDescriptor::depthAttachmentPixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(depthAttachmentPixelFormat));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthAttachmentPixelFormat_), depthAttachmentPixelFormat);\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::RenderPipelineDescriptor::stencilAttachmentPixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(stencilAttachmentPixelFormat));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilAttachmentPixelFormat_), stencilAttachmentPixelFormat);\n}\n\n_MTL_INLINE MTL::PrimitiveTopologyClass MTL::RenderPipelineDescriptor::inputPrimitiveTopology() const\n{\n    return Object::sendMessage<MTL::PrimitiveTopologyClass>(this, _MTL_PRIVATE_SEL(inputPrimitiveTopology));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setInputPrimitiveTopology(MTL::PrimitiveTopologyClass inputPrimitiveTopology)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInputPrimitiveTopology_), inputPrimitiveTopology);\n}\n\n_MTL_INLINE MTL::TessellationPartitionMode MTL::RenderPipelineDescriptor::tessellationPartitionMode() const\n{\n    return Object::sendMessage<MTL::TessellationPartitionMode>(this, 
_MTL_PRIVATE_SEL(tessellationPartitionMode));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationPartitionMode(MTL::TessellationPartitionMode tessellationPartitionMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationPartitionMode_), tessellationPartitionMode);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxTessellationFactor() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTessellationFactor));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxTessellationFactor(NS::UInteger maxTessellationFactor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTessellationFactor_), maxTessellationFactor);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::tessellationFactorScaleEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isTessellationFactorScaleEnabled));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorScaleEnabled(bool tessellationFactorScaleEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationFactorScaleEnabled_), tessellationFactorScaleEnabled);\n}\n\n_MTL_INLINE MTL::TessellationFactorFormat MTL::RenderPipelineDescriptor::tessellationFactorFormat() const\n{\n    return Object::sendMessage<MTL::TessellationFactorFormat>(this, _MTL_PRIVATE_SEL(tessellationFactorFormat));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorFormat(MTL::TessellationFactorFormat tessellationFactorFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationFactorFormat_), tessellationFactorFormat);\n}\n\n_MTL_INLINE MTL::TessellationControlPointIndexType MTL::RenderPipelineDescriptor::tessellationControlPointIndexType() const\n{\n    return Object::sendMessage<MTL::TessellationControlPointIndexType>(this, _MTL_PRIVATE_SEL(tessellationControlPointIndexType));\n}\n\n_MTL_INLINE void 
MTL::RenderPipelineDescriptor::setTessellationControlPointIndexType(MTL::TessellationControlPointIndexType tessellationControlPointIndexType)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationControlPointIndexType_), tessellationControlPointIndexType);\n}\n\n_MTL_INLINE MTL::TessellationFactorStepFunction MTL::RenderPipelineDescriptor::tessellationFactorStepFunction() const\n{\n    return Object::sendMessage<MTL::TessellationFactorStepFunction>(this, _MTL_PRIVATE_SEL(tessellationFactorStepFunction));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorStepFunction(MTL::TessellationFactorStepFunction tessellationFactorStepFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationFactorStepFunction_), tessellationFactorStepFunction);\n}\n\n_MTL_INLINE MTL::Winding MTL::RenderPipelineDescriptor::tessellationOutputWindingOrder() const\n{\n    return Object::sendMessage<MTL::Winding>(this, _MTL_PRIVATE_SEL(tessellationOutputWindingOrder));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationOutputWindingOrder(MTL::Winding tessellationOutputWindingOrder)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTessellationOutputWindingOrder_), tessellationOutputWindingOrder);\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::RenderPipelineDescriptor::vertexBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(vertexBuffers));\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::RenderPipelineDescriptor::fragmentBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(fragmentBuffers));\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::supportIndirectCommandBuffers() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers));\n}\n\n_MTL_INLINE void 
MTL::RenderPipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers);\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::binaryArchives() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::vertexPreloadedLibraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexPreloadedLibraries));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexPreloadedLibraries(const NS::Array* vertexPreloadedLibraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexPreloadedLibraries_), vertexPreloadedLibraries);\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::fragmentPreloadedLibraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(fragmentPreloadedLibraries));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentPreloadedLibraries(const NS::Array* fragmentPreloadedLibraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentPreloadedLibraries_), fragmentPreloadedLibraries);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::RenderPipelineDescriptor::vertexLinkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(vertexLinkedFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexLinkedFunctions(const MTL::LinkedFunctions* vertexLinkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexLinkedFunctions_), vertexLinkedFunctions);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* 
MTL::RenderPipelineDescriptor::fragmentLinkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(fragmentLinkedFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentLinkedFunctions(const MTL::LinkedFunctions* fragmentLinkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentLinkedFunctions_), fragmentLinkedFunctions);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::supportAddingVertexBinaryFunctions() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportAddingVertexBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setSupportAddingVertexBinaryFunctions(bool supportAddingVertexBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportAddingVertexBinaryFunctions_), supportAddingVertexBinaryFunctions);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineDescriptor::supportAddingFragmentBinaryFunctions() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportAddingFragmentBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setSupportAddingFragmentBinaryFunctions(bool supportAddingFragmentBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportAddingFragmentBinaryFunctions_), supportAddingFragmentBinaryFunctions);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxVertexCallStackDepth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxVertexCallStackDepth));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxVertexCallStackDepth(NS::UInteger maxVertexCallStackDepth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxVertexCallStackDepth_), maxVertexCallStackDepth);\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxFragmentCallStackDepth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(maxFragmentCallStackDepth));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxFragmentCallStackDepth(NS::UInteger maxFragmentCallStackDepth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxFragmentCallStackDepth_), maxFragmentCallStackDepth);\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::RenderPipelineDescriptor::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n_MTL_INLINE void MTL::RenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation);\n}\n\n_MTL_INLINE MTL::RenderPipelineFunctionsDescriptor* MTL::RenderPipelineFunctionsDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPipelineFunctionsDescriptor>(_MTL_PRIVATE_CLS(MTLRenderPipelineFunctionsDescriptor));\n}\n\n_MTL_INLINE MTL::RenderPipelineFunctionsDescriptor* MTL::RenderPipelineFunctionsDescriptor::init()\n{\n    return NS::Object::init<MTL::RenderPipelineFunctionsDescriptor>();\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::vertexAdditionalBinaryFunctions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(vertexAdditionalBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineFunctionsDescriptor::setVertexAdditionalBinaryFunctions(const NS::Array* vertexAdditionalBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVertexAdditionalBinaryFunctions_), vertexAdditionalBinaryFunctions);\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::fragmentAdditionalBinaryFunctions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(fragmentAdditionalBinaryFunctions));\n}\n\n_MTL_INLINE void 
MTL::RenderPipelineFunctionsDescriptor::setFragmentAdditionalBinaryFunctions(const NS::Array* fragmentAdditionalBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentAdditionalBinaryFunctions_), fragmentAdditionalBinaryFunctions);\n}\n\n_MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::tileAdditionalBinaryFunctions() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(tileAdditionalBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::RenderPipelineFunctionsDescriptor::setTileAdditionalBinaryFunctions(const NS::Array* tileAdditionalBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileAdditionalBinaryFunctions_), tileAdditionalBinaryFunctions);\n}\n\n_MTL_INLINE NS::String* MTL::RenderPipelineState::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::Device* MTL::RenderPipelineState::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadsPerThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup));\n}\n\n_MTL_INLINE bool MTL::RenderPipelineState::threadgroupSizeMatchesTileSize() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(threadgroupSizeMatchesTileSize));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::imageblockSampleLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(imageblockSampleLength));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::imageblockMemoryLength(MTL::Size imageblockDimensions)\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(imageblockMemoryLengthForDimensions_), imageblockDimensions);\n}\n\n_MTL_INLINE bool MTL::RenderPipelineState::supportIndirectCommandBuffers() const\n{\n    return 
Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadsPerObjectThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerObjectThreadgroup));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadsPerMeshThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerMeshThreadgroup));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::objectThreadExecutionWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(objectThreadExecutionWidth));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::meshThreadExecutionWidth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(meshThreadExecutionWidth));\n}\n\n_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadgroupsPerMeshGrid() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadgroupsPerMeshGrid));\n}\n\n_MTL_INLINE MTL::ResourceID MTL::RenderPipelineState::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE MTL::FunctionHandle* MTL::RenderPipelineState::functionHandle(const MTL::Function* function, MTL::RenderStages stage)\n{\n    return Object::sendMessage<MTL::FunctionHandle*>(this, _MTL_PRIVATE_SEL(functionHandleWithFunction_stage_), function, stage);\n}\n\n_MTL_INLINE MTL::VisibleFunctionTable* MTL::RenderPipelineState::newVisibleFunctionTable(const MTL::VisibleFunctionTableDescriptor* descriptor, MTL::RenderStages stage)\n{\n    return Object::sendMessage<MTL::VisibleFunctionTable*>(this, _MTL_PRIVATE_SEL(newVisibleFunctionTableWithDescriptor_stage_), descriptor, stage);\n}\n\n_MTL_INLINE MTL::IntersectionFunctionTable* MTL::RenderPipelineState::newIntersectionFunctionTable(const 
MTL::IntersectionFunctionTableDescriptor* descriptor, MTL::RenderStages stage)\n{\n    return Object::sendMessage<MTL::IntersectionFunctionTable*>(this, _MTL_PRIVATE_SEL(newIntersectionFunctionTableWithDescriptor_stage_), descriptor, stage);\n}\n\n_MTL_INLINE MTL::RenderPipelineState* MTL::RenderPipelineState::newRenderPipelineState(const MTL::RenderPipelineFunctionsDescriptor* additionalBinaryFunctions, NS::Error** error)\n{\n    return Object::sendMessage<MTL::RenderPipelineState*>(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithAdditionalBinaryFunctions_error_), additionalBinaryFunctions, error);\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::RenderPipelineState::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineColorAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::RenderPipelineColorAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLRenderPipelineColorAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineColorAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::RenderPipelineColorAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::RenderPipelineColorAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptorArray::setObject(const MTL::RenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* 
MTL::TileRenderPipelineColorAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::TileRenderPipelineColorAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLTileRenderPipelineColorAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* MTL::TileRenderPipelineColorAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::TileRenderPipelineColorAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::TileRenderPipelineColorAttachmentDescriptor::pixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(pixelFormat));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineColorAttachmentDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat);\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineColorAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::TileRenderPipelineColorAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLTileRenderPipelineColorAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineColorAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::TileRenderPipelineColorAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* MTL::TileRenderPipelineColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::TileRenderPipelineColorAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineColorAttachmentDescriptorArray::setObject(const MTL::TileRenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE 
MTL::TileRenderPipelineDescriptor* MTL::TileRenderPipelineDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::TileRenderPipelineDescriptor>(_MTL_PRIVATE_CLS(MTLTileRenderPipelineDescriptor));\n}\n\n_MTL_INLINE MTL::TileRenderPipelineDescriptor* MTL::TileRenderPipelineDescriptor::init()\n{\n    return NS::Object::init<MTL::TileRenderPipelineDescriptor>();\n}\n\n_MTL_INLINE NS::String* MTL::TileRenderPipelineDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Function* MTL::TileRenderPipelineDescriptor::tileFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(tileFunction));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setTileFunction(const MTL::Function* tileFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTileFunction_), tileFunction);\n}\n\n_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::rasterSampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(rasterSampleCount));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount);\n}\n\n_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineDescriptor::colorAttachments() const\n{\n    return Object::sendMessage<MTL::TileRenderPipelineColorAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(colorAttachments));\n}\n\n_MTL_INLINE bool MTL::TileRenderPipelineDescriptor::threadgroupSizeMatchesTileSize() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(threadgroupSizeMatchesTileSize));\n}\n\n_MTL_INLINE void 
MTL::TileRenderPipelineDescriptor::setThreadgroupSizeMatchesTileSize(bool threadgroupSizeMatchesTileSize)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setThreadgroupSizeMatchesTileSize_), threadgroupSizeMatchesTileSize);\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::TileRenderPipelineDescriptor::tileBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(tileBuffers));\n}\n\n_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::maxTotalThreadsPerThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup);\n}\n\n_MTL_INLINE NS::Array* MTL::TileRenderPipelineDescriptor::binaryArchives() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);\n}\n\n_MTL_INLINE NS::Array* MTL::TileRenderPipelineDescriptor::preloadedLibraries() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(preloadedLibraries));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setPreloadedLibraries(const NS::Array* preloadedLibraries)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setPreloadedLibraries_), preloadedLibraries);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::TileRenderPipelineDescriptor::linkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(linkedFunctions));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setLinkedFunctions(const 
MTL::LinkedFunctions* linkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLinkedFunctions_), linkedFunctions);\n}\n\n_MTL_INLINE bool MTL::TileRenderPipelineDescriptor::supportAddingBinaryFunctions() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportAddingBinaryFunctions));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportAddingBinaryFunctions_), supportAddingBinaryFunctions);\n}\n\n_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::maxCallStackDepth() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxCallStackDepth));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setMaxCallStackDepth(NS::UInteger maxCallStackDepth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxCallStackDepth_), maxCallStackDepth);\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::TileRenderPipelineDescriptor::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation);\n}\n\n_MTL_INLINE MTL::MeshRenderPipelineDescriptor* MTL::MeshRenderPipelineDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::MeshRenderPipelineDescriptor>(_MTL_PRIVATE_CLS(MTLMeshRenderPipelineDescriptor));\n}\n\n_MTL_INLINE MTL::MeshRenderPipelineDescriptor* MTL::MeshRenderPipelineDescriptor::init()\n{\n    return NS::Object::init<MTL::MeshRenderPipelineDescriptor>();\n}\n\n_MTL_INLINE NS::String* MTL::MeshRenderPipelineDescriptor::label() const\n{\n    
return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::objectFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(objectFunction));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectFunction(const MTL::Function* objectFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectFunction_), objectFunction);\n}\n\n_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::meshFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(meshFunction));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMeshFunction(const MTL::Function* meshFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshFunction_), meshFunction);\n}\n\n_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::fragmentFunction() const\n{\n    return Object::sendMessage<MTL::Function*>(this, _MTL_PRIVATE_SEL(fragmentFunction));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setFragmentFunction(const MTL::Function* fragmentFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentFunction_), fragmentFunction);\n}\n\n_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxTotalThreadsPerObjectThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerObjectThreadgroup));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadsPerObjectThreadgroup(NS::UInteger maxTotalThreadsPerObjectThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerObjectThreadgroup_), maxTotalThreadsPerObjectThreadgroup);\n}\n\n_MTL_INLINE NS::UInteger 
MTL::MeshRenderPipelineDescriptor::maxTotalThreadsPerMeshThreadgroup() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerMeshThreadgroup));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadsPerMeshThreadgroup(NS::UInteger maxTotalThreadsPerMeshThreadgroup)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerMeshThreadgroup_), maxTotalThreadsPerMeshThreadgroup);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::objectThreadgroupSizeIsMultipleOfThreadExecutionWidth() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(objectThreadgroupSizeIsMultipleOfThreadExecutionWidth));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool objectThreadgroupSizeIsMultipleOfThreadExecutionWidth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth_), objectThreadgroupSizeIsMultipleOfThreadExecutionWidth);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::meshThreadgroupSizeIsMultipleOfThreadExecutionWidth() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(meshThreadgroupSizeIsMultipleOfThreadExecutionWidth));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool meshThreadgroupSizeIsMultipleOfThreadExecutionWidth)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth_), meshThreadgroupSizeIsMultipleOfThreadExecutionWidth);\n}\n\n_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::payloadMemoryLength() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(payloadMemoryLength));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setPayloadMemoryLength(NS::UInteger payloadMemoryLength)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setPayloadMemoryLength_), payloadMemoryLength);\n}\n\n_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxTotalThreadgroupsPerMeshGrid() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxTotalThreadgroupsPerMeshGrid));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadgroupsPerMeshGrid(NS::UInteger maxTotalThreadgroupsPerMeshGrid)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxTotalThreadgroupsPerMeshGrid_), maxTotalThreadgroupsPerMeshGrid);\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::objectBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(objectBuffers));\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::meshBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(meshBuffers));\n}\n\n_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::fragmentBuffers() const\n{\n    return Object::sendMessage<MTL::PipelineBufferDescriptorArray*>(this, _MTL_PRIVATE_SEL(fragmentBuffers));\n}\n\n_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::rasterSampleCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(rasterSampleCount));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::alphaToCoverageEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isAlphaToCoverageEnabled));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setAlphaToCoverageEnabled(bool alphaToCoverageEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAlphaToCoverageEnabled_), 
alphaToCoverageEnabled);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::alphaToOneEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isAlphaToOneEnabled));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setAlphaToOneEnabled(bool alphaToOneEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAlphaToOneEnabled_), alphaToOneEnabled);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::rasterizationEnabled() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(isRasterizationEnabled));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setRasterizationEnabled(bool rasterizationEnabled)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRasterizationEnabled_), rasterizationEnabled);\n}\n\n_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxVertexAmplificationCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxVertexAmplificationCount));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxVertexAmplificationCount_), maxVertexAmplificationCount);\n}\n\n_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::MeshRenderPipelineDescriptor::colorAttachments() const\n{\n    return Object::sendMessage<MTL::RenderPipelineColorAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(colorAttachments));\n}\n\n_MTL_INLINE MTL::PixelFormat MTL::MeshRenderPipelineDescriptor::depthAttachmentPixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(depthAttachmentPixelFormat));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setDepthAttachmentPixelFormat_), depthAttachmentPixelFormat);\n}\n\n_MTL_INLINE 
MTL::PixelFormat MTL::MeshRenderPipelineDescriptor::stencilAttachmentPixelFormat() const\n{\n    return Object::sendMessage<MTL::PixelFormat>(this, _MTL_PRIVATE_SEL(stencilAttachmentPixelFormat));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStencilAttachmentPixelFormat_), stencilAttachmentPixelFormat);\n}\n\n_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::supportIndirectCommandBuffers() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers);\n}\n\n_MTL_INLINE NS::Array* MTL::MeshRenderPipelineDescriptor::binaryArchives() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(binaryArchives));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::objectLinkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(objectLinkedFunctions));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectLinkedFunctions(const MTL::LinkedFunctions* objectLinkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObjectLinkedFunctions_), objectLinkedFunctions);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::meshLinkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(meshLinkedFunctions));\n}\n\n_MTL_INLINE void 
MTL::MeshRenderPipelineDescriptor::setMeshLinkedFunctions(const MTL::LinkedFunctions* meshLinkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMeshLinkedFunctions_), meshLinkedFunctions);\n}\n\n_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::fragmentLinkedFunctions() const\n{\n    return Object::sendMessage<MTL::LinkedFunctions*>(this, _MTL_PRIVATE_SEL(fragmentLinkedFunctions));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setFragmentLinkedFunctions(const MTL::LinkedFunctions* fragmentLinkedFunctions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFragmentLinkedFunctions_), fragmentLinkedFunctions);\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n_MTL_INLINE MTL::ShaderValidation MTL::MeshRenderPipelineDescriptor::shaderValidation() const\n{\n    return Object::sendMessage<MTL::ShaderValidation>(this, _MTL_PRIVATE_SEL(shaderValidation));\n}\n\n_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass ResidencySetDescriptor : public NS::Copying<ResidencySetDescriptor>\n{\npublic:\n    static class ResidencySetDescriptor* alloc();\n\n    class ResidencySetDescriptor*        init();\n\n    NS::String*                          label() const;\n    void                                 setLabel(const NS::String* label);\n\n    NS::UInteger                         initialCapacity() const;\n    void                                 setInitialCapacity(NS::UInteger initialCapacity);\n};\n\nclass ResidencySet : public NS::Referencing<ResidencySet>\n{\npublic:\n    class Device* device() const;\n\n    NS::String*   label() const;\n\n    uint64_t      allocatedSize() const;\n\n    void          requestResidency();\n\n    void  
        endResidency();\n\n    void          addAllocation(const class Allocation* allocation);\n\n    void          addAllocations(const class Allocation* const allocations[], NS::UInteger count);\n\n    void          removeAllocation(const class Allocation* allocation);\n\n    void          removeAllocations(const class Allocation* const allocations[], NS::UInteger count);\n\n    void          removeAllAllocations();\n\n    bool          containsAllocation(const class Allocation* anAllocation);\n\n    NS::Array*    allAllocations() const;\n\n    NS::UInteger  allocationCount() const;\n\n    void          commit();\n};\n\n}\n\n_MTL_INLINE MTL::ResidencySetDescriptor* MTL::ResidencySetDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ResidencySetDescriptor>(_MTL_PRIVATE_CLS(MTLResidencySetDescriptor));\n}\n\n_MTL_INLINE MTL::ResidencySetDescriptor* MTL::ResidencySetDescriptor::init()\n{\n    return NS::Object::init<MTL::ResidencySetDescriptor>();\n}\n\n_MTL_INLINE NS::String* MTL::ResidencySetDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::ResidencySetDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE NS::UInteger MTL::ResidencySetDescriptor::initialCapacity() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(initialCapacity));\n}\n\n_MTL_INLINE void MTL::ResidencySetDescriptor::setInitialCapacity(NS::UInteger initialCapacity)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setInitialCapacity_), initialCapacity);\n}\n\n_MTL_INLINE MTL::Device* MTL::ResidencySet::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE NS::String* MTL::ResidencySet::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE uint64_t 
MTL::ResidencySet::allocatedSize() const\n{\n    return Object::sendMessage<uint64_t>(this, _MTL_PRIVATE_SEL(allocatedSize));\n}\n\n_MTL_INLINE void MTL::ResidencySet::requestResidency()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(requestResidency));\n}\n\n_MTL_INLINE void MTL::ResidencySet::endResidency()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(endResidency));\n}\n\n_MTL_INLINE void MTL::ResidencySet::addAllocation(const MTL::Allocation* allocation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addAllocation_), allocation);\n}\n\n_MTL_INLINE void MTL::ResidencySet::addAllocations(const MTL::Allocation* const allocations[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(addAllocations_count_), allocations, count);\n}\n\n_MTL_INLINE void MTL::ResidencySet::removeAllocation(const MTL::Allocation* allocation)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(removeAllocation_), allocation);\n}\n\n_MTL_INLINE void MTL::ResidencySet::removeAllocations(const MTL::Allocation* const allocations[], NS::UInteger count)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(removeAllocations_count_), allocations, count);\n}\n\n_MTL_INLINE void MTL::ResidencySet::removeAllAllocations()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(removeAllAllocations));\n}\n\n_MTL_INLINE bool MTL::ResidencySet::containsAllocation(const MTL::Allocation* anAllocation)\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(containsAllocation_), anAllocation);\n}\n\n_MTL_INLINE NS::Array* MTL::ResidencySet::allAllocations() const\n{\n    return Object::sendMessage<NS::Array*>(this, _MTL_PRIVATE_SEL(allAllocations));\n}\n\n_MTL_INLINE NS::UInteger MTL::ResidencySet::allocationCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(allocationCount));\n}\n\n_MTL_INLINE void MTL::ResidencySet::commit()\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(commit));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, SparseTextureMappingMode) {\n    SparseTextureMappingModeMap = 0,\n    SparseTextureMappingModeUnmap = 1,\n};\n\nstruct MapIndirectArguments\n{\n    uint32_t regionOriginX;\n    uint32_t regionOriginY;\n    uint32_t regionOriginZ;\n    uint32_t regionSizeWidth;\n    uint32_t regionSizeHeight;\n    uint32_t regionSizeDepth;\n    uint32_t mipMapLevel;\n    uint32_t sliceId;\n} _MTL_PACKED;\n\nclass ResourceStateCommandEncoder : public NS::Referencing<ResourceStateCommandEncoder, CommandEncoder>\n{\npublic:\n    void updateTextureMappings(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region* regions, const NS::UInteger* mipLevels, const NS::UInteger* slices, NS::UInteger numRegions);\n\n    void updateTextureMapping(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region region, const NS::UInteger mipLevel, const NS::UInteger slice);\n\n    void updateTextureMapping(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);\n\n    void updateFence(const class Fence* fence);\n\n    void waitForFence(const class Fence* fence);\n\n    void moveTextureMappingsFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin);\n};\n\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMappings(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region* regions, const NS::UInteger* mipLevels, const NS::UInteger* slices, NS::UInteger numRegions)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateTextureMappings_mode_regions_mipLevels_slices_numRegions_), texture, 
mode, regions, mipLevels, slices, numRegions);\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMapping(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region region, const NS::UInteger mipLevel, const NS::UInteger slice)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateTextureMapping_mode_region_mipLevel_slice_), texture, mode, region, mipLevel, slice);\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMapping(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateTextureMapping_mode_indirectBuffer_indirectBufferOffset_), texture, mode, indirectBuffer, indirectBufferOffset);\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_), fence);\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::waitForFence(const MTL::Fence* fence)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_), fence);\n}\n\n_MTL_INLINE void MTL::ResourceStateCommandEncoder::moveTextureMappingsFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(moveTextureMappingsFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin);\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass ResourceStatePassSampleBufferAttachmentDescriptor : public 
NS::Copying<ResourceStatePassSampleBufferAttachmentDescriptor>\n{\npublic:\n    static class ResourceStatePassSampleBufferAttachmentDescriptor* alloc();\n\n    class ResourceStatePassSampleBufferAttachmentDescriptor*        init();\n\n    class CounterSampleBuffer*                                      sampleBuffer() const;\n    void                                                            setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);\n\n    NS::UInteger                                                    startOfEncoderSampleIndex() const;\n    void                                                            setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);\n\n    NS::UInteger                                                    endOfEncoderSampleIndex() const;\n    void                                                            setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);\n};\n\nclass ResourceStatePassSampleBufferAttachmentDescriptorArray : public NS::Referencing<ResourceStatePassSampleBufferAttachmentDescriptorArray>\n{\npublic:\n    static class ResourceStatePassSampleBufferAttachmentDescriptorArray* alloc();\n\n    class ResourceStatePassSampleBufferAttachmentDescriptorArray*        init();\n\n    class ResourceStatePassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);\n\n    void                                                                 setObject(const class ResourceStatePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);\n};\n\nclass ResourceStatePassDescriptor : public NS::Copying<ResourceStatePassDescriptor>\n{\npublic:\n    static class ResourceStatePassDescriptor*                     alloc();\n\n    class ResourceStatePassDescriptor*                            init();\n\n    static class ResourceStatePassDescriptor*                     resourceStatePassDescriptor();\n\n    class 
ResourceStatePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;\n};\n\n}\n\n_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ResourceStatePassSampleBufferAttachmentDescriptor>(_MTL_PRIVATE_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptor));\n}\n\n_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::init()\n{\n    return NS::Object::init<MTL::ResourceStatePassSampleBufferAttachmentDescriptor>();\n}\n\n_MTL_INLINE MTL::CounterSampleBuffer* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::sampleBuffer() const\n{\n    return Object::sendMessage<MTL::CounterSampleBuffer*>(this, _MTL_PRIVATE_SEL(sampleBuffer));\n}\n\n_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer);\n}\n\n_MTL_INLINE NS::UInteger MTL::ResourceStatePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex);\n}\n\n_MTL_INLINE NS::UInteger MTL::ResourceStatePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex));\n}\n\n_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex)\n{\n    Object::sendMessage<void>(this, 
_MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex);\n}\n\n_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray>(_MTL_PRIVATE_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptorArray));\n}\n\n_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::init()\n{\n    return NS::Object::init<MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray>();\n}\n\n_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex)\n{\n    return Object::sendMessage<MTL::ResourceStatePassSampleBufferAttachmentDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex);\n}\n\n_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::ResourceStatePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex);\n}\n\n_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::ResourceStatePassDescriptor>(_MTL_PRIVATE_CLS(MTLResourceStatePassDescriptor));\n}\n\n_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::init()\n{\n    return NS::Object::init<MTL::ResourceStatePassDescriptor>();\n}\n\n_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::resourceStatePassDescriptor()\n{\n    return Object::sendMessage<MTL::ResourceStatePassDescriptor*>(_MTL_PRIVATE_CLS(MTLResourceStatePassDescriptor), _MTL_PRIVATE_SEL(resourceStatePassDescriptor));\n}\n\n_MTL_INLINE 
MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassDescriptor::sampleBufferAttachments() const\n{\n    return Object::sendMessage<MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray*>(this, _MTL_PRIVATE_SEL(sampleBufferAttachments));\n}\n\n#pragma once\n\nnamespace MTL\n{\n_MTL_ENUM(NS::UInteger, SamplerMinMagFilter) {\n    SamplerMinMagFilterNearest = 0,\n    SamplerMinMagFilterLinear = 1,\n};\n\n_MTL_ENUM(NS::UInteger, SamplerMipFilter) {\n    SamplerMipFilterNotMipmapped = 0,\n    SamplerMipFilterNearest = 1,\n    SamplerMipFilterLinear = 2,\n};\n\n_MTL_ENUM(NS::UInteger, SamplerAddressMode) {\n    SamplerAddressModeClampToEdge = 0,\n    SamplerAddressModeMirrorClampToEdge = 1,\n    SamplerAddressModeRepeat = 2,\n    SamplerAddressModeMirrorRepeat = 3,\n    SamplerAddressModeClampToZero = 4,\n    SamplerAddressModeClampToBorderColor = 5,\n};\n\n_MTL_ENUM(NS::UInteger, SamplerBorderColor) {\n    SamplerBorderColorTransparentBlack = 0,\n    SamplerBorderColorOpaqueBlack = 1,\n    SamplerBorderColorOpaqueWhite = 2,\n};\n\nclass SamplerDescriptor : public NS::Copying<SamplerDescriptor>\n{\npublic:\n    static class SamplerDescriptor* alloc();\n\n    class SamplerDescriptor*        init();\n\n    MTL::SamplerMinMagFilter        minFilter() const;\n    void                            setMinFilter(MTL::SamplerMinMagFilter minFilter);\n\n    MTL::SamplerMinMagFilter        magFilter() const;\n    void                            setMagFilter(MTL::SamplerMinMagFilter magFilter);\n\n    MTL::SamplerMipFilter           mipFilter() const;\n    void                            setMipFilter(MTL::SamplerMipFilter mipFilter);\n\n    NS::UInteger                    maxAnisotropy() const;\n    void                            setMaxAnisotropy(NS::UInteger maxAnisotropy);\n\n    MTL::SamplerAddressMode         sAddressMode() const;\n    void                            setSAddressMode(MTL::SamplerAddressMode sAddressMode);\n\n    
MTL::SamplerAddressMode         tAddressMode() const;\n    void                            setTAddressMode(MTL::SamplerAddressMode tAddressMode);\n\n    MTL::SamplerAddressMode         rAddressMode() const;\n    void                            setRAddressMode(MTL::SamplerAddressMode rAddressMode);\n\n    MTL::SamplerBorderColor         borderColor() const;\n    void                            setBorderColor(MTL::SamplerBorderColor borderColor);\n\n    bool                            normalizedCoordinates() const;\n    void                            setNormalizedCoordinates(bool normalizedCoordinates);\n\n    float                           lodMinClamp() const;\n    void                            setLodMinClamp(float lodMinClamp);\n\n    float                           lodMaxClamp() const;\n    void                            setLodMaxClamp(float lodMaxClamp);\n\n    bool                            lodAverage() const;\n    void                            setLodAverage(bool lodAverage);\n\n    MTL::CompareFunction            compareFunction() const;\n    void                            setCompareFunction(MTL::CompareFunction compareFunction);\n\n    bool                            supportArgumentBuffers() const;\n    void                            setSupportArgumentBuffers(bool supportArgumentBuffers);\n\n    NS::String*                     label() const;\n    void                            setLabel(const NS::String* label);\n};\n\nclass SamplerState : public NS::Referencing<SamplerState>\n{\npublic:\n    NS::String*     label() const;\n\n    class Device*   device() const;\n\n    MTL::ResourceID gpuResourceID() const;\n};\n\n}\n\n_MTL_INLINE MTL::SamplerDescriptor* MTL::SamplerDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::SamplerDescriptor>(_MTL_PRIVATE_CLS(MTLSamplerDescriptor));\n}\n\n_MTL_INLINE MTL::SamplerDescriptor* MTL::SamplerDescriptor::init()\n{\n    return NS::Object::init<MTL::SamplerDescriptor>();\n}\n\n_MTL_INLINE 
MTL::SamplerMinMagFilter MTL::SamplerDescriptor::minFilter() const\n{\n    return Object::sendMessage<MTL::SamplerMinMagFilter>(this, _MTL_PRIVATE_SEL(minFilter));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setMinFilter(MTL::SamplerMinMagFilter minFilter)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMinFilter_), minFilter);\n}\n\n_MTL_INLINE MTL::SamplerMinMagFilter MTL::SamplerDescriptor::magFilter() const\n{\n    return Object::sendMessage<MTL::SamplerMinMagFilter>(this, _MTL_PRIVATE_SEL(magFilter));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setMagFilter(MTL::SamplerMinMagFilter magFilter)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMagFilter_), magFilter);\n}\n\n_MTL_INLINE MTL::SamplerMipFilter MTL::SamplerDescriptor::mipFilter() const\n{\n    return Object::sendMessage<MTL::SamplerMipFilter>(this, _MTL_PRIVATE_SEL(mipFilter));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setMipFilter(MTL::SamplerMipFilter mipFilter)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMipFilter_), mipFilter);\n}\n\n_MTL_INLINE NS::UInteger MTL::SamplerDescriptor::maxAnisotropy() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(maxAnisotropy));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setMaxAnisotropy(NS::UInteger maxAnisotropy)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setMaxAnisotropy_), maxAnisotropy);\n}\n\n_MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::sAddressMode() const\n{\n    return Object::sendMessage<MTL::SamplerAddressMode>(this, _MTL_PRIVATE_SEL(sAddressMode));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setSAddressMode(MTL::SamplerAddressMode sAddressMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSAddressMode_), sAddressMode);\n}\n\n_MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::tAddressMode() const\n{\n    return Object::sendMessage<MTL::SamplerAddressMode>(this, _MTL_PRIVATE_SEL(tAddressMode));\n}\n\n_MTL_INLINE void 
MTL::SamplerDescriptor::setTAddressMode(MTL::SamplerAddressMode tAddressMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTAddressMode_), tAddressMode);\n}\n\n_MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::rAddressMode() const\n{\n    return Object::sendMessage<MTL::SamplerAddressMode>(this, _MTL_PRIVATE_SEL(rAddressMode));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setRAddressMode(MTL::SamplerAddressMode rAddressMode)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRAddressMode_), rAddressMode);\n}\n\n_MTL_INLINE MTL::SamplerBorderColor MTL::SamplerDescriptor::borderColor() const\n{\n    return Object::sendMessage<MTL::SamplerBorderColor>(this, _MTL_PRIVATE_SEL(borderColor));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setBorderColor(MTL::SamplerBorderColor borderColor)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBorderColor_), borderColor);\n}\n\n_MTL_INLINE bool MTL::SamplerDescriptor::normalizedCoordinates() const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(normalizedCoordinates));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setNormalizedCoordinates(bool normalizedCoordinates)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setNormalizedCoordinates_), normalizedCoordinates);\n}\n\n_MTL_INLINE float MTL::SamplerDescriptor::lodMinClamp() const\n{\n    return Object::sendMessage<float>(this, _MTL_PRIVATE_SEL(lodMinClamp));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setLodMinClamp(float lodMinClamp)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLodMinClamp_), lodMinClamp);\n}\n\n_MTL_INLINE float MTL::SamplerDescriptor::lodMaxClamp() const\n{\n    return Object::sendMessage<float>(this, _MTL_PRIVATE_SEL(lodMaxClamp));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setLodMaxClamp(float lodMaxClamp)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLodMaxClamp_), lodMaxClamp);\n}\n\n_MTL_INLINE bool MTL::SamplerDescriptor::lodAverage() 
const\n{\n    return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(lodAverage));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setLodAverage(bool lodAverage)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLodAverage_), lodAverage);\n}\n\n_MTL_INLINE MTL::CompareFunction MTL::SamplerDescriptor::compareFunction() const\n{\n    return Object::sendMessage<MTL::CompareFunction>(this, _MTL_PRIVATE_SEL(compareFunction));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setCompareFunction(MTL::CompareFunction compareFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setCompareFunction_), compareFunction);\n}\n\n_MTL_INLINE bool MTL::SamplerDescriptor::supportArgumentBuffers() const\n{\n    return Object::sendMessageSafe<bool>(this, _MTL_PRIVATE_SEL(supportArgumentBuffers));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setSupportArgumentBuffers(bool supportArgumentBuffers)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSupportArgumentBuffers_), supportArgumentBuffers);\n}\n\n_MTL_INLINE NS::String* MTL::SamplerDescriptor::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE void MTL::SamplerDescriptor::setLabel(const NS::String* label)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label);\n}\n\n_MTL_INLINE NS::String* MTL::SamplerState::label() const\n{\n    return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label));\n}\n\n_MTL_INLINE MTL::Device* MTL::SamplerState::device() const\n{\n    return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device));\n}\n\n_MTL_INLINE MTL::ResourceID MTL::SamplerState::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n#pragma once\n\nnamespace MTL\n{\n\nstatic const NS::UInteger BufferLayoutStrideDynamic = NS::UIntegerMax;\n\n_MTL_ENUM(NS::UInteger, VertexFormat) {\n    VertexFormatInvalid = 0,\n    VertexFormatUChar2 = 
1,\n    VertexFormatUChar3 = 2,\n    VertexFormatUChar4 = 3,\n    VertexFormatChar2 = 4,\n    VertexFormatChar3 = 5,\n    VertexFormatChar4 = 6,\n    VertexFormatUChar2Normalized = 7,\n    VertexFormatUChar3Normalized = 8,\n    VertexFormatUChar4Normalized = 9,\n    VertexFormatChar2Normalized = 10,\n    VertexFormatChar3Normalized = 11,\n    VertexFormatChar4Normalized = 12,\n    VertexFormatUShort2 = 13,\n    VertexFormatUShort3 = 14,\n    VertexFormatUShort4 = 15,\n    VertexFormatShort2 = 16,\n    VertexFormatShort3 = 17,\n    VertexFormatShort4 = 18,\n    VertexFormatUShort2Normalized = 19,\n    VertexFormatUShort3Normalized = 20,\n    VertexFormatUShort4Normalized = 21,\n    VertexFormatShort2Normalized = 22,\n    VertexFormatShort3Normalized = 23,\n    VertexFormatShort4Normalized = 24,\n    VertexFormatHalf2 = 25,\n    VertexFormatHalf3 = 26,\n    VertexFormatHalf4 = 27,\n    VertexFormatFloat = 28,\n    VertexFormatFloat2 = 29,\n    VertexFormatFloat3 = 30,\n    VertexFormatFloat4 = 31,\n    VertexFormatInt = 32,\n    VertexFormatInt2 = 33,\n    VertexFormatInt3 = 34,\n    VertexFormatInt4 = 35,\n    VertexFormatUInt = 36,\n    VertexFormatUInt2 = 37,\n    VertexFormatUInt3 = 38,\n    VertexFormatUInt4 = 39,\n    VertexFormatInt1010102Normalized = 40,\n    VertexFormatUInt1010102Normalized = 41,\n    VertexFormatUChar4Normalized_BGRA = 42,\n    VertexFormatUChar = 45,\n    VertexFormatChar = 46,\n    VertexFormatUCharNormalized = 47,\n    VertexFormatCharNormalized = 48,\n    VertexFormatUShort = 49,\n    VertexFormatShort = 50,\n    VertexFormatUShortNormalized = 51,\n    VertexFormatShortNormalized = 52,\n    VertexFormatHalf = 53,\n    VertexFormatFloatRG11B10 = 54,\n    VertexFormatFloatRGB9E5 = 55,\n};\n\n_MTL_ENUM(NS::UInteger, VertexStepFunction) {\n    VertexStepFunctionConstant = 0,\n    VertexStepFunctionPerVertex = 1,\n    VertexStepFunctionPerInstance = 2,\n    VertexStepFunctionPerPatch = 3,\n    VertexStepFunctionPerPatchControlPoint = 
4,\n};\n\nclass VertexBufferLayoutDescriptor : public NS::Copying<VertexBufferLayoutDescriptor>\n{\npublic:\n    static class VertexBufferLayoutDescriptor* alloc();\n\n    class VertexBufferLayoutDescriptor*        init();\n\n    NS::UInteger                               stride() const;\n    void                                       setStride(NS::UInteger stride);\n\n    MTL::VertexStepFunction                    stepFunction() const;\n    void                                       setStepFunction(MTL::VertexStepFunction stepFunction);\n\n    NS::UInteger                               stepRate() const;\n    void                                       setStepRate(NS::UInteger stepRate);\n};\n\nclass VertexBufferLayoutDescriptorArray : public NS::Referencing<VertexBufferLayoutDescriptorArray>\n{\npublic:\n    static class VertexBufferLayoutDescriptorArray* alloc();\n\n    class VertexBufferLayoutDescriptorArray*        init();\n\n    class VertexBufferLayoutDescriptor*             object(NS::UInteger index);\n\n    void                                            setObject(const class VertexBufferLayoutDescriptor* bufferDesc, NS::UInteger index);\n};\n\nclass VertexAttributeDescriptor : public NS::Copying<VertexAttributeDescriptor>\n{\npublic:\n    static class VertexAttributeDescriptor* alloc();\n\n    class VertexAttributeDescriptor*        init();\n\n    MTL::VertexFormat                       format() const;\n    void                                    setFormat(MTL::VertexFormat format);\n\n    NS::UInteger                            offset() const;\n    void                                    setOffset(NS::UInteger offset);\n\n    NS::UInteger                            bufferIndex() const;\n    void                                    setBufferIndex(NS::UInteger bufferIndex);\n};\n\nclass VertexAttributeDescriptorArray : public NS::Referencing<VertexAttributeDescriptorArray>\n{\npublic:\n    static class VertexAttributeDescriptorArray* alloc();\n\n    class 
VertexAttributeDescriptorArray*        init();\n\n    class VertexAttributeDescriptor*             object(NS::UInteger index);\n\n    void                                         setObject(const class VertexAttributeDescriptor* attributeDesc, NS::UInteger index);\n};\n\nclass VertexDescriptor : public NS::Copying<VertexDescriptor>\n{\npublic:\n    static class VertexDescriptor*           alloc();\n\n    class VertexDescriptor*                  init();\n\n    static class VertexDescriptor*           vertexDescriptor();\n\n    class VertexBufferLayoutDescriptorArray* layouts() const;\n\n    class VertexAttributeDescriptorArray*    attributes() const;\n\n    void                                     reset();\n};\n\n}\n\n_MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::VertexBufferLayoutDescriptor>(_MTL_PRIVATE_CLS(MTLVertexBufferLayoutDescriptor));\n}\n\n_MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptor::init()\n{\n    return NS::Object::init<MTL::VertexBufferLayoutDescriptor>();\n}\n\n_MTL_INLINE NS::UInteger MTL::VertexBufferLayoutDescriptor::stride() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(stride));\n}\n\n_MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStride(NS::UInteger stride)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStride_), stride);\n}\n\n_MTL_INLINE MTL::VertexStepFunction MTL::VertexBufferLayoutDescriptor::stepFunction() const\n{\n    return Object::sendMessage<MTL::VertexStepFunction>(this, _MTL_PRIVATE_SEL(stepFunction));\n}\n\n_MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStepFunction(MTL::VertexStepFunction stepFunction)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStepFunction_), stepFunction);\n}\n\n_MTL_INLINE NS::UInteger MTL::VertexBufferLayoutDescriptor::stepRate() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(stepRate));\n}\n\n_MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStepRate(NS::UInteger stepRate)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setStepRate_), stepRate);\n}\n\n_MTL_INLINE MTL::VertexBufferLayoutDescriptorArray* MTL::VertexBufferLayoutDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::VertexBufferLayoutDescriptorArray>(_MTL_PRIVATE_CLS(MTLVertexBufferLayoutDescriptorArray));\n}\n\n_MTL_INLINE MTL::VertexBufferLayoutDescriptorArray* MTL::VertexBufferLayoutDescriptorArray::init()\n{\n    return NS::Object::init<MTL::VertexBufferLayoutDescriptorArray>();\n}\n\n_MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptorArray::object(NS::UInteger index)\n{\n    return Object::sendMessage<MTL::VertexBufferLayoutDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index);\n}\n\n_MTL_INLINE void MTL::VertexBufferLayoutDescriptorArray::setObject(const MTL::VertexBufferLayoutDescriptor* bufferDesc, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), bufferDesc, index);\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::VertexAttributeDescriptor>(_MTL_PRIVATE_CLS(MTLVertexAttributeDescriptor));\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptor::init()\n{\n    return NS::Object::init<MTL::VertexAttributeDescriptor>();\n}\n\n_MTL_INLINE MTL::VertexFormat MTL::VertexAttributeDescriptor::format() const\n{\n    return Object::sendMessage<MTL::VertexFormat>(this, _MTL_PRIVATE_SEL(format));\n}\n\n_MTL_INLINE void MTL::VertexAttributeDescriptor::setFormat(MTL::VertexFormat format)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFormat_), format);\n}\n\n_MTL_INLINE NS::UInteger MTL::VertexAttributeDescriptor::offset() const\n{\n    return Object::sendMessage<NS::UInteger>(this, 
_MTL_PRIVATE_SEL(offset));\n}\n\n_MTL_INLINE void MTL::VertexAttributeDescriptor::setOffset(NS::UInteger offset)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setOffset_), offset);\n}\n\n_MTL_INLINE NS::UInteger MTL::VertexAttributeDescriptor::bufferIndex() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(bufferIndex));\n}\n\n_MTL_INLINE void MTL::VertexAttributeDescriptor::setBufferIndex(NS::UInteger bufferIndex)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBufferIndex_), bufferIndex);\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexAttributeDescriptorArray::alloc()\n{\n    return NS::Object::alloc<MTL::VertexAttributeDescriptorArray>(_MTL_PRIVATE_CLS(MTLVertexAttributeDescriptorArray));\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexAttributeDescriptorArray::init()\n{\n    return NS::Object::init<MTL::VertexAttributeDescriptorArray>();\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptorArray::object(NS::UInteger index)\n{\n    return Object::sendMessage<MTL::VertexAttributeDescriptor*>(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index);\n}\n\n_MTL_INLINE void MTL::VertexAttributeDescriptorArray::setObject(const MTL::VertexAttributeDescriptor* attributeDesc, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attributeDesc, index);\n}\n\n_MTL_INLINE MTL::VertexDescriptor* MTL::VertexDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::VertexDescriptor>(_MTL_PRIVATE_CLS(MTLVertexDescriptor));\n}\n\n_MTL_INLINE MTL::VertexDescriptor* MTL::VertexDescriptor::init()\n{\n    return NS::Object::init<MTL::VertexDescriptor>();\n}\n\n_MTL_INLINE MTL::VertexDescriptor* MTL::VertexDescriptor::vertexDescriptor()\n{\n    return Object::sendMessage<MTL::VertexDescriptor*>(_MTL_PRIVATE_CLS(MTLVertexDescriptor), _MTL_PRIVATE_SEL(vertexDescriptor));\n}\n\n_MTL_INLINE 
MTL::VertexBufferLayoutDescriptorArray* MTL::VertexDescriptor::layouts() const\n{\n    return Object::sendMessage<MTL::VertexBufferLayoutDescriptorArray*>(this, _MTL_PRIVATE_SEL(layouts));\n}\n\n_MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexDescriptor::attributes() const\n{\n    return Object::sendMessage<MTL::VertexAttributeDescriptorArray*>(this, _MTL_PRIVATE_SEL(attributes));\n}\n\n_MTL_INLINE void MTL::VertexDescriptor::reset()\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(reset));\n}\n\n#pragma once\n\nnamespace MTL\n{\nclass VisibleFunctionTableDescriptor : public NS::Copying<VisibleFunctionTableDescriptor>\n{\npublic:\n    static class VisibleFunctionTableDescriptor* alloc();\n\n    class VisibleFunctionTableDescriptor*        init();\n\n    static class VisibleFunctionTableDescriptor* visibleFunctionTableDescriptor();\n\n    NS::UInteger                                 functionCount() const;\n    void                                         setFunctionCount(NS::UInteger functionCount);\n};\n\nclass VisibleFunctionTable : public NS::Referencing<VisibleFunctionTable, Resource>\n{\npublic:\n    MTL::ResourceID gpuResourceID() const;\n\n    void            setFunction(const class FunctionHandle* function, NS::UInteger index);\n\n    void            setFunctions(const class FunctionHandle* const functions[], NS::Range range);\n};\n\n}\n\n_MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::alloc()\n{\n    return NS::Object::alloc<MTL::VisibleFunctionTableDescriptor>(_MTL_PRIVATE_CLS(MTLVisibleFunctionTableDescriptor));\n}\n\n_MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::init()\n{\n    return NS::Object::init<MTL::VisibleFunctionTableDescriptor>();\n}\n\n_MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::visibleFunctionTableDescriptor()\n{\n    return 
Object::sendMessage<MTL::VisibleFunctionTableDescriptor*>(_MTL_PRIVATE_CLS(MTLVisibleFunctionTableDescriptor), _MTL_PRIVATE_SEL(visibleFunctionTableDescriptor));\n}\n\n_MTL_INLINE NS::UInteger MTL::VisibleFunctionTableDescriptor::functionCount() const\n{\n    return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(functionCount));\n}\n\n_MTL_INLINE void MTL::VisibleFunctionTableDescriptor::setFunctionCount(NS::UInteger functionCount)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctionCount_), functionCount);\n}\n\n_MTL_INLINE MTL::ResourceID MTL::VisibleFunctionTable::gpuResourceID() const\n{\n    return Object::sendMessage<MTL::ResourceID>(this, _MTL_PRIVATE_SEL(gpuResourceID));\n}\n\n_MTL_INLINE void MTL::VisibleFunctionTable::setFunction(const MTL::FunctionHandle* function, NS::UInteger index)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunction_atIndex_), function, index);\n}\n\n_MTL_INLINE void MTL::VisibleFunctionTable::setFunctions(const MTL::FunctionHandle* const functions[], NS::Range range)\n{\n    Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setFunctions_withRange_), functions, range);\n}\n\n#define METALCPP_VERSION_MAJOR 367\n#define METALCPP_VERSION_MINOR 4\n#define METALCPP_VERSION_PATCH 2\n\n#define METALCPP_SUPPORTS_VERSION(major, minor, patch) \\\n    ((major < METALCPP_VERSION_MAJOR) || \\\n    (major == METALCPP_VERSION_MAJOR && minor < METALCPP_VERSION_MINOR) || \\\n    (major == METALCPP_VERSION_MAJOR && minor == METALCPP_VERSION_MINOR && patch <= METALCPP_VERSION_PATCH))\n\n"
  },
  {
    "path": "deps/renderdoc/renderdoc_app.h",
    "content": "/******************************************************************************\n * The MIT License (MIT)\n *\n * Copyright (c) 2019-2023 Baldur Karlsson\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n * THE SOFTWARE.\n ******************************************************************************/\n\n#pragma once\n\n//////////////////////////////////////////////////////////////////////////////////////////////////\n//\n// Documentation for the API is available at https://renderdoc.org/docs/in_application_api.html\n//\n\n#if !defined(RENDERDOC_NO_STDINT)\n#include <stdint.h>\n#endif\n\n#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)\n#define RENDERDOC_CC __cdecl\n#elif defined(__linux__)\n#define RENDERDOC_CC\n#elif defined(__APPLE__)\n#define RENDERDOC_CC\n#else\n#error \"Unknown platform\"\n#endif\n\n#ifdef __cplusplus\nextern \"C\" 
{\n#endif\n\n//////////////////////////////////////////////////////////////////////////////////////////////////\n// Constants not used directly in below API\n\n// This is a GUID/magic value used for when applications pass a path where shader debug\n// information can be found to match up with a stripped shader.\n// the define can be used like so: const GUID RENDERDOC_ShaderDebugMagicValue =\n// RENDERDOC_ShaderDebugMagicValue_value\n#define RENDERDOC_ShaderDebugMagicValue_struct                                \\\n  {                                                                           \\\n    0xeab25520, 0x6670, 0x4865, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \\\n  }\n\n// as an alternative when you want a byte array (assuming x86 endianness):\n#define RENDERDOC_ShaderDebugMagicValue_bytearray                                                 \\\n  {                                                                                               \\\n    0x20, 0x55, 0xb2, 0xea, 0x70, 0x66, 0x65, 0x48, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \\\n  }\n\n// truncated version when only a uint64_t is available (e.g. 
Vulkan tags):\n#define RENDERDOC_ShaderDebugMagicValue_truncated 0x48656670eab25520ULL\n\n//////////////////////////////////////////////////////////////////////////////////////////////////\n// RenderDoc capture options\n//\n\ntypedef enum RENDERDOC_CaptureOption {\n  // Allow the application to enable vsync\n  //\n  // Default - enabled\n  //\n  // 1 - The application can enable or disable vsync at will\n  // 0 - vsync is force disabled\n  eRENDERDOC_Option_AllowVSync = 0,\n\n  // Allow the application to enable fullscreen\n  //\n  // Default - enabled\n  //\n  // 1 - The application can enable or disable fullscreen at will\n  // 0 - fullscreen is force disabled\n  eRENDERDOC_Option_AllowFullscreen = 1,\n\n  // Record API debugging events and messages\n  //\n  // Default - disabled\n  //\n  // 1 - Enable built-in API debugging features and records the results into\n  //     the capture, which is matched up with events on replay\n  // 0 - no API debugging is forcibly enabled\n  eRENDERDOC_Option_APIValidation = 2,\n  eRENDERDOC_Option_DebugDeviceMode = 2,    // deprecated name of this enum\n\n  // Capture CPU callstacks for API events\n  //\n  // Default - disabled\n  //\n  // 1 - Enables capturing of callstacks\n  // 0 - no callstacks are captured\n  eRENDERDOC_Option_CaptureCallstacks = 3,\n\n  // When capturing CPU callstacks, only capture them from actions.\n  // This option does nothing without the above option being enabled\n  //\n  // Default - disabled\n  //\n  // 1 - Only captures callstacks for actions.\n  //     Ignored if CaptureCallstacks is disabled\n  // 0 - Callstacks, if enabled, are captured for every event.\n  eRENDERDOC_Option_CaptureCallstacksOnlyDraws = 4,\n  eRENDERDOC_Option_CaptureCallstacksOnlyActions = 4,\n\n  // Specify a delay in seconds to wait for a debugger to attach, after\n  // creating or injecting into a process, before continuing to allow it to run.\n  //\n  // 0 indicates no delay, and the process will run immediately after 
injection\n  //\n  // Default - 0 seconds\n  //\n  eRENDERDOC_Option_DelayForDebugger = 5,\n\n  // Verify buffer access. This includes checking the memory returned by a Map() call to\n  // detect any out-of-bounds modification, as well as initialising buffers with undefined contents\n  // to a marker value to catch use of uninitialised memory.\n  //\n  // NOTE: This option is only valid for OpenGL and D3D11. Explicit APIs such as D3D12 and Vulkan do\n  // not do the same kind of interception & checking and undefined contents are really undefined.\n  //\n  // Default - disabled\n  //\n  // 1 - Verify buffer access\n  // 0 - No verification is performed, and overwriting bounds may cause crashes or corruption in\n  //     RenderDoc.\n  eRENDERDOC_Option_VerifyBufferAccess = 6,\n\n  // The old name for eRENDERDOC_Option_VerifyBufferAccess was eRENDERDOC_Option_VerifyMapWrites.\n  // This option now controls the filling of uninitialised buffers with 0xdddddddd which was\n  // previously always enabled\n  eRENDERDOC_Option_VerifyMapWrites = eRENDERDOC_Option_VerifyBufferAccess,\n\n  // Hooks any system API calls that create child processes, and injects\n  // RenderDoc into them recursively with the same options.\n  //\n  // Default - disabled\n  //\n  // 1 - Hooks into spawned child processes\n  // 0 - Child processes are not hooked by RenderDoc\n  eRENDERDOC_Option_HookIntoChildren = 7,\n\n  // By default RenderDoc only includes resources in the final capture necessary\n  // for that frame, this allows you to override that behaviour.\n  //\n  // Default - disabled\n  //\n  // 1 - all live resources at the time of capture are included in the capture\n  //     and available for inspection\n  // 0 - only the resources referenced by the captured frame are included\n  eRENDERDOC_Option_RefAllResources = 8,\n\n  // **NOTE**: As of RenderDoc v1.1 this option has been deprecated. 
Setting or\n  // getting it will be ignored, to allow compatibility with older versions.\n  // In v1.1 the option acts as if it's always enabled.\n  //\n  // By default RenderDoc skips saving initial states for resources where the\n  // previous contents don't appear to be used, assuming that writes before\n  // reads indicate previous contents aren't used.\n  //\n  // Default - disabled\n  //\n  // 1 - initial contents at the start of each captured frame are saved, even if\n  //     they are later overwritten or cleared before being used.\n  // 0 - unless a read is detected, initial contents will not be saved and will\n  //     appear as black or empty data.\n  eRENDERDOC_Option_SaveAllInitials = 9,\n\n  // In APIs that allow for the recording of command lists to be replayed later,\n  // RenderDoc may choose to not capture command lists before a frame capture is\n  // triggered, to reduce overheads. This means any command lists recorded once\n  // and replayed many times will not be available and may cause a failure to\n  // capture.\n  //\n  // NOTE: This is only true for APIs where multithreading is difficult or\n  // discouraged. 
Newer APIs like Vulkan and D3D12 will ignore this option\n  // and always capture all command lists since the API is heavily oriented\n  // around it and the overheads have been reduced by API design.\n  //\n  // 1 - All command lists are captured from the start of the application\n  // 0 - Command lists are only captured if their recording begins during\n  //     the period when a frame capture is in progress.\n  eRENDERDOC_Option_CaptureAllCmdLists = 10,\n\n  // Mute API debugging output when the API validation mode option is enabled\n  //\n  // Default - enabled\n  //\n  // 1 - Mute any API debug messages from being displayed or passed through\n  // 0 - API debugging is displayed as normal\n  eRENDERDOC_Option_DebugOutputMute = 11,\n\n  // Option to allow vendor extensions to be used even when they may be\n  // incompatible with RenderDoc and cause corrupted replays or crashes.\n  //\n  // Default - inactive\n  //\n  // No values are documented, this option should only be used when absolutely\n  // necessary as directed by a RenderDoc developer.\n  eRENDERDOC_Option_AllowUnsupportedVendorExtensions = 12,\n\n} RENDERDOC_CaptureOption;\n\n// Sets an option that controls how RenderDoc behaves on capture.\n//\n// Returns 1 if the option and value are valid\n// Returns 0 if either is invalid and the option is unchanged\ntypedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionU32)(RENDERDOC_CaptureOption opt, uint32_t val);\ntypedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionF32)(RENDERDOC_CaptureOption opt, float val);\n\n// Gets the current value of an option as a uint32_t\n//\n// If the option is invalid, 0xffffffff is returned\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionU32)(RENDERDOC_CaptureOption opt);\n\n// Gets the current value of an option as a float\n//\n// If the option is invalid, -FLT_MAX is returned\ntypedef float(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionF32)(RENDERDOC_CaptureOption opt);\n\ntypedef enum RENDERDOC_InputButton {\n  // '0' - 
'9' matches ASCII values\n  eRENDERDOC_Key_0 = 0x30,\n  eRENDERDOC_Key_1 = 0x31,\n  eRENDERDOC_Key_2 = 0x32,\n  eRENDERDOC_Key_3 = 0x33,\n  eRENDERDOC_Key_4 = 0x34,\n  eRENDERDOC_Key_5 = 0x35,\n  eRENDERDOC_Key_6 = 0x36,\n  eRENDERDOC_Key_7 = 0x37,\n  eRENDERDOC_Key_8 = 0x38,\n  eRENDERDOC_Key_9 = 0x39,\n\n  // 'A' - 'Z' matches ASCII values\n  eRENDERDOC_Key_A = 0x41,\n  eRENDERDOC_Key_B = 0x42,\n  eRENDERDOC_Key_C = 0x43,\n  eRENDERDOC_Key_D = 0x44,\n  eRENDERDOC_Key_E = 0x45,\n  eRENDERDOC_Key_F = 0x46,\n  eRENDERDOC_Key_G = 0x47,\n  eRENDERDOC_Key_H = 0x48,\n  eRENDERDOC_Key_I = 0x49,\n  eRENDERDOC_Key_J = 0x4A,\n  eRENDERDOC_Key_K = 0x4B,\n  eRENDERDOC_Key_L = 0x4C,\n  eRENDERDOC_Key_M = 0x4D,\n  eRENDERDOC_Key_N = 0x4E,\n  eRENDERDOC_Key_O = 0x4F,\n  eRENDERDOC_Key_P = 0x50,\n  eRENDERDOC_Key_Q = 0x51,\n  eRENDERDOC_Key_R = 0x52,\n  eRENDERDOC_Key_S = 0x53,\n  eRENDERDOC_Key_T = 0x54,\n  eRENDERDOC_Key_U = 0x55,\n  eRENDERDOC_Key_V = 0x56,\n  eRENDERDOC_Key_W = 0x57,\n  eRENDERDOC_Key_X = 0x58,\n  eRENDERDOC_Key_Y = 0x59,\n  eRENDERDOC_Key_Z = 0x5A,\n\n  // leave the rest of the ASCII range free\n  // in case we want to use it later\n  eRENDERDOC_Key_NonPrintable = 0x100,\n\n  eRENDERDOC_Key_Divide,\n  eRENDERDOC_Key_Multiply,\n  eRENDERDOC_Key_Subtract,\n  eRENDERDOC_Key_Plus,\n\n  eRENDERDOC_Key_F1,\n  eRENDERDOC_Key_F2,\n  eRENDERDOC_Key_F3,\n  eRENDERDOC_Key_F4,\n  eRENDERDOC_Key_F5,\n  eRENDERDOC_Key_F6,\n  eRENDERDOC_Key_F7,\n  eRENDERDOC_Key_F8,\n  eRENDERDOC_Key_F9,\n  eRENDERDOC_Key_F10,\n  eRENDERDOC_Key_F11,\n  eRENDERDOC_Key_F12,\n\n  eRENDERDOC_Key_Home,\n  eRENDERDOC_Key_End,\n  eRENDERDOC_Key_Insert,\n  eRENDERDOC_Key_Delete,\n  eRENDERDOC_Key_PageUp,\n  eRENDERDOC_Key_PageDn,\n\n  eRENDERDOC_Key_Backspace,\n  eRENDERDOC_Key_Tab,\n  eRENDERDOC_Key_PrtScrn,\n  eRENDERDOC_Key_Pause,\n\n  eRENDERDOC_Key_Max,\n} RENDERDOC_InputButton;\n\n// Sets which key or keys can be used to toggle focus between multiple windows\n//\n// If keys is NULL or num is 
0, toggle keys will be disabled\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetFocusToggleKeys)(RENDERDOC_InputButton *keys, int num);\n\n// Sets which key or keys can be used to capture the next frame\n//\n// If keys is NULL or num is 0, captures keys will be disabled\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureKeys)(RENDERDOC_InputButton *keys, int num);\n\ntypedef enum RENDERDOC_OverlayBits {\n  // This single bit controls whether the overlay is enabled or disabled globally\n  eRENDERDOC_Overlay_Enabled = 0x1,\n\n  // Show the average framerate over several seconds as well as min/max\n  eRENDERDOC_Overlay_FrameRate = 0x2,\n\n  // Show the current frame number\n  eRENDERDOC_Overlay_FrameNumber = 0x4,\n\n  // Show a list of recent captures, and how many captures have been made\n  eRENDERDOC_Overlay_CaptureList = 0x8,\n\n  // Default values for the overlay mask\n  eRENDERDOC_Overlay_Default = (eRENDERDOC_Overlay_Enabled | eRENDERDOC_Overlay_FrameRate |\n                                eRENDERDOC_Overlay_FrameNumber | eRENDERDOC_Overlay_CaptureList),\n\n  // Enable all bits\n  eRENDERDOC_Overlay_All = ~0U,\n\n  // Disable all bits\n  eRENDERDOC_Overlay_None = 0,\n} RENDERDOC_OverlayBits;\n\n// returns the overlay bits that have been set\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetOverlayBits)();\n// sets the overlay bits with an and & or mask\ntypedef void(RENDERDOC_CC *pRENDERDOC_MaskOverlayBits)(uint32_t And, uint32_t Or);\n\n// this function will attempt to remove RenderDoc's hooks in the application.\n//\n// Note: that this can only work correctly if done immediately after\n// the module is loaded, before any API work happens. RenderDoc will remove its\n// injected hooks and shut down. 
Behaviour is undefined if this is called\n// after any API functions have been called, and there is still no guarantee of\n// success.\ntypedef void(RENDERDOC_CC *pRENDERDOC_RemoveHooks)();\n\n// DEPRECATED: compatibility for code compiled against pre-1.4.1 headers.\ntypedef pRENDERDOC_RemoveHooks pRENDERDOC_Shutdown;\n\n// This function will unload RenderDoc's crash handler.\n//\n// If you use your own crash handler and don't want RenderDoc's handler to\n// intercede, you can call this function to unload it and any unhandled\n// exceptions will pass to the next handler.\ntypedef void(RENDERDOC_CC *pRENDERDOC_UnloadCrashHandler)();\n\n// Sets the capture file path template\n//\n// pathtemplate is a UTF-8 string that gives a template for how captures will be named\n// and where they will be saved.\n//\n// Any extension is stripped off the path, and captures are saved in the directory\n// specified, and named with the filename and the frame number appended. If the\n// directory does not exist it will be created, including any parent directories.\n//\n// If pathtemplate is NULL, the template will remain unchanged\n//\n// Example:\n//\n// SetCaptureFilePathTemplate(\"my_captures/example\");\n//\n// Capture #1 -> my_captures/example_frame123.rdc\n// Capture #2 -> my_captures/example_frame456.rdc\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFilePathTemplate)(const char *pathtemplate);\n\n// returns the current capture path template, see SetCaptureFileTemplate above, as a UTF-8 string\ntypedef const char *(RENDERDOC_CC *pRENDERDOC_GetCaptureFilePathTemplate)();\n\n// DEPRECATED: compatibility for code compiled against pre-1.1.2 headers.\ntypedef pRENDERDOC_SetCaptureFilePathTemplate pRENDERDOC_SetLogFilePathTemplate;\ntypedef pRENDERDOC_GetCaptureFilePathTemplate pRENDERDOC_GetLogFilePathTemplate;\n\n// returns the number of captures that have been made\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetNumCaptures)();\n\n// This function returns the details of a capture, 
by index. New captures are added\n// to the end of the list.\n//\n// filename will be filled with the absolute path to the capture file, as a UTF-8 string\n// pathlength will be written with the length in bytes of the filename string\n// timestamp will be written with the time of the capture, in seconds since the Unix epoch\n//\n// Any of the parameters can be NULL and they'll be skipped.\n//\n// The function will return 1 if the capture index is valid, or 0 if the index is invalid\n// If the index is invalid, the values will be unchanged\n//\n// Note: when captures are deleted in the UI they will remain in this list, so the\n// capture path may not exist anymore.\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCapture)(uint32_t idx, char *filename,\n                                                      uint32_t *pathlength, uint64_t *timestamp);\n\n// Sets the comments associated with a capture file. These comments are displayed in the\n// UI program when opening.\n//\n// filePath should be a path to the capture file to add comments to. 
If set to NULL or \"\"\n// the most recent capture file created made will be used instead.\n// comments should be a NULL-terminated UTF-8 string to add as comments.\n//\n// Any existing comments will be overwritten.\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFileComments)(const char *filePath,\n                                                              const char *comments);\n\n// returns 1 if the RenderDoc UI is connected to this application, 0 otherwise\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsTargetControlConnected)();\n\n// DEPRECATED: compatibility for code compiled against pre-1.1.1 headers.\n// This was renamed to IsTargetControlConnected in API 1.1.1, the old typedef is kept here for\n// backwards compatibility with old code, it is castable either way since it's ABI compatible\n// as the same function pointer type.\ntypedef pRENDERDOC_IsTargetControlConnected pRENDERDOC_IsRemoteAccessConnected;\n\n// This function will launch the Replay UI associated with the RenderDoc library injected\n// into the running application.\n//\n// if connectTargetControl is 1, the Replay UI will be launched with a command line parameter\n// to connect to this application\n// cmdline is the rest of the command line, as a UTF-8 string. E.g. a captures to open\n// if cmdline is NULL, the command line will be empty.\n//\n// returns the PID of the replay UI if successful, 0 if not successful.\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_LaunchReplayUI)(uint32_t connectTargetControl,\n                                                          const char *cmdline);\n\n// RenderDoc can return a higher version than requested if it's backwards compatible,\n// this function returns the actual version returned. If a parameter is NULL, it will be\n// ignored and the others will be filled out.\ntypedef void(RENDERDOC_CC *pRENDERDOC_GetAPIVersion)(int *major, int *minor, int *patch);\n\n// Requests that the replay UI show itself (if hidden or not the current top window). 
This can be\n// used in conjunction with IsTargetControlConnected and LaunchReplayUI to intelligently handle\n// showing the UI after making a capture.\n//\n// This will return 1 if the request was successfully passed on, though it's not guaranteed that\n// the UI will be on top in all cases depending on OS rules. It will return 0 if there is no current\n// target control connection to make such a request, or if there was another error\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_ShowReplayUI)();\n\n//////////////////////////////////////////////////////////////////////////\n// Capturing functions\n//\n\n// A device pointer is a pointer to the API's root handle.\n//\n// This would be an ID3D11Device, HGLRC/GLXContext, ID3D12Device, etc\ntypedef void *RENDERDOC_DevicePointer;\n\n// A window handle is the OS's native window handle\n//\n// This would be an HWND, GLXDrawable, etc\ntypedef void *RENDERDOC_WindowHandle;\n\n// A helper macro for Vulkan, where the device handle cannot be used directly.\n//\n// Passing the VkInstance to this macro will return the RENDERDOC_DevicePointer to use.\n//\n// Specifically, the value needed is the dispatch table pointer, which sits as the first\n// pointer-sized object in the memory pointed to by the VkInstance. Thus we cast to a void** and\n// indirect once.\n#define RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(inst) (*((void **)(inst)))\n\n// This sets the RenderDoc in-app overlay in the API/window pair as 'active' and it will\n// respond to keypresses. 
Neither parameter can be NULL\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetActiveWindow)(RENDERDOC_DevicePointer device,\n                                                       RENDERDOC_WindowHandle wndHandle);\n\n// capture the next frame on whichever window and API is currently considered active\ntypedef void(RENDERDOC_CC *pRENDERDOC_TriggerCapture)();\n\n// capture the next N frames on whichever window and API is currently considered active\ntypedef void(RENDERDOC_CC *pRENDERDOC_TriggerMultiFrameCapture)(uint32_t numFrames);\n\n// When choosing either a device pointer or a window handle to capture, you can pass NULL.\n// Passing NULL specifies a 'wildcard' match against anything. This allows you to specify\n// any API rendering to a specific window, or a specific API instance rendering to any window,\n// or in the simplest case of one window and one API, you can just pass NULL for both.\n//\n// In either case, if there are two or more possible matching (device,window) pairs it\n// is undefined which one will be captured.\n//\n// Note: for headless rendering you can pass NULL for the window handle and either specify\n// a device pointer or leave it NULL as above.\n\n// Immediately starts capturing API calls on the specified device pointer and window handle.\n//\n// If there is no matching thing to capture (e.g. 
no supported API has been initialised),\n// this will do nothing.\n//\n// The results are undefined (including crashes) if two captures are started overlapping,\n// even on separate devices and/or windows.\ntypedef void(RENDERDOC_CC *pRENDERDOC_StartFrameCapture)(RENDERDOC_DevicePointer device,\n                                                         RENDERDOC_WindowHandle wndHandle);\n\n// Returns whether or not a frame capture is currently ongoing anywhere.\n//\n// This will return 1 if a capture is ongoing, and 0 if there is no capture running\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsFrameCapturing)();\n\n// Ends capturing immediately.\n//\n// This will return 1 if the capture succeeded, and 0 if there was an error capturing.\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_EndFrameCapture)(RENDERDOC_DevicePointer device,\n                                                           RENDERDOC_WindowHandle wndHandle);\n\n// Ends capturing immediately and discard any data stored without saving to disk.\n//\n// This will return 1 if the capture was discarded, and 0 if there was an error or no capture\n// was in progress\ntypedef uint32_t(RENDERDOC_CC *pRENDERDOC_DiscardFrameCapture)(RENDERDOC_DevicePointer device,\n                                                               RENDERDOC_WindowHandle wndHandle);\n\n// Only valid to be called between a call to StartFrameCapture and EndFrameCapture. Gives a custom\n// title to the capture produced which will be displayed in the UI.\n//\n// If multiple captures are ongoing, this title will be applied to the first capture to end after\n// this call. 
The second capture to end will have no title, unless this function is called again.\n//\n// Calling this function has no effect if no capture is currently running, and if it is called\n// multiple times only the last title will be used.\ntypedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureTitle)(const char *title);\n\n//////////////////////////////////////////////////////////////////////////////////////////////////\n// RenderDoc API versions\n//\n\n// RenderDoc uses semantic versioning (http://semver.org/).\n//\n// MAJOR version is incremented when incompatible API changes happen.\n// MINOR version is incremented when functionality is added in a backwards-compatible manner.\n// PATCH version is incremented when backwards-compatible bug fixes happen.\n//\n// Note that this means the API returned can be higher than the one you might have requested.\n// e.g. if you are running against a newer RenderDoc that supports 1.0.1, it will be returned\n// instead of 1.0.0. You can check this with the GetAPIVersion entry point\ntypedef enum RENDERDOC_Version {\n  eRENDERDOC_API_Version_1_0_0 = 10000,    // RENDERDOC_API_1_0_0 = 1 00 00\n  eRENDERDOC_API_Version_1_0_1 = 10001,    // RENDERDOC_API_1_0_1 = 1 00 01\n  eRENDERDOC_API_Version_1_0_2 = 10002,    // RENDERDOC_API_1_0_2 = 1 00 02\n  eRENDERDOC_API_Version_1_1_0 = 10100,    // RENDERDOC_API_1_1_0 = 1 01 00\n  eRENDERDOC_API_Version_1_1_1 = 10101,    // RENDERDOC_API_1_1_1 = 1 01 01\n  eRENDERDOC_API_Version_1_1_2 = 10102,    // RENDERDOC_API_1_1_2 = 1 01 02\n  eRENDERDOC_API_Version_1_2_0 = 10200,    // RENDERDOC_API_1_2_0 = 1 02 00\n  eRENDERDOC_API_Version_1_3_0 = 10300,    // RENDERDOC_API_1_3_0 = 1 03 00\n  eRENDERDOC_API_Version_1_4_0 = 10400,    // RENDERDOC_API_1_4_0 = 1 04 00\n  eRENDERDOC_API_Version_1_4_1 = 10401,    // RENDERDOC_API_1_4_1 = 1 04 01\n  eRENDERDOC_API_Version_1_4_2 = 10402,    // RENDERDOC_API_1_4_2 = 1 04 02\n  eRENDERDOC_API_Version_1_5_0 = 10500,    // RENDERDOC_API_1_5_0 = 1 05 00\n  
eRENDERDOC_API_Version_1_6_0 = 10600,    // RENDERDOC_API_1_6_0 = 1 06 00\n} RENDERDOC_Version;\n\n// API version changelog:\n//\n// 1.0.0 - initial release\n// 1.0.1 - Bugfix: IsFrameCapturing() was returning false for captures that were triggered\n//         by keypress or TriggerCapture, instead of Start/EndFrameCapture.\n// 1.0.2 - Refactor: Renamed eRENDERDOC_Option_DebugDeviceMode to eRENDERDOC_Option_APIValidation\n// 1.1.0 - Add feature: TriggerMultiFrameCapture(). Backwards compatible with 1.0.x since the new\n//         function pointer is added to the end of the struct, the original layout is identical\n// 1.1.1 - Refactor: Renamed remote access to target control (to better disambiguate from remote\n//         replay/remote server concept in replay UI)\n// 1.1.2 - Refactor: Renamed \"log file\" in function names to just capture, to clarify that these\n//         are captures and not debug logging files. This is the first API version in the v1.0\n//         branch.\n// 1.2.0 - Added feature: SetCaptureFileComments() to add comments to a capture file that will be\n//         displayed in the UI program on load.\n// 1.3.0 - Added feature: New capture option eRENDERDOC_Option_AllowUnsupportedVendorExtensions\n//         which allows users to opt-in to allowing unsupported vendor extensions to function.\n//         Should be used at the user's own risk.\n//         Refactor: Renamed eRENDERDOC_Option_VerifyMapWrites to\n//         eRENDERDOC_Option_VerifyBufferAccess, which now also controls initialisation to\n//         0xdddddddd of uninitialised buffer contents.\n// 1.4.0 - Added feature: DiscardFrameCapture() to discard a frame capture in progress and stop\n//         capturing without saving anything to disk.\n// 1.4.1 - Refactor: Renamed Shutdown to RemoveHooks to better clarify what is happening\n// 1.4.2 - Refactor: Renamed 'draws' to 'actions' in callstack capture option.\n// 1.5.0 - Added feature: ShowReplayUI() to request that the replay UI show 
itself if connected\n// 1.6.0 - Added feature: SetCaptureTitle() which can be used to set a title for a\n//         capture made with StartFrameCapture() or EndFrameCapture()\n\ntypedef struct RENDERDOC_API_1_6_0\n{\n  pRENDERDOC_GetAPIVersion GetAPIVersion;\n\n  pRENDERDOC_SetCaptureOptionU32 SetCaptureOptionU32;\n  pRENDERDOC_SetCaptureOptionF32 SetCaptureOptionF32;\n\n  pRENDERDOC_GetCaptureOptionU32 GetCaptureOptionU32;\n  pRENDERDOC_GetCaptureOptionF32 GetCaptureOptionF32;\n\n  pRENDERDOC_SetFocusToggleKeys SetFocusToggleKeys;\n  pRENDERDOC_SetCaptureKeys SetCaptureKeys;\n\n  pRENDERDOC_GetOverlayBits GetOverlayBits;\n  pRENDERDOC_MaskOverlayBits MaskOverlayBits;\n\n  // Shutdown was renamed to RemoveHooks in 1.4.1.\n  // These unions allow old code to continue compiling without changes\n  union\n  {\n    pRENDERDOC_Shutdown Shutdown;\n    pRENDERDOC_RemoveHooks RemoveHooks;\n  };\n  pRENDERDOC_UnloadCrashHandler UnloadCrashHandler;\n\n  // Get/SetLogFilePathTemplate was renamed to Get/SetCaptureFilePathTemplate in 1.1.2.\n  // These unions allow old code to continue compiling without changes\n  union\n  {\n    // deprecated name\n    pRENDERDOC_SetLogFilePathTemplate SetLogFilePathTemplate;\n    // current name\n    pRENDERDOC_SetCaptureFilePathTemplate SetCaptureFilePathTemplate;\n  };\n  union\n  {\n    // deprecated name\n    pRENDERDOC_GetLogFilePathTemplate GetLogFilePathTemplate;\n    // current name\n    pRENDERDOC_GetCaptureFilePathTemplate GetCaptureFilePathTemplate;\n  };\n\n  pRENDERDOC_GetNumCaptures GetNumCaptures;\n  pRENDERDOC_GetCapture GetCapture;\n\n  pRENDERDOC_TriggerCapture TriggerCapture;\n\n  // IsRemoteAccessConnected was renamed to IsTargetControlConnected in 1.1.1.\n  // This union allows old code to continue compiling without changes\n  union\n  {\n    // deprecated name\n    pRENDERDOC_IsRemoteAccessConnected IsRemoteAccessConnected;\n    // current name\n    pRENDERDOC_IsTargetControlConnected IsTargetControlConnected;\n  };\n  
pRENDERDOC_LaunchReplayUI LaunchReplayUI;\n\n  pRENDERDOC_SetActiveWindow SetActiveWindow;\n\n  pRENDERDOC_StartFrameCapture StartFrameCapture;\n  pRENDERDOC_IsFrameCapturing IsFrameCapturing;\n  pRENDERDOC_EndFrameCapture EndFrameCapture;\n\n  // new function in 1.1.0\n  pRENDERDOC_TriggerMultiFrameCapture TriggerMultiFrameCapture;\n\n  // new function in 1.2.0\n  pRENDERDOC_SetCaptureFileComments SetCaptureFileComments;\n\n  // new function in 1.4.0\n  pRENDERDOC_DiscardFrameCapture DiscardFrameCapture;\n\n  // new function in 1.5.0\n  pRENDERDOC_ShowReplayUI ShowReplayUI;\n\n  // new function in 1.6.0\n  pRENDERDOC_SetCaptureTitle SetCaptureTitle;\n} RENDERDOC_API_1_6_0;\n\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_0;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_1;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_2;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_0;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_1;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_2;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_2_0;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_3_0;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_0;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_1;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_2;\ntypedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_5_0;\n\n//////////////////////////////////////////////////////////////////////////////////////////////////\n// RenderDoc API entry point\n//\n// This entry point can be obtained via GetProcAddress/dlsym if RenderDoc is available.\n//\n// The name is the same as the typedef - \"RENDERDOC_GetAPI\"\n//\n// This function is not thread safe, and should not be called on multiple threads at once.\n// Ideally, call this once as early as possible in your application's startup, before doing\n// any API work, since some configuration functionality etc has to be done also before\n// initialising any APIs.\n//\n// Parameters:\n//   version is a single value from the RENDERDOC_Version above.\n//\n//   outAPIPointers will be 
filled out with a pointer to the corresponding struct of function\n//   pointers.\n//\n// Returns:\n//   1 - if the outAPIPointers has been filled with a pointer to the API struct requested\n//   0 - if the requested version is not supported or the arguments are invalid.\n//\ntypedef int(RENDERDOC_CC *pRENDERDOC_GetAPI)(RENDERDOC_Version version, void **outAPIPointers);\n\n#ifdef __cplusplus\n}    // extern \"C\"\n#endif\n"
  },
  {
    "path": "deps/utest/utest.h",
    "content": "/*\n   The latest version of this library is available on GitHub;\n   https://github.com/sheredom/utest.h\n*/\n\n/*\n   This is free and unencumbered software released into the public domain.\n\n   Anyone is free to copy, modify, publish, use, compile, sell, or\n   distribute this software, either in source code form or as a compiled\n   binary, for any purpose, commercial or non-commercial, and by any\n   means.\n\n   In jurisdictions that recognize copyright laws, the author or authors\n   of this software dedicate any and all copyright interest in the\n   software to the public domain. We make this dedication for the benefit\n   of the public at large and to the detriment of our heirs and\n   successors. We intend this dedication to be an overt act of\n   relinquishment in perpetuity of all present and future rights to this\n   software under copyright law.\n\n   THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n   IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n   OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n   ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n   OTHER DEALINGS IN THE SOFTWARE.\n\n   For more information, please refer to <http://unlicense.org/>\n*/\n\n#ifndef SHEREDOM_UTEST_H_INCLUDED\n#define SHEREDOM_UTEST_H_INCLUDED\n\n#ifdef _MSC_VER\n/*\n   Disable warning about not inlining 'inline' functions.\n*/\n#pragma warning(disable : 4710)\n\n/*\n   Disable warning about inlining functions that are not marked 'inline'.\n*/\n#pragma warning(disable : 4711)\n\n/*\n   Disable warning for alignment padding added\n*/\n#pragma warning(disable : 4820)\n\n#if _MSC_VER > 1900\n/*\n  Disable warning about preprocessor macros not being defined in MSVC headers.\n*/\n#pragma warning(disable : 4668)\n\n/*\n  Disable 
warning about no function prototype given in MSVC headers.\n*/\n#pragma warning(disable : 4255)\n\n/*\n  Disable warning about pointer or reference to potentially throwing function.\n*/\n#pragma warning(disable : 5039)\n\n/*\n  Disable warning about macro expansion producing 'defined' has undefined\n  behavior.\n*/\n#pragma warning(disable : 5105)\n#endif\n\n#if _MSC_VER > 1930\n/*\n  Disable warning about 'const' variable is not used.\n*/\n#pragma warning(disable : 5264)\n#endif\n\n#pragma warning(push, 1)\n#endif\n\n#if defined(_MSC_VER) && (_MSC_VER < 1920)\ntypedef __int64 utest_int64_t;\ntypedef unsigned __int64 utest_uint64_t;\ntypedef unsigned __int32 utest_uint32_t;\n#else\n#include <stdint.h>\ntypedef int64_t utest_int64_t;\ntypedef uint64_t utest_uint64_t;\ntypedef uint32_t utest_uint32_t;\n#endif\n\n#include <stddef.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n#if defined(__cplusplus)\n#if defined(_MSC_VER) && !defined(_CPPUNWIND)\n/* We're on MSVC and the compiler is compiling without exception support! */\n#elif !defined(_MSC_VER) && !defined(__EXCEPTIONS)\n/* We're on a GCC/Clang compiler that doesn't have exception support! 
*/\n#else\n#define UTEST_HAS_EXCEPTIONS 1\n#endif\n#endif\n\n#if defined(UTEST_HAS_EXCEPTIONS)\n#include <stdexcept>\n#endif\n\n#if defined(_MSC_VER)\n#pragma warning(pop)\n#endif\n\n#if defined(__cplusplus)\n#define UTEST_C_FUNC extern \"C\"\n#else\n#define UTEST_C_FUNC\n#endif\n\n#define UTEST_TEST_PASSED (0)\n#define UTEST_TEST_FAILURE (1)\n#define UTEST_TEST_SKIPPED (2)\n\n#if defined(__TINYC__)\n#define UTEST_ATTRIBUTE(a) __attribute((a))\n#else\n#define UTEST_ATTRIBUTE(a) __attribute__((a))\n#endif\n\n#if defined(_MSC_VER) || defined(__MINGW64__) || defined(__MINGW32__)\n\n#if defined(__MINGW64__) || defined(__MINGW32__)\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wpragmas\"\n#pragma GCC diagnostic ignored \"-Wunknown-pragmas\"\n#endif\n\n#if defined(_WINDOWS_) || defined(_WINDOWS_H)\ntypedef LARGE_INTEGER utest_large_integer;\n#else\n// use old QueryPerformanceCounter definitions (not sure is this needed in some\n// edge cases or not) on Win7 with VS2015 these extern declaration cause \"second\n// C linkage of overloaded function not allowed\" error\ntypedef union {\n  struct {\n    unsigned long LowPart;\n    long HighPart;\n  } DUMMYSTRUCTNAME;\n  struct {\n    unsigned long LowPart;\n    long HighPart;\n  } u;\n  utest_int64_t QuadPart;\n} utest_large_integer;\n\nUTEST_C_FUNC __declspec(dllimport) int __stdcall QueryPerformanceCounter(\n    utest_large_integer *);\nUTEST_C_FUNC __declspec(dllimport) int __stdcall QueryPerformanceFrequency(\n    utest_large_integer *);\n\n#if defined(__MINGW64__) || defined(__MINGW32__)\n#pragma GCC diagnostic pop\n#endif\n#endif\n\n#elif defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) ||    \\\n    defined(__NetBSD__) || defined(__DragonFly__) || defined(__sun__) ||       \\\n    defined(__HAIKU__)\n/*\n   slightly obscure include here - we need to include glibc's features.h, but\n   we don't want to just include a header that might not be defined for other\n   c libraries like musl. 
Instead we include limits.h, which we know on all\n   glibc distributions includes features.h\n*/\n#include <limits.h>\n\n#if defined(__GLIBC__) && defined(__GLIBC_MINOR__)\n#include <time.h>\n\n#if ((2 < __GLIBC__) || ((2 == __GLIBC__) && (17 <= __GLIBC_MINOR__)))\n/* glibc is version 2.17 or above, so we can just use clock_gettime */\n#define UTEST_USE_CLOCKGETTIME\n#else\n#include <sys/syscall.h>\n#include <unistd.h>\n#endif\n#else // Other libc implementations\n#include <time.h>\n#define UTEST_USE_CLOCKGETTIME\n#endif\n\n#elif defined(__APPLE__)\n#include <time.h>\n#endif\n\n#if defined(_MSC_VER) && (_MSC_VER < 1920)\n#define UTEST_PRId64 \"I64d\"\n#define UTEST_PRIu64 \"I64u\"\n#else\n#include <inttypes.h>\n\n#define UTEST_PRId64 PRId64\n#define UTEST_PRIu64 PRIu64\n#endif\n\n#if defined(__cplusplus)\n#define UTEST_INLINE inline\n\n#if defined(__clang__)\n#define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS                               \\\n  _Pragma(\"clang diagnostic push\")                                             \\\n      _Pragma(\"clang diagnostic ignored \\\"-Wglobal-constructors\\\"\")\n\n#define UTEST_INITIALIZER_END_DISABLE_WARNINGS _Pragma(\"clang diagnostic pop\")\n#else\n#define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS\n#define UTEST_INITIALIZER_END_DISABLE_WARNINGS\n#endif\n\n#define UTEST_INITIALIZER(f)                                                   \\\n  struct f##_cpp_struct {                                                      \\\n    f##_cpp_struct();                                                          \\\n  };                                                                           \\\n  UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS static f##_cpp_struct               \\\n      f##_cpp_global UTEST_INITIALIZER_END_DISABLE_WARNINGS;                   \\\n  f##_cpp_struct::f##_cpp_struct()\n#elif defined(_MSC_VER)\n#define UTEST_INLINE __forceinline\n\n#if defined(_WIN64)\n#define UTEST_SYMBOL_PREFIX\n#else\n#define UTEST_SYMBOL_PREFIX 
\"_\"\n#endif\n\n#if defined(__clang__)\n#define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS                               \\\n  _Pragma(\"clang diagnostic push\")                                             \\\n      _Pragma(\"clang diagnostic ignored \\\"-Wmissing-variable-declarations\\\"\")\n\n#define UTEST_INITIALIZER_END_DISABLE_WARNINGS _Pragma(\"clang diagnostic pop\")\n#else\n#define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS\n#define UTEST_INITIALIZER_END_DISABLE_WARNINGS\n#endif\n\n#pragma section(\".CRT$XCU\", read)\n#define UTEST_INITIALIZER(f)                                                   \\\n  static void __cdecl f(void);                                                 \\\n  UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS                                     \\\n  __pragma(comment(linker, \"/include:\" UTEST_SYMBOL_PREFIX #f \"_\"))            \\\n      UTEST_C_FUNC                                                             \\\n      __declspec(allocate(\".CRT$XCU\")) void(__cdecl * f##_)(void) = f;         \\\n  UTEST_INITIALIZER_END_DISABLE_WARNINGS                                       \\\n  static void __cdecl f(void)\n#else\n#if defined(__linux__)\n#if defined(__clang__)\n#if __has_warning(\"-Wreserved-id-macro\")\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wreserved-id-macro\"\n#endif\n#endif\n\n#define __STDC_FORMAT_MACROS 1\n\n#if defined(__clang__)\n#if __has_warning(\"-Wreserved-id-macro\")\n#pragma clang diagnostic pop\n#endif\n#endif\n#endif\n\n#define UTEST_INLINE inline\n\n#define UTEST_INITIALIZER(f)                                                   \\\n  static void f(void) UTEST_ATTRIBUTE(constructor);                            \\\n  static void f(void)\n#endif\n\n#if defined(__cplusplus)\n#define UTEST_CAST(type, x) static_cast<type>(x)\n#define UTEST_PTR_CAST(type, x) reinterpret_cast<type>(x)\n#define UTEST_EXTERN extern \"C\"\n#define UTEST_NULL NULL\n#else\n#define UTEST_CAST(type, x) ((type)(x))\n#define 
UTEST_PTR_CAST(type, x) ((type)(x))\n#define UTEST_EXTERN extern\n#define UTEST_NULL 0\n#endif\n\n#ifdef _MSC_VER\n/*\n    io.h contains definitions for some structures with natural padding. This is\n    uninteresting, but for some reason MSVC's behaviour is to warn about\n    including this system header. That *is* interesting\n*/\n#pragma warning(disable : 4820)\n#pragma warning(push, 1)\n#include <io.h>\n#pragma warning(pop)\n#define UTEST_COLOUR_OUTPUT() (_isatty(_fileno(stdout)))\n#else\n#if defined(__EMSCRIPTEN__)\n#include <emscripten/html5.h>\n#define UTEST_COLOUR_OUTPUT() false\n#else\n#include <unistd.h>\n#define UTEST_COLOUR_OUTPUT() (isatty(STDOUT_FILENO))\n#endif\n#endif\n\nstatic UTEST_INLINE void *utest_realloc(void *const pointer, size_t new_size) {\n  void *const new_pointer = realloc(pointer, new_size);\n\n  if (UTEST_NULL == new_pointer) {\n    free(pointer);\n  }\n\n  return new_pointer;\n}\n\n// Prevent 64-bit integer overflow when computing a timestamp by using a trick\n// from Sokol:\n// https://github.com/floooh/sokol/blob/189843bf4f86969ca4cc4b6d94e793a37c5128a7/sokol_time.h#L204\nstatic UTEST_INLINE utest_int64_t utest_mul_div(const utest_int64_t value,\n                                                const utest_int64_t numer,\n                                                const utest_int64_t denom) {\n  const utest_int64_t q = value / denom;\n  const utest_int64_t r = value % denom;\n  return q * numer + r * numer / denom;\n}\n\nstatic UTEST_INLINE utest_int64_t utest_ns(void) {\n#if defined(_MSC_VER) || defined(__MINGW64__) || defined(__MINGW32__)\n  utest_large_integer counter;\n  utest_large_integer frequency;\n  QueryPerformanceCounter(&counter);\n  QueryPerformanceFrequency(&frequency);\n  return utest_mul_div(counter.QuadPart, 1000000000, frequency.QuadPart);\n#elif defined(__linux__) && defined(__STRICT_ANSI__)\n  return utest_mul_div(clock(), 1000000000, CLOCKS_PER_SEC);\n#elif defined(__linux__) || defined(__FreeBSD__) || 
defined(__OpenBSD__) ||    \\\n    defined(__NetBSD__) || defined(__DragonFly__) || defined(__sun__) ||       \\\n    defined(__HAIKU__)\n  struct timespec ts;\n#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) &&              \\\n    !defined(__HAIKU__)\n  timespec_get(&ts, TIME_UTC);\n#else\n  const clockid_t cid = CLOCK_REALTIME;\n#if defined(UTEST_USE_CLOCKGETTIME)\n  clock_gettime(cid, &ts);\n#else\n  syscall(SYS_clock_gettime, cid, &ts);\n#endif\n#endif\n  return UTEST_CAST(utest_int64_t, ts.tv_sec) * 1000 * 1000 * 1000 + ts.tv_nsec;\n#elif __APPLE__\n  return UTEST_CAST(utest_int64_t, clock_gettime_nsec_np(CLOCK_UPTIME_RAW));\n#elif __EMSCRIPTEN__\n  return emscripten_performance_now() * 1000000.0;\n#else\n#error Unsupported platform!\n#endif\n}\n\ntypedef void (*utest_testcase_t)(int *, size_t);\n\nstruct utest_test_state_s {\n  utest_testcase_t func;\n  size_t index;\n  char *name;\n};\n\nstruct utest_state_s {\n  struct utest_test_state_s *tests;\n  size_t tests_length;\n  FILE *output;\n};\n\n/* extern to the global state utest needs to execute */\nUTEST_EXTERN struct utest_state_s utest_state;\n\n#if defined(_MSC_VER)\n#define UTEST_WEAK __forceinline\n#elif defined(__MINGW32__) || defined(__MINGW64__)\n#define UTEST_WEAK static UTEST_ATTRIBUTE(used)\n#elif defined(__clang__) || defined(__GNUC__) || defined(__TINYC__)\n#define UTEST_WEAK UTEST_ATTRIBUTE(weak)\n#else\n#error Non clang, non gcc, non MSVC, non tcc compiler found!\n#endif\n\n#if defined(_MSC_VER)\n#define UTEST_UNUSED\n#else\n#define UTEST_UNUSED UTEST_ATTRIBUTE(unused)\n#endif\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wvariadic-macros\"\n#pragma clang diagnostic ignored \"-Wc++98-compat-pedantic\"\n#endif\n#define UTEST_PRINTF(...)                                                      
\\\n  if (utest_state.output) {                                                    \\\n    fprintf(utest_state.output, __VA_ARGS__);                                  \\\n  }                                                                            \\\n  printf(__VA_ARGS__)\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wvariadic-macros\"\n#pragma clang diagnostic ignored \"-Wc++98-compat-pedantic\"\n#endif\n\n#ifdef _MSC_VER\n#define UTEST_SNPRINTF(BUFFER, N, ...) _snprintf_s(BUFFER, N, N, __VA_ARGS__)\n#else\n#define UTEST_SNPRINTF(...) snprintf(__VA_ARGS__)\n#endif\n\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#if defined(__cplusplus)\n/* if we are using c++ we can use overloaded methods (its in the language) */\n#define UTEST_OVERLOADABLE\n#elif defined(__clang__)\n/* otherwise, if we are using clang with c - use the overloadable attribute */\n#define UTEST_OVERLOADABLE UTEST_ATTRIBUTE(overloadable)\n#endif\n\n#if defined(__cplusplus) && (__cplusplus >= 201103L)\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wc++98-compat-pedantic\"\n#endif\n\n#include <type_traits>\n\ntemplate <typename T, bool is_enum = std::is_enum<T>::value>\nstruct utest_type_deducer final {\n  static void _(const T t);\n};\n\ntemplate <> struct utest_type_deducer<char, false> {\n  static void _(const char c) {\n    if (std::is_signed<decltype(c)>::value) {\n      UTEST_PRINTF(\"%d\", static_cast<int>(c));\n    } else {\n      UTEST_PRINTF(\"%u\", static_cast<unsigned int>(c));\n    }\n  }\n};\ntemplate <> struct utest_type_deducer<signed char, false> {\n  static void _(const signed char c) {\n    UTEST_PRINTF(\"%d\", static_cast<int>(c));\n  }\n};\n\ntemplate <> struct utest_type_deducer<unsigned char, false> {\n  static void _(const unsigned char c) {\n    UTEST_PRINTF(\"%u\", static_cast<unsigned int>(c));\n  }\n};\n\ntemplate <> struct 
utest_type_deducer<short, false> {\n  static void _(const short s) { UTEST_PRINTF(\"%d\", static_cast<int>(s)); }\n};\n\ntemplate <> struct utest_type_deducer<unsigned short, false> {\n  static void _(const unsigned short s) {\n    UTEST_PRINTF(\"%u\", static_cast<unsigned>(s));\n  }\n};\n\ntemplate <> struct utest_type_deducer<float, false> {\n  static void _(const float f) { UTEST_PRINTF(\"%f\", static_cast<double>(f)); }\n};\n\ntemplate <> struct utest_type_deducer<double, false> {\n  static void _(const double d) { UTEST_PRINTF(\"%f\", d); }\n};\n\ntemplate <> struct utest_type_deducer<long double, false> {\n  static void _(const long double d) {\n#if defined(__MINGW32__) || defined(__MINGW64__)\n    /* MINGW is weird - doesn't like LF at all?! */\n    UTEST_PRINTF(\"%f\", (double)d);\n#else\n    UTEST_PRINTF(\"%Lf\", d);\n#endif\n  }\n};\n\ntemplate <> struct utest_type_deducer<int, false> {\n  static void _(const int i) { UTEST_PRINTF(\"%d\", i); }\n};\n\ntemplate <> struct utest_type_deducer<unsigned int, false> {\n  static void _(const unsigned int i) { UTEST_PRINTF(\"%u\", i); }\n};\n\ntemplate <> struct utest_type_deducer<long, false> {\n  static void _(const long i) { UTEST_PRINTF(\"%ld\", i); }\n};\n\ntemplate <> struct utest_type_deducer<unsigned long, false> {\n  static void _(const unsigned long i) { UTEST_PRINTF(\"%lu\", i); }\n};\n\ntemplate <> struct utest_type_deducer<long long, false> {\n  static void _(const long long i) { UTEST_PRINTF(\"%lld\", i); }\n};\n\ntemplate <> struct utest_type_deducer<unsigned long long, false> {\n  static void _(const unsigned long long i) { UTEST_PRINTF(\"%llu\", i); }\n};\n\ntemplate <> struct utest_type_deducer<bool, false> {\n  static void _(const bool i) { UTEST_PRINTF(i ? 
\"true\" : \"false\"); }\n};\n\ntemplate <typename T> struct utest_type_deducer<const T *, false> {\n  static void _(const T *t) {\n    UTEST_PRINTF(\"%p\", static_cast<const void *>(t));\n  }\n};\n\ntemplate <typename T> struct utest_type_deducer<T *, false> {\n  static void _(T *t) { UTEST_PRINTF(\"%p\", static_cast<void *>(t)); }\n};\n\ntemplate <typename T> struct utest_type_deducer<T, true> {\n  static void _(const T t) {\n    UTEST_PRINTF(\"%llu\", static_cast<unsigned long long>(t));\n  }\n};\n\n// default printer for all other objects (specialize for custom printing)\ntemplate <typename T> struct utest_type_deducer<T, false> {\n  static void _(const T& t) {\n    UTEST_PRINTF(\"(object %p)\", static_cast<const void*>(&t));\n  }\n};\n\ntemplate <> struct utest_type_deducer<std::nullptr_t, false> {\n  static void _(std::nullptr_t t) {\n    UTEST_PRINTF(\"%p\", static_cast<void *>(t));\n  }\n};\n\ntemplate <typename T>\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const T& t) {\n  utest_type_deducer<T>::_(t);\n}\n\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#elif defined(UTEST_OVERLOADABLE)\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(signed char c);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(signed char c) {\n  UTEST_PRINTF(\"%d\", UTEST_CAST(int, c));\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned char c);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned char c) {\n  UTEST_PRINTF(\"%u\", UTEST_CAST(unsigned int, c));\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(float f);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(float f) {\n  UTEST_PRINTF(\"%f\", UTEST_CAST(double, f));\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(double d);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(double d) {\n  UTEST_PRINTF(\"%f\", d);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long double d);\nUTEST_WEAK UTEST_OVERLOADABLE void 
utest_type_printer(long double d) {\n#if defined(__MINGW32__) || defined(__MINGW64__)\n  /* MINGW is weird - doesn't like LF at all?! */\n  UTEST_PRINTF(\"%f\", (double)d);\n#else\n  UTEST_PRINTF(\"%Lf\", d);\n#endif\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(int i);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(int i) {\n  UTEST_PRINTF(\"%d\", i);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned int i);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned int i) {\n  UTEST_PRINTF(\"%u\", i);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long int i);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long int i) {\n  UTEST_PRINTF(\"%ld\", i);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long unsigned int i);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long unsigned int i) {\n  UTEST_PRINTF(\"%lu\", i);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const void *p);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const void *p) {\n  UTEST_PRINTF(\"%p\", p);\n}\n\n/*\n   long long is a c++11 extension\n*/\n#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ||              \\\n    defined(__cplusplus) && (__cplusplus >= 201103L) ||                        \\\n    (defined(__MINGW32__) || defined(__MINGW64__))\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wc++98-compat-pedantic\"\n#endif\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long int i);\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long int i) {\n  UTEST_PRINTF(\"%lld\", i);\n}\n\nUTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long unsigned int i);\nUTEST_WEAK UTEST_OVERLOADABLE void\nutest_type_printer(long long unsigned int i) {\n  UTEST_PRINTF(\"%llu\", i);\n}\n\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#endif\n#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) &&   
         \\\n        !(defined(__MINGW32__) || defined(__MINGW64__)) ||                     \\\n    defined(__TINYC__)\n#define utest_type_printer(val)                                                \\\n  UTEST_PRINTF(                                                                \\\n      _Generic((val),                                                          \\\n      signed char: \"%d\",                                                       \\\n      unsigned char: \"%u\",                                                     \\\n      short: \"%d\",                                                             \\\n      unsigned short: \"%u\",                                                    \\\n      int: \"%d\",                                                               \\\n      long: \"%ld\",                                                             \\\n      long long: \"%lld\",                                                       \\\n      unsigned: \"%u\",                                                          \\\n      unsigned long: \"%lu\",                                                    \\\n      unsigned long long: \"%llu\",                                              \\\n      float: \"%f\",                                                             \\\n      double: \"%f\",                                                            \\\n      long double: \"%Lf\",                                                      \\\n      default: _Generic((val - val), ptrdiff_t: \"%p\", default: \"undef\")),      \\\n      (val))\n#else\n/*\n   we don't have the ability to print the values we got, so we create a macro\n   to tell our users we can't do anything fancy\n*/\n#define utest_type_printer(...) 
UTEST_PRINTF(\"undef\")\n#endif\n\n#if defined(_MSC_VER)\n#define UTEST_SURPRESS_WARNING_BEGIN                                           \\\n  __pragma(warning(push)) __pragma(warning(disable : 4127))                    \\\n      __pragma(warning(disable : 4571)) __pragma(warning(disable : 4130))\n#define UTEST_SURPRESS_WARNING_END __pragma(warning(pop))\n#else\n#define UTEST_SURPRESS_WARNING_BEGIN\n#define UTEST_SURPRESS_WARNING_END\n#endif\n\n#if defined(__cplusplus) && (__cplusplus >= 201103L)\n#define UTEST_AUTO(x) const auto&\n#elif !defined(__cplusplus)\n\n#if defined(__clang__)\n/* clang-format off */\n/* had to disable clang-format here because it malforms the pragmas */\n#define UTEST_AUTO(x)                                                          \\\n  _Pragma(\"clang diagnostic push\")                                             \\\n      _Pragma(\"clang diagnostic ignored \\\"-Wgnu-auto-type\\\"\") __auto_type      \\\n          _Pragma(\"clang diagnostic pop\")\n/* clang-format on */\n#else\n#define UTEST_AUTO(x) __typeof__(x + 0)\n#endif\n\n#else\n#define UTEST_AUTO(x) typeof(x + 0)\n#endif\n\n#if defined(__clang__)\n#define UTEST_STRNCMP(x, y, size)                                              \\\n  _Pragma(\"clang diagnostic push\")                                             \\\n      _Pragma(\"clang diagnostic ignored \\\"-Wdisabled-macro-expansion\\\"\")       \\\n          strncmp(x, y, size) _Pragma(\"clang diagnostic pop\")\n#else\n#define UTEST_STRNCMP(x, y, size) strncmp(x, y, size)\n#endif\n\n#if defined(_MSC_VER)\n#define UTEST_STRNCPY(x, y, size) strcpy_s(x, size, y)\n#elif !defined(__clang__) && defined(__GNUC__)\nstatic UTEST_INLINE char *\nutest_strncpy_gcc(char *const dst, const char *const src, const size_t size) {\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wstringop-overflow\"\n  return strncpy(dst, src, size);\n#pragma GCC diagnostic pop\n}\n\n#define UTEST_STRNCPY(x, y, size) utest_strncpy_gcc(x, y, 
size)\n#else\n#define UTEST_STRNCPY(x, y, size) strncpy(x, y, size)\n#endif\n\n#define UTEST_SKIP(msg)                                                        \\\n  do {                                                                         \\\n    UTEST_PRINTF(\"   Skipped : '%s'\\n\", (msg));                                \\\n    *utest_result = UTEST_TEST_SKIPPED;                                        \\\n    return;                                                                    \\\n  } while (0)\n\n#if defined(__clang__)\n#define UTEST_COND(x, y, cond, msg, is_assert)                                 \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    _Pragma(\"clang diagnostic push\")                                           \\\n        _Pragma(\"clang diagnostic ignored \\\"-Wlanguage-extension-token\\\"\")     \\\n            _Pragma(\"clang diagnostic ignored \\\"-Wc++98-compat-pedantic\\\"\")    \\\n                _Pragma(\"clang diagnostic ignored \\\"-Wfloat-equal\\\"\")          \\\n                    UTEST_AUTO(x) xEval = (x);                                 \\\n    UTEST_AUTO(y) yEval = (y);                                                 \\\n    if (!((xEval)cond(yEval))) {                                               \\\n      const char *const xAsString = #x;                                        \\\n      const char *const yAsString = #y;                                        \\\n      _Pragma(\"clang diagnostic pop\")                                          \\\n          UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                \\\n      UTEST_PRINTF(\"  Expected : (\");                                          \\\n      UTEST_PRINTF(\"%s) \" #cond \" (%s\", xAsString, yAsString);                 \\\n      UTEST_PRINTF(\")\\n\");                                                     \\\n      UTEST_PRINTF(\"    Actual : \");                                           \\\n      
utest_type_printer(xEval);                                               \\\n      UTEST_PRINTF(\" vs \");                                                    \\\n      utest_type_printer(yEval);                                               \\\n      UTEST_PRINTF(\"\\n\");                                                      \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n#elif defined(__GNUC__) || defined(__TINYC__)\n#define UTEST_COND(x, y, cond, msg, is_assert)                                 \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    UTEST_AUTO(x) xEval = (x);                                                 \\\n    UTEST_AUTO(y) yEval = (y);                                                 \\\n    if (!((xEval)cond(yEval))) {                                               \\\n      const char *const xAsString = #x;                                        \\\n      const char *const yAsString = #y;                                        \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : (\");                                          \\\n      
UTEST_PRINTF(\"%s) \" #cond \" (%s\", xAsString, yAsString);                 \\\n      UTEST_PRINTF(\")\\n\");                                                     \\\n      UTEST_PRINTF(\"    Actual : \");                                           \\\n      utest_type_printer(xEval);                                               \\\n      UTEST_PRINTF(\" vs \");                                                    \\\n      utest_type_printer(yEval);                                               \\\n      UTEST_PRINTF(\"\\n\");                                                      \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n#else\n#define UTEST_COND(x, y, cond, msg, is_assert)                                 \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    if (!((x)cond(y))) {                                                       \\\n      UTEST_PRINTF(\"%s:%i: Failure (Expected \" #cond \" Actual)\", __FILE__,     \\\n                   __LINE__);                                                  \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\" Message : 
%s\", msg);                                    \\\n      }                                                                        \\\n      UTEST_PRINTF(\"\\n\");                                                      \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n#endif\n\n#define EXPECT_EQ(x, y) UTEST_COND(x, y, ==, \"\", 0)\n#define EXPECT_EQ_MSG(x, y, msg) UTEST_COND(x, y, ==, msg, 0)\n#define ASSERT_EQ(x, y) UTEST_COND(x, y, ==, \"\", 1)\n#define ASSERT_EQ_MSG(x, y, msg) UTEST_COND(x, y, ==, msg, 1)\n\n#define EXPECT_NE(x, y) UTEST_COND(x, y, !=, \"\", 0)\n#define EXPECT_NE_MSG(x, y, msg) UTEST_COND(x, y, !=, msg, 0)\n#define ASSERT_NE(x, y) UTEST_COND(x, y, !=, \"\", 1)\n#define ASSERT_NE_MSG(x, y, msg) UTEST_COND(x, y, !=, msg, 1)\n\n#define EXPECT_LT(x, y) UTEST_COND(x, y, <, \"\", 0)\n#define EXPECT_LT_MSG(x, y, msg) UTEST_COND(x, y, <, msg, 0)\n#define ASSERT_LT(x, y) UTEST_COND(x, y, <, \"\", 1)\n#define ASSERT_LT_MSG(x, y, msg) UTEST_COND(x, y, <, msg, 1)\n\n#define EXPECT_LE(x, y) UTEST_COND(x, y, <=, \"\", 0)\n#define EXPECT_LE_MSG(x, y, msg) UTEST_COND(x, y, <=, msg, 0)\n#define ASSERT_LE(x, y) UTEST_COND(x, y, <=, \"\", 1)\n#define ASSERT_LE_MSG(x, y, msg) UTEST_COND(x, y, <=, msg, 1)\n\n#define EXPECT_GT(x, y) UTEST_COND(x, y, >, \"\", 0)\n#define EXPECT_GT_MSG(x, y, msg) UTEST_COND(x, y, >, msg, 0)\n#define ASSERT_GT(x, y) UTEST_COND(x, y, >, \"\", 1)\n#define ASSERT_GT_MSG(x, y, msg) 
UTEST_COND(x, y, >, msg, 1)\n\n#define EXPECT_GE(x, y) UTEST_COND(x, y, >=, \"\", 0)\n#define EXPECT_GE_MSG(x, y, msg) UTEST_COND(x, y, >=, msg, 0)\n#define ASSERT_GE(x, y) UTEST_COND(x, y, >=, \"\", 1)\n#define ASSERT_GE_MSG(x, y, msg) UTEST_COND(x, y, >=, msg, 1)\n\n#define UTEST_TRUE(x, msg, is_assert)                                          \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const int xEval = !!(x);                                                   \\\n    if (!(xEval)) {                                                            \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : true\\n\");                                     \\\n      UTEST_PRINTF(\"    Actual : %s\\n\", (xEval) ? \"true\" : \"false\");           \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_TRUE(x) UTEST_TRUE(x, \"\", 0)\n#define EXPECT_TRUE_MSG(x, msg) UTEST_TRUE(x, msg, 0)\n#define ASSERT_TRUE(x) UTEST_TRUE(x, \"\", 1)\n#define ASSERT_TRUE_MSG(x, msg) UTEST_TRUE(x, msg, 1)\n\n#define UTEST_FALSE(x, msg, is_assert)                            
             \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const int xEval = !!(x);                                                   \\\n    if (xEval) {                                                               \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : false\\n\");                                    \\\n      UTEST_PRINTF(\"    Actual : %s\\n\", (xEval) ? \"true\" : \"false\");           \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_FALSE(x) UTEST_FALSE(x, \"\", 0)\n#define EXPECT_FALSE_MSG(x, msg) UTEST_FALSE(x, msg, 0)\n#define ASSERT_FALSE(x) UTEST_FALSE(x, \"\", 1)\n#define ASSERT_FALSE_MSG(x, msg) UTEST_FALSE(x, msg, 1)\n\n#define UTEST_STREQ(x, y, msg, is_assert)                                      \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const char *xEval = (x);                                                   \\\n    const char *yEval = (y);                                                   \\\n    if (UTEST_NULL == xEval || UTEST_NULL == yEval ||        
                  \\\n        0 != strcmp(xEval, yEval)) {                                           \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : \\\"%s\\\"\\n\", xEval);                            \\\n      UTEST_PRINTF(\"    Actual : \\\"%s\\\"\\n\", yEval);                            \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_STREQ(x, y) UTEST_STREQ(x, y, \"\", 0)\n#define EXPECT_STREQ_MSG(x, y, msg) UTEST_STREQ(x, y, msg, 0)\n#define ASSERT_STREQ(x, y) UTEST_STREQ(x, y, \"\", 1)\n#define ASSERT_STREQ_MSG(x, y, msg) UTEST_STREQ(x, y, msg, 1)\n\n#define UTEST_STRNE(x, y, msg, is_assert)                                      \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const char *xEval = (x);                                                   \\\n    const char *yEval = (y);                                                   \\\n    if (UTEST_NULL == xEval || UTEST_NULL == yEval ||                          \\\n        0 == strcmp(xEval, yEval)) {                                           \\\n      UTEST_PRINTF(\"%s:%i: 
Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : \\\"%s\\\"\\n\", xEval);                            \\\n      UTEST_PRINTF(\"    Actual : \\\"%s\\\"\\n\", yEval);                            \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_STRNE(x, y) UTEST_STRNE(x, y, \"\", 0)\n#define EXPECT_STRNE_MSG(x, y, msg) UTEST_STRNE(x, y, msg, 0)\n#define ASSERT_STRNE(x, y) UTEST_STRNE(x, y, \"\", 1)\n#define ASSERT_STRNE_MSG(x, y, msg) UTEST_STRNE(x, y, msg, 1)\n\n#define UTEST_STRNEQ(x, y, n, msg, is_assert)                                  \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const char *xEval = (x);                                                   \\\n    const char *yEval = (y);                                                   \\\n    const size_t nEval = UTEST_CAST(size_t, n);                                \\\n    if (UTEST_NULL == xEval || UTEST_NULL == yEval ||                          \\\n        0 != UTEST_STRNCMP(xEval, yEval, nEval)) {                             \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                
    \\\n      UTEST_PRINTF(\"  Expected : \\\"%.*s\\\"\\n\", UTEST_CAST(int, nEval), xEval);  \\\n      UTEST_PRINTF(\"    Actual : \\\"%.*s\\\"\\n\", UTEST_CAST(int, nEval), yEval);  \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_STRNEQ(x, y, n) UTEST_STRNEQ(x, y, n, \"\", 0)\n#define EXPECT_STRNEQ_MSG(x, y, n, msg) UTEST_STRNEQ(x, y, n, msg, 0)\n#define ASSERT_STRNEQ(x, y, n) UTEST_STRNEQ(x, y, n, \"\", 1)\n#define ASSERT_STRNEQ_MSG(x, y, n, msg) UTEST_STRNEQ(x, y, n, msg, 1)\n\n#define UTEST_STRNNE(x, y, n, msg, is_assert)                                  \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const char *xEval = (x);                                                   \\\n    const char *yEval = (y);                                                   \\\n    const size_t nEval = UTEST_CAST(size_t, n);                                \\\n    if (UTEST_NULL == xEval || UTEST_NULL == yEval ||                          \\\n        0 == UTEST_STRNCMP(xEval, yEval, nEval)) {                             \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      
UTEST_PRINTF(\"  Expected : \\\"%.*s\\\"\\n\", UTEST_CAST(int, nEval), xEval);  \\\n      UTEST_PRINTF(\"    Actual : \\\"%.*s\\\"\\n\", UTEST_CAST(int, nEval), yEval);  \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_STRNNE(x, y, n) UTEST_STRNNE(x, y, n, \"\", 0)\n#define EXPECT_STRNNE_MSG(x, y, n, msg) UTEST_STRNNE(x, y, n, msg, 0)\n#define ASSERT_STRNNE(x, y, n) UTEST_STRNNE(x, y, n, \"\", 1)\n#define ASSERT_STRNNE_MSG(x, y, n, msg) UTEST_STRNNE(x, y, n, msg, 1)\n\n#define UTEST_NEAR(x, y, epsilon, msg, is_assert)                              \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    const double diff =                                                        \\\n        utest_fabs(UTEST_CAST(double, x) - UTEST_CAST(double, y));             \\\n    if (diff > UTEST_CAST(double, epsilon) || utest_isnan(diff)) {             \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : %f\\n\", UTEST_CAST(double, x));                \\\n      UTEST_PRINTF(\"    Actual : %f\\n\", UTEST_CAST(double, y));                \\\n      if 
(strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_NEAR(x, y, epsilon) UTEST_NEAR(x, y, epsilon, \"\", 0)\n#define EXPECT_NEAR_MSG(x, y, epsilon, msg) UTEST_NEAR(x, y, epsilon, msg, 0)\n#define ASSERT_NEAR(x, y, epsilon) UTEST_NEAR(x, y, epsilon, \"\", 1)\n#define ASSERT_NEAR_MSG(x, y, epsilon, msg) UTEST_NEAR(x, y, epsilon, msg, 1)\n\n#if defined(UTEST_HAS_EXCEPTIONS)\n#define UTEST_EXCEPTION(x, exception_type, msg, is_assert)                     \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    int exception_caught = 0;                                                  \\\n    try {                                                                      \\\n      x;                                                                       \\\n    } catch (const exception_type &) {                                         \\\n      exception_caught = 1;                                                    \\\n    } catch (...) 
{                                                            \\\n      exception_caught = 2;                                                    \\\n    }                                                                          \\\n    if (1 != exception_caught) {                                               \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : %s exception\\n\", #exception_type);            \\\n      UTEST_PRINTF(\"    Actual : %s\\n\", (2 == exception_caught)                \\\n                                            ? \"Unexpected exception\"           \\\n                                            : \"No exception\");                 \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_EXCEPTION(x, exception_type)                                    \\\n  UTEST_EXCEPTION(x, exception_type, \"\", 0)\n#define EXPECT_EXCEPTION_MSG(x, exception_type, msg)                           \\\n  UTEST_EXCEPTION(x, exception_type, msg, 0)\n#define ASSERT_EXCEPTION(x, exception_type)                                    \\\n  UTEST_EXCEPTION(x, exception_type, \"\", 
1)\n#define ASSERT_EXCEPTION_MSG(x, exception_type, msg)                           \\\n  UTEST_EXCEPTION(x, exception_type, msg, 1)\n\n#define UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message,     \\\n                                     msg, is_assert)                           \\\n  UTEST_SURPRESS_WARNING_BEGIN do {                                            \\\n    int exception_caught = 0;                                                  \\\n    char *message_caught = UTEST_NULL;                                         \\\n    try {                                                                      \\\n      x;                                                                       \\\n    } catch (const exception_type &e) {                                        \\\n      const char *const what = e.what();                                       \\\n      exception_caught = 1;                                                    \\\n      if (0 !=                                                                 \\\n          UTEST_STRNCMP(what, exception_message, strlen(exception_message))) { \\\n        const size_t message_size = strlen(what) + 1;                          \\\n        message_caught = UTEST_PTR_CAST(char *, malloc(message_size));         \\\n        UTEST_STRNCPY(message_caught, what, message_size);                     \\\n      }                                                                        \\\n    } catch (...) 
{                                                            \\\n      exception_caught = 2;                                                    \\\n    }                                                                          \\\n    if (1 != exception_caught) {                                               \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : %s exception\\n\", #exception_type);            \\\n      UTEST_PRINTF(\"    Actual : %s\\n\", (2 == exception_caught)                \\\n                                            ? \"Unexpected exception\"           \\\n                                            : \"No exception\");                 \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        \\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    } else if (UTEST_NULL != message_caught) {                                 \\\n      UTEST_PRINTF(\"%s:%i: Failure\\n\", __FILE__, __LINE__);                    \\\n      UTEST_PRINTF(\"  Expected : %s exception with message %s\\n\",              \\\n                   #exception_type, exception_message);                        \\\n      UTEST_PRINTF(\"    Actual message : %s\\n\", message_caught);               \\\n      if (strlen(msg) > 0) {                                                   \\\n        UTEST_PRINTF(\"   Message : %s\\n\", msg);                                \\\n      }                                                                        
\\\n      *utest_result = UTEST_TEST_FAILURE;                                      \\\n      free(message_caught);                                                    \\\n      if (is_assert) {                                                         \\\n        return;                                                                \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  while (0)                                                                    \\\n  UTEST_SURPRESS_WARNING_END\n\n#define EXPECT_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message)    \\\n  UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, \"\", 0)\n#define EXPECT_EXCEPTION_WITH_MESSAGE_MSG(x, exception_type,                   \\\n                                          exception_message, msg)              \\\n  UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, msg, 0)\n#define ASSERT_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message)    \\\n  UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, \"\", 1)\n#define ASSERT_EXCEPTION_WITH_MESSAGE_MSG(x, exception_type,                   \\\n                                          exception_message, msg)              \\\n  UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, msg, 1)\n#endif\n\n#if defined(__clang__)\n#if __has_warning(\"-Wunsafe-buffer-usage\")\n#define UTEST_SURPRESS_WARNINGS_BEGIN                                          \\\n  _Pragma(\"clang diagnostic push\")                                             \\\n      _Pragma(\"clang diagnostic ignored \\\"-Wunsafe-buffer-usage\\\"\")\n#define UTEST_SURPRESS_WARNINGS_END _Pragma(\"clang diagnostic pop\")\n#else\n#define UTEST_SURPRESS_WARNINGS_BEGIN\n#define 
UTEST_SURPRESS_WARNINGS_END\n#endif\n#elif defined(__GNUC__) && __GNUC__ >= 8 && defined(__cplusplus)\n#define UTEST_SURPRESS_WARNINGS_BEGIN                                          \\\n  _Pragma(\"GCC diagnostic push\")                                               \\\n      _Pragma(\"GCC diagnostic ignored \\\"-Wclass-memaccess\\\"\")\n#define UTEST_SURPRESS_WARNINGS_END _Pragma(\"GCC diagnostic pop\")\n#else\n#define UTEST_SURPRESS_WARNINGS_BEGIN\n#define UTEST_SURPRESS_WARNINGS_END\n#endif\n\n#define UTEST(SET, NAME)                                                       \\\n  UTEST_SURPRESS_WARNINGS_BEGIN                                                \\\n  UTEST_EXTERN struct utest_state_s utest_state;                               \\\n  static void utest_run_##SET##_##NAME(int *utest_result);                     \\\n  static void utest_##SET##_##NAME(int *utest_result, size_t utest_index) {    \\\n    (void)utest_index;                                                         \\\n    utest_run_##SET##_##NAME(utest_result);                                    \\\n  }                                                                            \\\n  UTEST_INITIALIZER(utest_register_##SET##_##NAME) {                           \\\n    const size_t index = utest_state.tests_length++;                           \\\n    const char name_part[] = #SET \".\" #NAME;                                   \\\n    const size_t name_size = strlen(name_part) + 1;                            \\\n    char *name = UTEST_PTR_CAST(char *, malloc(name_size));                    \\\n    utest_state.tests = UTEST_PTR_CAST(                                        \\\n        struct utest_test_state_s *,                                           \\\n        utest_realloc(UTEST_PTR_CAST(void *, utest_state.tests),               \\\n                      sizeof(struct utest_test_state_s) *                      \\\n                          utest_state.tests_length));                          \\\n  
  if (utest_state.tests && name) {                                           \\\n      utest_state.tests[index].func = &utest_##SET##_##NAME;                   \\\n      utest_state.tests[index].name = name;                                    \\\n      utest_state.tests[index].index = 0;                                      \\\n      UTEST_SNPRINTF(name, name_size, \"%s\", name_part);                        \\\n    } else {                                                                   \\\n      if (utest_state.tests) {                                                 \\\n        free(utest_state.tests);                                               \\\n        utest_state.tests = NULL;                                              \\\n      }                                                                        \\\n      if (name) {                                                              \\\n        free(name);                                                            \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  UTEST_SURPRESS_WARNINGS_END                                                  \\\n  void utest_run_##SET##_##NAME(int *utest_result)\n\n#define UTEST_F_SETUP(FIXTURE)                                                 \\\n  static void utest_f_setup_##FIXTURE(int *utest_result,                       \\\n                                      struct FIXTURE *utest_fixture)\n\n#define UTEST_F_TEARDOWN(FIXTURE)                                              \\\n  static void utest_f_teardown_##FIXTURE(int *utest_result,                    \\\n                                         struct FIXTURE *utest_fixture)\n\n#define UTEST_F(FIXTURE, NAME)                                                 \\\n  UTEST_SURPRESS_WARNINGS_BEGIN                         
                       \\\n  UTEST_EXTERN struct utest_state_s utest_state;                               \\\n  static void utest_f_setup_##FIXTURE(int *, struct FIXTURE *);                \\\n  static void utest_f_teardown_##FIXTURE(int *, struct FIXTURE *);             \\\n  static void utest_run_##FIXTURE##_##NAME(int *, struct FIXTURE *);           \\\n  static void utest_f_##FIXTURE##_##NAME(int *utest_result,                    \\\n                                         size_t utest_index) {                 \\\n    struct FIXTURE fixture;                                                    \\\n    (void)utest_index;                                                         \\\n    memset(&fixture, 0, sizeof(fixture));                                      \\\n    utest_f_setup_##FIXTURE(utest_result, &fixture);                           \\\n    if (UTEST_TEST_PASSED != *utest_result) {                                  \\\n      return;                                                                  \\\n    }                                                                          \\\n    utest_run_##FIXTURE##_##NAME(utest_result, &fixture);                      \\\n    utest_f_teardown_##FIXTURE(utest_result, &fixture);                        \\\n  }                                                                            \\\n  UTEST_INITIALIZER(utest_register_##FIXTURE##_##NAME) {                       \\\n    const size_t index = utest_state.tests_length++;                           \\\n    const char name_part[] = #FIXTURE \".\" #NAME;                               \\\n    const size_t name_size = strlen(name_part) + 1;                            \\\n    char *name = UTEST_PTR_CAST(char *, malloc(name_size));                    \\\n    utest_state.tests = UTEST_PTR_CAST(                                        \\\n        struct utest_test_state_s *,                                           \\\n        utest_realloc(UTEST_PTR_CAST(void *, 
utest_state.tests),               \\\n                      sizeof(struct utest_test_state_s) *                      \\\n                          utest_state.tests_length));                          \\\n    if (utest_state.tests && name) {                                           \\\n      utest_state.tests[index].func = &utest_f_##FIXTURE##_##NAME;             \\\n      utest_state.tests[index].name = name;                                    \\\n      utest_state.tests[index].index = 0;                                      \\\n      UTEST_SNPRINTF(name, name_size, \"%s\", name_part);                        \\\n    } else {                                                                   \\\n      if (utest_state.tests) {                                                 \\\n        free(utest_state.tests);                                               \\\n        utest_state.tests = NULL;                                              \\\n      }                                                                        \\\n      if (name) {                                                              \\\n        free(name);                                                            \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  UTEST_SURPRESS_WARNINGS_END                                                  \\\n  void utest_run_##FIXTURE##_##NAME(int *utest_result,                         \\\n                                    struct FIXTURE *utest_fixture)\n\n#define UTEST_I_SETUP(FIXTURE)                                                 \\\n  static void utest_i_setup_##FIXTURE(                                         \\\n      int *utest_result, struct FIXTURE *utest_fixture, size_t utest_index)\n\n#define UTEST_I_TEARDOWN(FIXTURE)                                   
           \\\n  static void utest_i_teardown_##FIXTURE(                                      \\\n      int *utest_result, struct FIXTURE *utest_fixture, size_t utest_index)\n\n#define UTEST_I(FIXTURE, NAME, INDEX)                                          \\\n  UTEST_SURPRESS_WARNINGS_BEGIN                                                \\\n  UTEST_EXTERN struct utest_state_s utest_state;                               \\\n  static void utest_run_##FIXTURE##_##NAME##_##INDEX(int *, struct FIXTURE *); \\\n  static void utest_i_##FIXTURE##_##NAME##_##INDEX(int *utest_result,          \\\n                                                   size_t index) {             \\\n    struct FIXTURE fixture;                                                    \\\n    memset(&fixture, 0, sizeof(fixture));                                      \\\n    utest_i_setup_##FIXTURE(utest_result, &fixture, index);                    \\\n    if (UTEST_TEST_PASSED != *utest_result) {                                  \\\n      return;                                                                  \\\n    }                                                                          \\\n    utest_run_##FIXTURE##_##NAME##_##INDEX(utest_result, &fixture);            \\\n    utest_i_teardown_##FIXTURE(utest_result, &fixture, index);                 \\\n  }                                                                            \\\n  UTEST_INITIALIZER(utest_register_##FIXTURE##_##NAME##_##INDEX) {             \\\n    size_t i;                                                                  \\\n    utest_uint64_t iUp;                                                        \\\n    for (i = 0; i < (INDEX); i++) {                                            \\\n      const size_t index = utest_state.tests_length++;                         \\\n      const char name_part[] = #FIXTURE \".\" #NAME;                             \\\n      const size_t name_size = strlen(name_part) + 32;                        
 \\\n      char *name = UTEST_PTR_CAST(char *, malloc(name_size));                  \\\n      utest_state.tests = UTEST_PTR_CAST(                                      \\\n          struct utest_test_state_s *,                                         \\\n          utest_realloc(UTEST_PTR_CAST(void *, utest_state.tests),             \\\n                        sizeof(struct utest_test_state_s) *                    \\\n                            utest_state.tests_length));                        \\\n      if (utest_state.tests && name) {                                         \\\n        utest_state.tests[index].func = &utest_i_##FIXTURE##_##NAME##_##INDEX; \\\n        utest_state.tests[index].index = i;                                    \\\n        utest_state.tests[index].name = name;                                  \\\n        iUp = UTEST_CAST(utest_uint64_t, i);                                   \\\n        UTEST_SNPRINTF(name, name_size, \"%s/%\" UTEST_PRIu64, name_part, iUp);  \\\n      } else {                                                                 \\\n        if (utest_state.tests) {                                               \\\n          free(utest_state.tests);                                             \\\n          utest_state.tests = NULL;                                            \\\n        }                                                                      \\\n        if (name) {                                                            \\\n          free(name);                                                          \\\n        }                                                                      \\\n      }                                                                        \\\n    }                                                                          \\\n  }                                                                            \\\n  UTEST_SURPRESS_WARNINGS_END                                                  \\\n 
 void utest_run_##FIXTURE##_##NAME##_##INDEX(int *utest_result,               \\\n                                              struct FIXTURE *utest_fixture)\n\n#ifdef __clang__\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wc++98-compat-pedantic\"\n#endif\n\nUTEST_WEAK\ndouble utest_fabs(double d);\nUTEST_WEAK\ndouble utest_fabs(double d) {\n  union {\n    double d;\n    utest_uint64_t u;\n  } both;\n  both.d = d;\n  both.u &= 0x7fffffffffffffffu;\n  return both.d;\n}\n\nUTEST_WEAK\nint utest_isnan(double d);\nUTEST_WEAK\nint utest_isnan(double d) {\n  union {\n    double d;\n    utest_uint64_t u;\n  } both;\n  both.d = d;\n  both.u &= 0x7fffffffffffffffu;\n  return both.u > 0x7ff0000000000000u;\n}\n\n#ifdef __clang__\n#pragma clang diagnostic pop\n#endif\n\n#if defined(__clang__)\n#if __has_warning(\"-Wunsafe-buffer-usage\")\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n#endif\n#endif\n\nUTEST_WEAK\nint utest_should_filter_test(const char *filter, const char *testcase);\nUTEST_WEAK int utest_should_filter_test(const char *filter,\n                                        const char *testcase) {\n  if (filter) {\n    const char *filter_cur = filter;\n    const char *testcase_cur = testcase;\n    const char *filter_wildcard = UTEST_NULL;\n\n    while (('\\0' != *filter_cur) && ('\\0' != *testcase_cur)) {\n      if ('*' == *filter_cur) {\n        /* store the position of the wildcard */\n        filter_wildcard = filter_cur;\n\n        /* skip the wildcard character */\n        filter_cur++;\n\n        while (('\\0' != *filter_cur) && ('\\0' != *testcase_cur)) {\n          if ('*' == *filter_cur) {\n            /*\n               we found another wildcard (filter is something like *foo*) so we\n               exit the current loop, and return to the parent loop to handle\n               the wildcard case\n            */\n            break;\n          } else if (*filter_cur != *testcase_cur) {\n      
      /* otherwise our filter didn't match, so reset it */\n            filter_cur = filter_wildcard;\n          }\n\n          /* move testcase along */\n          testcase_cur++;\n\n          /* move filter along */\n          filter_cur++;\n        }\n\n        if (('\\0' == *filter_cur) && ('\\0' == *testcase_cur)) {\n          return 0;\n        }\n\n        /* if the testcase has been exhausted, we don't have a match! */\n        if ('\\0' == *testcase_cur) {\n          return 1;\n        }\n      } else {\n        if (*testcase_cur != *filter_cur) {\n          /* test case doesn't match filter */\n          return 1;\n        } else {\n          /* move our filter and testcase forward */\n          testcase_cur++;\n          filter_cur++;\n        }\n      }\n    }\n\n    if (('\\0' != *filter_cur) ||\n        (('\\0' != *testcase_cur) &&\n         ((filter == filter_cur) || ('*' != filter_cur[-1])))) {\n      /* we have a mismatch! */\n      return 1;\n    }\n  }\n\n  return 0;\n}\n\nstatic UTEST_INLINE FILE *utest_fopen(const char *filename, const char *mode) {\n#ifdef _MSC_VER\n  FILE *file;\n  if (0 == fopen_s(&file, filename, mode)) {\n    return file;\n  } else {\n    return UTEST_NULL;\n  }\n#else\n  return fopen(filename, mode);\n#endif\n}\n\nstatic UTEST_INLINE int utest_main(int argc, const char *const argv[]);\nint utest_main(int argc, const char *const argv[]) {\n  utest_uint64_t failed = 0;\n  utest_uint64_t skipped = 0;\n  size_t index = 0;\n  size_t *failed_testcases = UTEST_NULL;\n  size_t failed_testcases_length = 0;\n  size_t *skipped_testcases = UTEST_NULL;\n  size_t skipped_testcases_length = 0;\n  const char *filter = UTEST_NULL;\n  utest_uint64_t ran_tests = 0;\n  int enable_mixed_units = 0;\n  int random_order = 0;\n  utest_uint32_t seed = 0;\n\n  enum colours { RESET, GREEN, RED, YELLOW };\n\n  const int use_colours = UTEST_COLOUR_OUTPUT();\n  const char *colours[] = {\"\\033[0m\", \"\\033[32m\", \"\\033[31m\", \"\\033[33m\"};\n\n  if 
(!use_colours) {\n    for (index = 0; index < sizeof colours / sizeof colours[0]; index++) {\n      colours[index] = \"\";\n    }\n  }\n  /* loop through all arguments looking for our options */\n  for (index = 1; index < UTEST_CAST(size_t, argc); index++) {\n    /* Informational switches */\n    const char help_str[] = \"--help\";\n    const char list_str[] = \"--list-tests\";\n    /* Test config switches */\n    const char filter_str[] = \"--filter=\";\n    const char output_str[] = \"--output=\";\n    const char enable_mixed_units_str[] = \"--enable-mixed-units\";\n    const char random_order_str[] = \"--random-order\";\n    const char random_order_with_seed_str[] = \"--random-order=\";\n\n    if (0 == UTEST_STRNCMP(argv[index], help_str, strlen(help_str))) {\n      printf(\"utest.h - the single file unit testing solution for C/C++!\\n\"\n             \"Command line Options:\\n\"\n             \"  --help                  Show this message and exit.\\n\"\n             \"  --filter=<filter>       Filter the test cases to run (EG. \"\n             \"MyTest*.a would run MyTestCase.a but not MyTestCase.b).\\n\"\n             \"  --list-tests            List testnames, one per line. Output \"\n             \"names can be passed to --filter.\\n\");\n      printf(\"  --output=<output>       Output an xunit XML file to the file \"\n             \"specified in <output>.\\n\"\n             \"  --enable-mixed-units    Enable the per-test output to contain \"\n             \"mixed units (s/ms/us/ns).\\n\"\n             \"  --random-order[=<seed>] Randomize the order that the tests are \"\n             \"ran in. If the optional <seed> argument is not provided, then a \"\n             \"random starting seed is used.\\n\");\n      goto cleanup;\n    } else if (0 ==\n               UTEST_STRNCMP(argv[index], filter_str, strlen(filter_str))) {\n      /* user wants to filter what test cases run! 
*/\n      filter = argv[index] + strlen(filter_str);\n    } else if (0 ==\n               UTEST_STRNCMP(argv[index], output_str, strlen(output_str))) {\n      utest_state.output = utest_fopen(argv[index] + strlen(output_str), \"w+\");\n    } else if (0 == UTEST_STRNCMP(argv[index], list_str, strlen(list_str))) {\n      for (index = 0; index < utest_state.tests_length; index++) {\n        UTEST_PRINTF(\"%s\\n\", utest_state.tests[index].name);\n      }\n      /* when printing the test list, don't actually run the tests */\n      return 0;\n    } else if (0 == UTEST_STRNCMP(argv[index], enable_mixed_units_str,\n                                  strlen(enable_mixed_units_str))) {\n      enable_mixed_units = 1;\n    } else if (0 == UTEST_STRNCMP(argv[index], random_order_with_seed_str,\n                                  strlen(random_order_with_seed_str))) {\n      seed =\n          UTEST_CAST(utest_uint32_t,\n                     strtoul(argv[index] + strlen(random_order_with_seed_str),\n                             UTEST_NULL, 10));\n      random_order = 1;\n    } else if (0 == UTEST_STRNCMP(argv[index], random_order_str,\n                                  strlen(random_order_str))) {\n      const utest_int64_t ns = utest_ns();\n\n      // Some really poor pseudo-random using the current time. 
I do this\n      // because I really want to avoid using C's rand() because that'd mean our\n      // random would be affected by any srand() usage by the user (which I\n      // don't want).\n      seed = UTEST_CAST(utest_uint32_t, ns >> 32) * 31 +\n             UTEST_CAST(utest_uint32_t, ns & 0xffffffff);\n      random_order = 1;\n    }\n  }\n\n  if (random_order) {\n    // Use Fisher-Yates with Durstenfeld's version to randomly re-order the\n    // tests.\n    for (index = utest_state.tests_length; index > 1; index--) {\n      // For the random order we'll use PCG.\n      const utest_uint32_t state = seed;\n      const utest_uint32_t word =\n          ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n      const utest_uint32_t next =\n          ((word >> 22u) ^ word) % UTEST_CAST(utest_uint32_t, index);\n\n      // Swap the randomly chosen element into the last location.\n      const struct utest_test_state_s copy = utest_state.tests[index - 1];\n      utest_state.tests[index - 1] = utest_state.tests[next];\n      utest_state.tests[next] = copy;\n\n      // Move the seed onwards.\n      seed = seed * 747796405u + 2891336453u;\n    }\n  }\n\n  for (index = 0; index < utest_state.tests_length; index++) {\n    if (utest_should_filter_test(filter, utest_state.tests[index].name)) {\n      continue;\n    }\n\n    ran_tests++;\n  }\n\n  printf(\"%s[==========]%s Running %\" UTEST_PRIu64 \" test cases.\\n\",\n         colours[GREEN], colours[RESET], UTEST_CAST(utest_uint64_t, ran_tests));\n\n  if (utest_state.output) {\n    fprintf(utest_state.output, \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\");\n    fprintf(utest_state.output,\n            \"<testsuites tests=\\\"%\" UTEST_PRIu64 \"\\\" name=\\\"All\\\">\\n\",\n            UTEST_CAST(utest_uint64_t, ran_tests));\n    fprintf(utest_state.output,\n            \"<testsuite name=\\\"Tests\\\" tests=\\\"%\" UTEST_PRIu64 \"\\\">\\n\",\n            UTEST_CAST(utest_uint64_t, ran_tests));\n  }\n\n  
for (index = 0; index < utest_state.tests_length; index++) {\n    int result = UTEST_TEST_PASSED;\n    utest_int64_t ns = 0;\n\n    if (utest_should_filter_test(filter, utest_state.tests[index].name)) {\n      continue;\n    }\n\n    printf(\"%s[ RUN      ]%s %s\\n\", colours[GREEN], colours[RESET],\n           utest_state.tests[index].name);\n\n    if (utest_state.output) {\n      fprintf(utest_state.output, \"<testcase name=\\\"%s\\\">\",\n              utest_state.tests[index].name);\n    }\n\n    ns = utest_ns();\n    errno = 0;\n#if defined(UTEST_HAS_EXCEPTIONS)\n    UTEST_SURPRESS_WARNING_BEGIN\n    try {\n      utest_state.tests[index].func(&result, utest_state.tests[index].index);\n    } catch (const std::exception &err) {\n      printf(\" Exception : %s\\n\", err.what());\n      result = UTEST_TEST_FAILURE;\n    } catch (...) {\n      printf(\" Exception : Unknown\\n\");\n      result = UTEST_TEST_FAILURE;\n    }\n    UTEST_SURPRESS_WARNING_END\n#else\n    utest_state.tests[index].func(&result, utest_state.tests[index].index);\n#endif\n    ns = utest_ns() - ns;\n\n    if (utest_state.output) {\n      fprintf(utest_state.output, \"</testcase>\\n\");\n    }\n\n    // Record the failing test.\n    if (UTEST_TEST_FAILURE == result) {\n      const size_t failed_testcase_index = failed_testcases_length++;\n      failed_testcases = UTEST_PTR_CAST(\n          size_t *, utest_realloc(UTEST_PTR_CAST(void *, failed_testcases),\n                                  sizeof(size_t) * failed_testcases_length));\n      if (UTEST_NULL != failed_testcases) {\n        failed_testcases[failed_testcase_index] = index;\n      }\n      failed++;\n    } else if (UTEST_TEST_SKIPPED == result) {\n      const size_t skipped_testcase_index = skipped_testcases_length++;\n      skipped_testcases = UTEST_PTR_CAST(\n          size_t *, utest_realloc(UTEST_PTR_CAST(void *, skipped_testcases),\n                                  sizeof(size_t) * skipped_testcases_length));\n      if 
(UTEST_NULL != skipped_testcases) {\n        skipped_testcases[skipped_testcase_index] = index;\n      }\n      skipped++;\n    }\n\n    {\n      const char *const units[] = {\"ns\", \"us\", \"ms\", \"s\", UTEST_NULL};\n      unsigned int unit_index = 0;\n      utest_int64_t time = ns;\n\n      if (enable_mixed_units) {\n        for (unit_index = 0; UTEST_NULL != units[unit_index]; unit_index++) {\n          if (10000 > time) {\n            break;\n          }\n\n          time /= 1000;\n        }\n      }\n\n      if (UTEST_TEST_FAILURE == result) {\n        printf(\"%s[  FAILED  ]%s %s (%\" UTEST_PRId64 \"%s)\\n\", colours[RED],\n               colours[RESET], utest_state.tests[index].name, time,\n               units[unit_index]);\n      } else if (UTEST_TEST_SKIPPED == result) {\n        printf(\"%s[  SKIPPED ]%s %s (%\" UTEST_PRId64 \"%s)\\n\", colours[YELLOW],\n               colours[RESET], utest_state.tests[index].name, time,\n               units[unit_index]);\n      } else {\n        printf(\"%s[       OK ]%s %s (%\" UTEST_PRId64 \"%s)\\n\", colours[GREEN],\n               colours[RESET], utest_state.tests[index].name, time,\n               units[unit_index]);\n      }\n    }\n  }\n\n  printf(\"%s[==========]%s %\" UTEST_PRIu64 \" test cases ran.\\n\", colours[GREEN],\n         colours[RESET], ran_tests);\n  printf(\"%s[  PASSED  ]%s %\" UTEST_PRIu64 \" tests.\\n\", colours[GREEN],\n         colours[RESET], ran_tests - failed - skipped);\n\n  if (0 != skipped) {\n    printf(\"%s[  SKIPPED ]%s %\" UTEST_PRIu64 \" tests, listed below:\\n\",\n           colours[YELLOW], colours[RESET], skipped);\n    for (index = 0; index < skipped_testcases_length; index++) {\n      printf(\"%s[  SKIPPED ]%s %s\\n\", colours[YELLOW], colours[RESET],\n             utest_state.tests[skipped_testcases[index]].name);\n    }\n  }\n\n  if (0 != failed) {\n    printf(\"%s[  FAILED  ]%s %\" UTEST_PRIu64 \" tests, listed below:\\n\",\n           colours[RED], colours[RESET], 
failed);\n    for (index = 0; index < failed_testcases_length; index++) {\n      printf(\"%s[  FAILED  ]%s %s\\n\", colours[RED], colours[RESET],\n             utest_state.tests[failed_testcases[index]].name);\n    }\n  }\n\n  if (utest_state.output) {\n    fprintf(utest_state.output, \"</testsuite>\\n</testsuites>\\n\");\n  }\n\ncleanup:\n  for (index = 0; index < utest_state.tests_length; index++) {\n    free(UTEST_PTR_CAST(void *, utest_state.tests[index].name));\n  }\n\n  free(UTEST_PTR_CAST(void *, skipped_testcases));\n  free(UTEST_PTR_CAST(void *, failed_testcases));\n  free(UTEST_PTR_CAST(void *, utest_state.tests));\n\n  if (utest_state.output) {\n    fclose(utest_state.output);\n  }\n\n  return UTEST_CAST(int, failed);\n}\n\n#if defined(__clang__)\n#if __has_warning(\"-Wunsafe-buffer-usage\")\n#pragma clang diagnostic pop\n#endif\n#endif\n\n/*\n   we need, in exactly one source file, define the global struct that will hold\n   the data we need to run utest. This macro allows the user to declare the\n   data without having to use the UTEST_MAIN macro, thus allowing them to write\n   their own main() function.\n*/\n#define UTEST_STATE() struct utest_state_s utest_state = {0, 0, 0}\n\n/*\n   define a main() function to call into utest.h and start executing tests! A\n   user can optionally not use this macro, and instead define their own main()\n   function and manually call utest_main. The user must, in exactly one source\n   file, use the UTEST_STATE macro to declare a global struct variable that\n   utest requires.\n*/\n#define UTEST_MAIN()                                                           \\\n  UTEST_STATE();                                                               \\\n  int main(int argc, const char *const argv[]) {                               \\\n    return utest_main(argc, argv);                                             \\\n  }\n\n#endif /* SHEREDOM_UTEST_H_INCLUDED */\n"
  },
  {
    "path": "deps/vma/CHANGELOG.md",
    "content": "# 3.2.1 (2025-02-05)\n\nChanges:\n\n- Fixed an assert in `vmaCreateAllocator` function incorrectly failing when Vulkan version 1.4 is used (#457).\n- Fix for importing function `vkGetPhysicalDeviceMemoryProperties2` / `vkGetPhysicalDeviceMemoryProperties2KHR` when `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro is enabled (#410).\n- Other minor fixes and improvements...\n\n# 3.2.0 (2024-12-30)\n\nAdditions to the library API:\n\n- Added support for Vulkan 1.4.\n- Added support for VK_KHR_external_memory_win32 extension - `VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT` flag, `vmaGetMemoryWin32Handle` function, and a whole new documentation chapter about it (#442).\n\nOther changes:\n\n- Fixed thread safety issue (#451).\n- Many other bug fixes and improvements in the library code, documentation, sample app, Cmake script, mostly to improve compatibility with various compilers and GPUs.\n\n# 3.1.0 (2024-05-27)\n\nThis release gathers fixes and improvements made during many months of continuous development on the main branch, mostly based on issues and pull requests on GitHub.\n\nAdditions to the library API:\n\n- Added convenience functions `vmaCopyMemoryToAllocation`, `vmaCopyAllocationToMemory`.\n- Added functions `vmaCreateAliasingBuffer2`, `vmaCreateAliasingImage2` that offer creating a buffer/image in an existing allocation with additional `allocationLocalOffset`.\n- Added function `vmaGetAllocationInfo2`, structure `VmaAllocationInfo2` that return additional information about an allocation, useful for interop with other APIs (#383, #340).\n- Added callback `VmaDefragmentationInfo::pfnBreakCallback` that allows breaking long execution of `vmaBeginDefragmentation`.\n  Also added `PFN_vmaCheckDefragmentationBreakFunction`, `VmaDefragmentationInfo::pBreakCallbackUserData`.\n- Added support for VK_KHR_maintenance4 extension - `VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT` flag (#397).\n- Added support for VK_KHR_maintenance5 extension - 
`VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT` flag (#411).\n\nOther changes:\n\n- Changes in debug and configuration macros:\n  - Split macros into separate `VMA_DEBUG_LOG` and `VMA_DEBUG_LOG_FORMAT` (#297).\n  - Added macros `VMA_ASSERT_LEAK`, `VMA_LEAK_LOG_FORMAT` separate from normal `VMA_ASSERT`, `VMA_DEBUG_LOG_FORMAT` (#379, #385).\n  - Added macro `VMA_EXTENDS_VK_STRUCT` (#347).\n- Countless bug fixes and improvements in the code and documentation, mostly to improve compatibility with various compilers and GPUs, including:\n  - Fixed missing `#include` that resulted in compilation error about `snprintf` not declared on some compilers (#312).\n  - Fixed main memory type selection algorithm for GPUs that have no `HOST_CACHED` memory type, like Raspberry Pi (#362).\n- Major changes in Cmake script.\n- Fixes in GpuMemDumpVis.py script.\n\n# 3.0.1 (2022-05-26)\n\n- Fixes in defragmentation algorithm.\n- Fixes in GpuMemDumpVis.py regarding image height calculation.\n- Other bug fixes, optimizations, and improvements in the code and documentation.\n\n# 3.0.0 (2022-03-25)\n\nIt has been a long time since the previous official release, so hopefully everyone has been using the latest code from \"master\" branch, which is always maintained in a good state, not the old version. For completeness, here is the list of changes since v2.3.0. The major version number has changed, so there are some compatibility-breaking changes, but the basic API stays the same and is mostly backward-compatible.\n\nMajor features added (some compatibility-breaking):\n\n- Added new API for selecting preferred memory type: flags `VMA_MEMORY_USAGE_AUTO`, `VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE`, `VMA_MEMORY_USAGE_AUTO_PREFER_HOST`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT`. 
Old values like `VMA_MEMORY_USAGE_GPU_ONLY` still work as before, for backward compatibility, but are not recommended.\n- Added new defragmentation API and algorithm, replacing the old one. See structure `VmaDefragmentationInfo`, `VmaDefragmentationMove`, `VmaDefragmentationPassMoveInfo`, `VmaDefragmentationStats`, function `vmaBeginDefragmentation`, `vmaEndDefragmentation`, `vmaBeginDefragmentationPass`, `vmaEndDefragmentationPass`.\n- Redesigned API for statistics, replacing the old one. See structures: `VmaStatistics`, `VmaDetailedStatistics`, `VmaTotalStatistics`. `VmaBudget`, functions: `vmaGetHeapBudgets`, `vmaCalculateStatistics`, `vmaGetPoolStatistics`, `vmaCalculatePoolStatistics`, `vmaGetVirtualBlockStatistics`, `vmaCalculateVirtualBlockStatistics`.\n- Added \"Virtual allocator\" feature - possibility to use core allocation algorithms for allocation of custom memory, not necessarily Vulkan device memory. See functions like `vmaCreateVirtualBlock`, `vmaDestroyVirtualBlock` and many more.\n- `VmaAllocation` now keeps both `void* pUserData` and `char* pName`. Added function `vmaSetAllocationName`, member `VmaAllocationInfo::pName`. Flag `VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT` is now deprecated.\n- Clarified and cleaned up various ways of importing Vulkan functions. See macros `VMA_STATIC_VULKAN_FUNCTIONS`, `VMA_DYNAMIC_VULKAN_FUNCTIONS`, structure `VmaVulkanFunctions`. Added members `VmaVulkanFunctions::vkGetInstanceProcAddr`, `vkGetDeviceProcAddr`, which are now required when using `VMA_DYNAMIC_VULKAN_FUNCTIONS`.\n\nRemoved (compatibility-breaking):\n\n- Removed whole \"lost allocations\" feature. Removed from the interface: `VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT`, `VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT`, `vmaCreateLostAllocation`, `vmaMakePoolAllocationsLost`, `vmaTouchAllocation`, `VmaAllocatorCreateInfo::frameInUseCount`, `VmaPoolCreateInfo::frameInUseCount`.\n- Removed whole \"record & replay\" feature. 
Removed from the API: `VmaAllocatorCreateInfo::pRecordSettings`, `VmaRecordSettings`, `VmaRecordFlagBits`, `VmaRecordFlags`. Removed VmaReplay application.\n- Removed \"buddy\" algorithm - removed flag `VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT`.\n\nMinor but compatibility-breaking changes:\n\n- Changes in `ALLOCATION_CREATE_STRATEGY` flags. Removed flags: `VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT`, `VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`, which were aliases to other existing flags.\n- Added a member `void* pUserData` to `VmaDeviceMemoryCallbacks`. Updated `PFN_vmaAllocateDeviceMemoryFunction`, `PFN_vmaFreeDeviceMemoryFunction` to use the new `pUserData` member.\n- Removed function `vmaResizeAllocation` that was already deprecated.\n\nOther major changes:\n\n- Added new features to custom pools: support for dedicated allocations, new member `VmaPoolCreateInfo::pMemoryAllocateNext`, `minAllocationAlignment`.\n- Added support for Vulkan 1.2, 1.3.\n- Added support for VK_KHR_buffer_device_address extension - flag `VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT`.\n- Added support for VK_EXT_memory_priority extension - flag `VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT`, members `VmaAllocationCreateInfo::priority`, `VmaPoolCreateInfo::priority`.\n- Added support for VK_AMD_device_coherent_memory extension - flag `VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT`.\n- Added member `VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes`.\n- Added function `vmaGetAllocatorInfo`, structure `VmaAllocatorInfo`.\n- Added functions `vmaFlushAllocations`, `vmaInvalidateAllocations` for multiple allocations at once.\n- Added flag `VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT`.\n- Added function `vmaCreateBufferWithAlignment`.\n- Added convenience function `vmaGetAllocationMemoryProperties`.\n- Added convenience functions: `vmaCreateAliasingBuffer`, `vmaCreateAliasingImage`.\n\nOther minor changes:\n\n- 
Implemented Two-Level Segregated Fit (TLSF) allocation algorithm, replacing previous default one. It is much faster, especially when freeing many allocations at once or when `bufferImageGranularity` is large.\n- Renamed debug macro `VMA_DEBUG_ALIGNMENT` to `VMA_MIN_ALIGNMENT`.\n- Added CMake support - CMakeLists.txt files. Removed Premake support.\n- Changed `vmaInvalidateAllocation` and `vmaFlushAllocation` to return `VkResult`.\n- Added nullability annotations for Clang: `VMA_NULLABLE`, `VMA_NOT_NULL`, `VMA_NULLABLE_NON_DISPATCHABLE`, `VMA_NOT_NULL_NON_DISPATCHABLE`, `VMA_LEN_IF_NOT_NULL`.\n- JSON dump format has changed.\n- Countless fixes and improvements, including performance optimizations, compatibility with various platforms and compilers, documentation.\n\n# 2.3.0 (2019-12-04)\n\nMajor release after a year of development in \"master\" branch and feature branches. Notable new features: supporting Vulkan 1.1, supporting query for memory budget.\n\nMajor changes:\n\n- Added support for Vulkan 1.1.\n    - Added member `VmaAllocatorCreateInfo::vulkanApiVersion`.\n    - When Vulkan 1.1 is used, there is no need to enable VK_KHR_dedicated_allocation or VK_KHR_bind_memory2 extensions, as they are promoted to Vulkan itself.\n- Added support for query for memory budget and staying within the budget.\n    - Added function `vmaGetBudget`, structure `VmaBudget`. This can also serve as simple statistics, more efficient than `vmaCalculateStats`.\n    - By default the budget is estimated based on memory heap sizes. 
It may be queried from the system using VK_EXT_memory_budget extension if you use `VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT` flag and `VmaAllocatorCreateInfo::instance` member.\n    - Added flag `VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT` that fails an allocation if it would exceed the budget.\n- Added new memory usage options:\n    - `VMA_MEMORY_USAGE_CPU_COPY` for memory that is preferably not `DEVICE_LOCAL` but not guaranteed to be `HOST_VISIBLE`.\n    - `VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED` for memory that is `LAZILY_ALLOCATED`.\n- Added support for VK_KHR_bind_memory2 extension:\n    - Added `VMA_ALLOCATION_CREATE_DONT_BIND_BIT` flag that lets you create both buffer/image and allocation, but don't bind them together.\n    - Added flag `VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT`, functions `vmaBindBufferMemory2`, `vmaBindImageMemory2` that let you specify additional local offset and `pNext` pointer while binding.\n- Added functions `vmaSetPoolName`, `vmaGetPoolName` that let you assign string names to custom pools. JSON dump file format and VmaDumpVis tool is updated to show these names.\n- Defragmentation is legal only on buffers and images in `VK_IMAGE_TILING_LINEAR`. This is due to the way it is currently implemented in the library and the restrictions of the Vulkan specification. Clarified documentation in this regard. See discussion in #59.\n\nMinor changes:\n\n- Made `vmaResizeAllocation` function deprecated, always returning failure.\n- Made changes in the internal algorithm for the choice of memory type. Be careful! You may now get a type that is not `HOST_VISIBLE` or `HOST_COHERENT` if it's not stated as always ensured by some `VMA_MEMORY_USAGE_*` flag.\n- Extended VmaReplay application with more detailed statistics printed at the end.\n- Added macros `VMA_CALL_PRE`, `VMA_CALL_POST` that let you decorate declarations of all library functions if you want to e.g. 
export/import them as dynamically linked library.\n- Optimized `VmaAllocation` objects to be allocated out of an internal free-list allocator. This makes allocation and deallocation causing 0 dynamic CPU heap allocations on average.\n- Updated recording CSV file format version to 1.8, to support new functions.\n- Many additions and fixes in documentation. Many compatibility fixes for various compilers and platforms. Other internal bugfixes, optimizations, updates, refactoring...\n\n# 2.2.0 (2018-12-13)\n\nMajor release after many months of development in \"master\" branch and feature branches. Notable new features: defragmentation of GPU memory, buddy algorithm, convenience functions for sparse binding.\n\nMajor changes:\n\n- New, more powerful defragmentation:\n  - Added structure `VmaDefragmentationInfo2`, functions `vmaDefragmentationBegin`, `vmaDefragmentationEnd`.\n  - Added support for defragmentation of GPU memory.\n  - Defragmentation of CPU memory now uses `memmove`, so it can move data to overlapping regions.\n  - Defragmentation of CPU memory is now available for memory types that are `HOST_VISIBLE` but not `HOST_COHERENT`.\n  - Added structure member `VmaVulkanFunctions::vkCmdCopyBuffer`.\n  - Major internal changes in defragmentation algorithm.\n  - VmaReplay: added parameters: `--DefragmentAfterLine`, `--DefragmentationFlags`.\n  - Old interface (structure `VmaDefragmentationInfo`, function `vmaDefragment`) is now deprecated.\n- Added buddy algorithm, available for custom pools - flag `VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT`.\n- Added convenience functions for multiple allocations and deallocations at once, intended for sparse binding resources - functions `vmaAllocateMemoryPages`, `vmaFreeMemoryPages`.\n- Added function that tries to resize existing allocation in place: `vmaResizeAllocation`.\n- Added flags for allocation strategy: `VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT`, 
`VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT`, and their aliases: `VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`.\n\nMinor changes:\n\n- Changed behavior of allocation functions to return `VK_ERROR_VALIDATION_FAILED_EXT` when trying to allocate memory of size 0, create buffer with size 0, or image with one of the dimensions 0.\n- VmaReplay: Added support for Windows end of lines.\n- Updated recording CSV file format version to 1.5, to support new functions.\n- Internal optimization: using read-write mutex on some platforms.\n- Many additions and fixes in documentation. Many compatibility fixes for various compilers. Other internal bugfixes, optimizations, refactoring, added more internal validation...\n\n# 2.1.0 (2018-09-10)\n\nMinor bugfixes.\n\n# 2.1.0-beta.1 (2018-08-27)\n\nMajor release after many months of development in \"development\" branch and features branches. Many new features added, some bugs fixed. API stays backward-compatible.\n\nMajor changes:\n\n- Added linear allocation algorithm, accessible for custom pools, that can be used as free-at-once, stack, double stack, or ring buffer. See \"Linear allocation algorithm\" documentation chapter.\n  - Added `VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT`, `VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT`.\n- Added feature to record sequence of calls to the library to a file and replay it using dedicated application. 
See documentation chapter \"Record and replay\".\n  - Recording: added `VmaAllocatorCreateInfo::pRecordSettings`.\n  - Replaying: added VmaReplay project.\n  - Recording file format: added document \"docs/Recording file format.md\".\n- Improved support for non-coherent memory.\n  - Added functions: `vmaFlushAllocation`, `vmaInvalidateAllocation`.\n  - `nonCoherentAtomSize` is now respected automatically.\n  - Added `VmaVulkanFunctions::vkFlushMappedMemoryRanges`, `vkInvalidateMappedMemoryRanges`.\n- Improved debug features related to detecting incorrect mapped memory usage. See documentation chapter \"Debugging incorrect memory usage\".\n  - Added debug macro `VMA_DEBUG_DETECT_CORRUPTION`, functions `vmaCheckCorruption`, `vmaCheckPoolCorruption`.\n  - Added debug macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to initialize contents of allocations with a bit pattern.\n  - Changed behavior of `VMA_DEBUG_MARGIN` macro - it now adds margin also before first and after last allocation in a block.\n- Changed format of JSON dump returned by `vmaBuildStatsString` (not backward compatible!).\n  - Custom pools and memory blocks now have IDs that don't change after sorting.\n  - Added properties: \"CreationFrameIndex\", \"LastUseFrameIndex\", \"Usage\".\n  - Changed VmaDumpVis tool to use these new properties for better coloring.\n  - Changed behavior of `vmaGetAllocationInfo` and `vmaTouchAllocation` to update `allocation.lastUseFrameIndex` even if allocation cannot become lost.\n\nMinor changes:\n\n- Changes in custom pools:\n  - Added new structure member `VmaPoolStats::blockCount`.\n  - Changed behavior of `VmaPoolCreateInfo::blockSize` = 0 (default) - it now means that pool may use variable block sizes, just like default pools do.\n- Improved logic of `vmaFindMemoryTypeIndex` for some cases, especially integrated GPUs.\n- VulkanSample application: Removed dependency on external library MathFu. 
Added own vector and matrix structures.\n- Changes that improve compatibility with various platforms, including: Visual Studio 2012, 32-bit code, C compilers.\n  - Changed usage of \"VK_KHR_dedicated_allocation\" extension in the code to be optional, driven by macro `VMA_DEDICATED_ALLOCATION`, for compatibility with Android.\n- Many additions and fixes in documentation, including description of new features, as well as \"Validation layer warnings\".\n- Other bugfixes.\n\n# 2.0.0 (2018-03-19)\n\nA major release with many compatibility-breaking changes.\n\nNotable new features:\n\n- Introduction of `VmaAllocation` handle that you must retrieve from allocation functions and pass to deallocation functions next to normal `VkBuffer` and `VkImage`.\n- Introduction of `VmaAllocationInfo` structure that you can retrieve from `VmaAllocation` handle to access parameters of the allocation (like `VkDeviceMemory` and offset) instead of retrieving them directly from allocation functions.\n- Support for reference-counted mapping and persistently mapped allocations - see `vmaMapMemory`, `VMA_ALLOCATION_CREATE_MAPPED_BIT`.\n- Support for custom memory pools - see `VmaPool` handle, `VmaPoolCreateInfo` structure, `vmaCreatePool` function.\n- Support for defragmentation (compaction) of allocations - see function `vmaDefragment` and related structures.\n- Support for \"lost allocations\" - see appropriate chapter on documentation Main Page.\n\n# 1.0.1 (2017-07-04)\n\n- Fixes for Linux GCC compilation.\n- Changed \"CONFIGURATION SECTION\" to contain #ifndef so you can define these macros before including this header, not necessarily change them in the file.\n\n# 1.0.0 (2017-06-16)\n\nFirst public release.\n"
  },
  {
    "path": "deps/vma/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.10)\n\nproject(vma)\n\n\nadd_library(vma STATIC\n    ${CMAKE_CURRENT_LIST_DIR}/src/vk_mem_alloc.cpp\n    ${CMAKE_CURRENT_LIST_DIR}/src/vk_mem_alloc.h)\n\nif (CMAKE_CXX_COMPILER_ID MATCHES \"Clang\")\n    target_compile_options(vma PRIVATE -Wno-nullability-completeness)\nendif()\n\ntarget_include_directories(vma PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../vulkan-headers)\ntarget_include_directories(vma PUBLIC\n    ${CMAKE_CURRENT_LIST_DIR}/src)\ntarget_compile_definitions(vma PUBLIC VMA_STATS_STRING_ENABLED=0)\n"
  },
  {
    "path": "deps/vma/LICENSE.txt",
    "content": "Copyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "deps/vma/README.md",
    "content": "# Vulkan Memory Allocator\n\nEasy to integrate Vulkan memory allocation library.\n\n**Documentation:** Browse online: [Vulkan Memory Allocator](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/) (generated from Doxygen-style comments in [include/vk_mem_alloc.h](include/vk_mem_alloc.h))\n\n**License:** MIT. See [LICENSE.txt](LICENSE.txt)\n\n**Changelog:** See [CHANGELOG.md](CHANGELOG.md)\n\n**Product page:** [Vulkan Memory Allocator on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)\n\n**Build status:**\n\n- Windows: [![Build status](https://ci.appveyor.com/api/projects/status/4vlcrb0emkaio2pn/branch/master?svg=true)](https://ci.appveyor.com/project/adam-sawicki-amd/vulkanmemoryallocator/branch/master)  \n- Linux: [![Build Status](https://app.travis-ci.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.svg?branch=master)](https://app.travis-ci.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)\n\n[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.svg)](http://isitmaintained.com/project/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator \"Average time to resolve an issue\")\n\n# Problem\n\nMemory allocation and resource (buffer and image) creation in Vulkan is difficult (comparing to older graphics APIs, like D3D11 or OpenGL) for several reasons:\n\n- It requires a lot of boilerplate code, just like everything else in Vulkan, because it is a low-level and high-performance API.\n- There is additional level of indirection: `VkDeviceMemory` is allocated separately from creating `VkBuffer`/`VkImage` and they must be bound together.\n- Driver must be queried for supported memory heaps and memory types. 
Different GPU vendors provide different types of it.\n- It is recommended to allocate bigger chunks of memory and assign parts of them to particular resources, as there is a limit on maximum number of memory blocks that can be allocated.\n\n# Features\n\nThis library can help game developers to manage memory allocations and resource creation by offering some higher-level functions:\n\n1. Functions that help to choose correct and optimal memory type based on intended usage of the memory.\n   - Required or preferred traits of the memory are expressed using higher-level description comparing to Vulkan flags.\n2. Functions that allocate memory blocks, reserve and return parts of them (`VkDeviceMemory` + offset + size) to the user.\n   - Library keeps track of allocated memory blocks, used and unused ranges inside them, finds best matching unused ranges for new allocations, respects all the rules of alignment and buffer/image granularity.\n3. Functions that can create an image/buffer, allocate memory for it and bind them together - all in one call.\n\nAdditional features:\n\n- Well-documented - description of all functions and structures provided, along with chapters that contain general description and example code.\n- Thread-safety: Library is designed to be used in multithreaded code. Access to a single device memory block referred by different buffers and textures (binding, mapping) is synchronized internally. Memory mapping is reference-counted.\n- Configuration: Fill optional members of `VmaAllocatorCreateInfo` structure to provide custom CPU memory allocator, pointers to Vulkan functions and other parameters.\n- Customization and integration with custom engines: Predefine appropriate macros to provide your own implementation of all external facilities used by the library like assert, mutex, atomic.\n- Support for memory mapping, reference-counted internally. 
Support for persistently mapped memory: Just allocate with appropriate flag and access the pointer to already mapped memory.\n- Support for non-coherent memory. Functions that flush/invalidate memory. `nonCoherentAtomSize` is respected automatically.\n- Support for resource aliasing (overlap).\n- Support for sparse binding and sparse residency: Convenience functions that allocate or free multiple memory pages at once.\n- Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it.\n- Linear allocator: Create a pool with linear algorithm and use it for much faster allocations and deallocations in free-at-once, stack, double stack, or ring buffer fashion.\n- Support for Vulkan 1.0...1.4.\n- Support for extensions (and equivalent functionality included in new Vulkan versions):\n   - VK_KHR_dedicated_allocation: Just enable it and it will be used automatically by the library.\n   - VK_KHR_bind_memory2.\n   - VK_KHR_maintenance4.\n   - VK_KHR_maintenance5, including `VkBufferUsageFlags2CreateInfoKHR`.\n   - VK_EXT_memory_budget: Used internally if available to query for current usage and budget. If not available, it falls back to an estimation based on memory heap sizes.\n   - VK_KHR_buffer_device_address: Flag `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR` is automatically added to memory allocations where needed.\n   - VK_EXT_memory_priority: Set `priority` of allocations or custom pools and it will be set automatically using this extension.\n   - VK_AMD_device_coherent_memory.\n   - VK_KHR_external_memory_win32.\n- Defragmentation of GPU and CPU memory: Let the library move data around to free some memory blocks and make your allocations better compacted.\n- Statistics: Obtain brief or detailed statistics about the amount of memory used, unused, number of allocated blocks, number of allocations etc. 
- globally, per memory heap, and per memory type.\n- Debug annotations: Associate custom `void* pUserData` and debug `char* pName` with each allocation.\n- JSON dump: Obtain a string in JSON format with detailed map of internal state, including list of allocations, their string names, and gaps between them.\n- Convert this JSON dump into a picture to visualize your memory. See [tools/GpuMemDumpVis](tools/GpuMemDumpVis/README.md).\n- Debugging incorrect memory usage: Enable initialization of all allocated memory with a bit pattern to detect usage of uninitialized or freed memory. Enable validation of a magic number after every allocation to detect out-of-bounds memory corruption.\n- Support for interoperability with OpenGL.\n- Virtual allocator: Interface for using core allocation algorithm to allocate any custom data, e.g. pieces of one large buffer.\n\n# Prerequisites\n\n- Self-contained C++ library in single header file. No external dependencies other than standard C and C++ library and of course Vulkan. Some features of C++14 used. STL containers, RTTI, or C++ exceptions are not used.\n- Public interface in C, in same convention as Vulkan API. Implementation in C++.\n- Error handling implemented by returning `VkResult` error codes - same way as in Vulkan.\n- Interface documented using Doxygen-style comments.\n- Platform-independent, but developed and tested on Windows using Visual Studio. Continuous integration setup for Windows and Linux. Used also on Android, MacOS, and other platforms.\n\n# Example\n\nBasic usage of this library is very simple. Advanced features are optional. 
After you created global `VmaAllocator` object, a complete code needed to create a buffer may look like this:\n\n```cpp\nVkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufferInfo.size = 65536;\nbufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);\n```\n\nWith this one function call:\n\n1. `VkBuffer` is created.\n2. `VkDeviceMemory` block is allocated if needed.\n3. An unused region of the memory block is bound to this buffer.\n\n`VmaAllocation` is an object that represents memory assigned to this buffer. It can be queried for parameters like `VkDeviceMemory` handle and offset.\n\n# How to build\n\nOn Windows it is recommended to use [CMake GUI](https://cmake.org/runningcmake/).\n\nAlternatively you can generate/open a Visual Studio from the command line:\n\n```sh\n# By default CMake picks the newest version of Visual Studio it can use\ncmake -S .  -B build -D VMA_BUILD_SAMPLES=ON\ncmake --open build\n```\n\nOn Linux:\n\n```sh\ncmake -S . -B build\n# Since VMA has no source files, you can skip to installation immediately\ncmake --install build --prefix build/install\n```\n\n## How to use\n\nAfter calling either `find_package` or `add_subdirectory` simply link the library.\nThis automatically handles configuring the include directory. 
Example:\n\n```cmake\nfind_package(VulkanMemoryAllocator CONFIG REQUIRED)\ntarget_link_libraries(YourGameEngine PRIVATE GPUOpen::VulkanMemoryAllocator)\n```\n\nFor more info on using CMake visit the official [CMake documentation](https://cmake.org/cmake/help/latest/index.html).\n\n## Building using vcpkg\n\nYou can download and install VulkanMemoryAllocator using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:\n\n    git clone https://github.com/Microsoft/vcpkg.git\n    cd vcpkg\n    ./bootstrap-vcpkg.sh\n    ./vcpkg integrate install\n    ./vcpkg install vulkan-memory-allocator\n\nThe VulkanMemoryAllocator port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.\n\n# Binaries\n\nThe release comes with precompiled binary executable for \"VulkanSample\" application which contains test suite. It is compiled using Visual Studio 2022, so it requires appropriate libraries to work, including \"MSVCP140.dll\", \"VCRUNTIME140.dll\", \"VCRUNTIME140_1.dll\". 
If the launch fails with error message telling about those files missing, please download and install [Microsoft Visual C++ Redistributable](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads), \"X64\" version.\n\n# Read more\n\nSee **[Documentation](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/)**.\n\n# Software using this library\n\n- **[Blender](https://www.blender.org)**\n- **[Qt Project](https://github.com/qt)**\n- **[Baldur's Gate III](https://www.mobygames.com/game/150689/baldurs-gate-iii/credits/windows/?autoplatform=true)**\n- **[Cyberpunk 2077](https://www.mobygames.com/game/128136/cyberpunk-2077/credits/windows/?autoplatform=true)**\n- **[X-Plane](https://x-plane.com/)**\n- **[Detroit: Become Human](https://gpuopen.com/learn/porting-detroit-3/)**\n- **[Vulkan Samples](https://github.com/LunarG/VulkanSamples)** - official Khronos Vulkan samples. License: Apache-style.\n- **[GFXReconstruct](https://github.com/LunarG/gfxreconstruct)** - a tool for the capture and replay of graphics API calls. License: MIT.\n- **[Anvil](https://github.com/GPUOpen-LibrariesAndSDKs/Anvil)** - cross-platform framework for Vulkan. License: MIT.\n- **[Filament](https://github.com/google/filament)** - physically based rendering engine for Android, Windows, Linux and macOS, from Google. Apache License 2.0.\n- **[Atypical Games - proprietary game engine](https://developer.samsung.com/galaxy-gamedev/gamedev-blog/infinitejet.html)**\n- **[Flax Engine](https://flaxengine.com/)**\n- **[Godot Engine](https://github.com/godotengine/godot/)** - multi-platform 2D and 3D game engine. License: MIT.\n- **[Lightweight Java Game Library (LWJGL)](https://www.lwjgl.org/)** - includes binding of the library for Java. License: BSD.\n- **[LightweightVK](https://github.com/corporateshark/lightweightvk)** - lightweight C++ bindless Vulkan 1.3 wrapper. 
License: MIT.\n- **[PowerVR SDK](https://github.com/powervr-graphics/Native_SDK)** - C++ cross-platform 3D graphics SDK, from Imagination. License: MIT.\n- **[Skia](https://github.com/google/skia)** - complete 2D graphic library for drawing Text, Geometries, and Images, from Google.\n- **[The Forge](https://github.com/ConfettiFX/The-Forge)** - cross-platform rendering framework. Apache License 2.0.\n- **[VK9](https://github.com/disks86/VK9)** - Direct3D 9 compatibility layer using Vulkan. Zlib license.\n- **[vkDOOM3](https://github.com/DustinHLand/vkDOOM3)** - Vulkan port of GPL DOOM 3 BFG Edition. License: GNU GPL.\n- **[vkQuake2](https://github.com/kondrak/vkQuake2)** - vanilla Quake 2 with Vulkan support. License: GNU GPL.\n- **[Vulkan Best Practice for Mobile Developers](https://github.com/ARM-software/vulkan_best_practice_for_mobile_developers)** from ARM. License: MIT.\n- **[RPCS3](https://github.com/RPCS3/rpcs3)** - PlayStation 3 emulator/debugger. License: GNU GPLv2.\n- **[PPSSPP](https://github.com/hrydgard/ppsspp)** - Playstation Portable emulator/debugger. License: GNU GPLv2+.\n- **[Wicked Engine](https://github.com/turanszkij/WickedEngine)** - 3D engine with modern graphics \n\n[Many other projects on GitHub](https://github.com/search?q=AMD_VULKAN_MEMORY_ALLOCATOR_H&type=Code) and some game development studios that use Vulkan in their games.\n\n# See also\n\n- **[D3D12 Memory Allocator](https://github.com/GPUOpen-LibrariesAndSDKs/D3D12MemoryAllocator)** - equivalent library for Direct3D 12. License: MIT.\n- **[Awesome Vulkan](https://github.com/vinjn/awesome-vulkan)** - a curated list of awesome Vulkan libraries, debuggers and resources.\n- **[vcpkg](https://github.com/Microsoft/vcpkg)** dependency manager from Microsoft also offers a port of this library.\n- **[VulkanMemoryAllocator-Hpp](https://github.com/YaaZ/VulkanMemoryAllocator-Hpp)** - C++ binding for this library. 
License: CC0-1.0.\n- **[PyVMA](https://github.com/realitix/pyvma)** - Python wrapper for this library. Author: Jean-Sébastien B. (@realitix). License: Apache 2.0.\n- **[vk-mem](https://github.com/gwihlidal/vk-mem-rs)** - Rust binding for this library. Author: Graham Wihlidal. License: Apache 2.0 or MIT.\n- **[Haskell bindings](https://hackage.haskell.org/package/VulkanMemoryAllocator)**, **[github](https://github.com/expipiplus1/vulkan/tree/master/VulkanMemoryAllocator)** - Haskell bindings for this library. Author: Ellie Hermaszewska (@expipiplus1). License BSD-3-Clause.\n- **[vma_sample_sdl](https://github.com/rextimmy/vma_sample_sdl)** - SDL port of the sample app of this library (with the goal of running it on multiple platforms, including MacOS). Author: @rextimmy. License: MIT.\n- **[vulkan-malloc](https://github.com/dylanede/vulkan-malloc)** - Vulkan memory allocation library for Rust. Based on version 1 of this library. Author: Dylan Ede (@dylanede). License: MIT / Apache 2.0.\n"
  },
  {
    "path": "deps/vma/src/vk_mem_alloc.cpp",
    "content": "#define VMA_IMPLEMENTATION\n#define WIN32_LEAN_AND_MEAN\n#include \"vk_mem_alloc.h\""
  },
  {
    "path": "deps/vma/src/vk_mem_alloc.h",
    "content": "//\n// Copyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n//\n\n#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H\n#define AMD_VULKAN_MEMORY_ALLOCATOR_H\n\n/** \\mainpage Vulkan Memory Allocator\n\n<b>Version 3.2.1</b>\n\nCopyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved. 
\\n\nLicense: MIT \\n\nSee also: [product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/),\n[repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)\n\n\n<b>API documentation divided into groups:</b> [Topics](topics.html)\n\n<b>General documentation chapters:</b>\n\n- <b>User guide</b>\n  - \\subpage quick_start\n    - [Project setup](@ref quick_start_project_setup)\n    - [Initialization](@ref quick_start_initialization)\n    - [Resource allocation](@ref quick_start_resource_allocation)\n  - \\subpage choosing_memory_type\n    - [Usage](@ref choosing_memory_type_usage)\n    - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)\n    - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)\n    - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)\n    - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)\n  - \\subpage memory_mapping\n    - [Copy functions](@ref memory_mapping_copy_functions)\n    - [Mapping functions](@ref memory_mapping_mapping_functions)\n    - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)\n    - [Cache flush and invalidate](@ref memory_mapping_cache_control)\n  - \\subpage staying_within_budget\n    - [Querying for budget](@ref staying_within_budget_querying_for_budget)\n    - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)\n  - \\subpage resource_aliasing\n  - \\subpage custom_memory_pools\n    - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)\n    - [When not to use custom pools](@ref custom_memory_pools_when_not_use)\n    - [Linear allocation algorithm](@ref linear_algorithm)\n      - [Free-at-once](@ref linear_algorithm_free_at_once)\n      - [Stack](@ref linear_algorithm_stack)\n      - [Double stack](@ref linear_algorithm_double_stack)\n      - [Ring buffer](@ref linear_algorithm_ring_buffer)\n  - \\subpage 
defragmentation\n  - \\subpage statistics\n    - [Numeric statistics](@ref statistics_numeric_statistics)\n    - [JSON dump](@ref statistics_json_dump)\n  - \\subpage allocation_annotation\n    - [Allocation user data](@ref allocation_user_data)\n    - [Allocation names](@ref allocation_names)\n  - \\subpage virtual_allocator\n  - \\subpage debugging_memory_usage\n    - [Memory initialization](@ref debugging_memory_usage_initialization)\n    - [Margins](@ref debugging_memory_usage_margins)\n    - [Corruption detection](@ref debugging_memory_usage_corruption_detection)\n    - [Leak detection features](@ref debugging_memory_usage_leak_detection)\n  - \\subpage other_api_interop\n- \\subpage usage_patterns\n    - [GPU-only resource](@ref usage_patterns_gpu_only)\n    - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)\n    - [Readback](@ref usage_patterns_readback)\n    - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)\n    - [Other use cases](@ref usage_patterns_other_use_cases)\n- \\subpage configuration\n  - [Pointers to Vulkan functions](@ref config_Vulkan_functions)\n  - [Custom host memory allocator](@ref custom_memory_allocator)\n  - [Device memory allocation callbacks](@ref allocation_callbacks)\n  - [Device heap memory limit](@ref heap_memory_limit)\n- <b>Extension support</b>\n    - \\subpage vk_khr_dedicated_allocation\n    - \\subpage enabling_buffer_device_address\n    - \\subpage vk_ext_memory_priority\n    - \\subpage vk_amd_device_coherent_memory\n    - \\subpage vk_khr_external_memory_win32\n- \\subpage general_considerations\n  - [Thread safety](@ref general_considerations_thread_safety)\n  - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)\n  - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)\n  - [Allocation algorithm](@ref general_considerations_allocation_algorithm)\n  - [Features not supported](@ref 
general_considerations_features_not_supported)\n\n\\defgroup group_init Library initialization\n\n\\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.\n\n\\defgroup group_alloc Memory allocation\n\n\\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.\nMost basic ones being: vmaCreateBuffer(), vmaCreateImage().\n\n\\defgroup group_virtual Virtual allocator\n\n\\brief API elements related to the mechanism of \\ref virtual_allocator - using the core allocation algorithm\nfor user-defined purpose without allocating any real GPU memory.\n\n\\defgroup group_stats Statistics\n\n\\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.\nSee documentation chapter: \\ref statistics.\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#if !defined(VULKAN_H_)\n#include <vulkan/vulkan.h>\n#endif\n\n#if !defined(VMA_VULKAN_VERSION)\n    #if defined(VK_VERSION_1_4)\n        #define VMA_VULKAN_VERSION 1004000\n    #elif defined(VK_VERSION_1_3)\n        #define VMA_VULKAN_VERSION 1003000\n    #elif defined(VK_VERSION_1_2)\n        #define VMA_VULKAN_VERSION 1002000\n    #elif defined(VK_VERSION_1_1)\n        #define VMA_VULKAN_VERSION 1001000\n    #else\n        #define VMA_VULKAN_VERSION 1000000\n    #endif\n#endif\n\n#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS\n    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\n    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\n    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\n    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\n    extern PFN_vkAllocateMemory vkAllocateMemory;\n    extern PFN_vkFreeMemory vkFreeMemory;\n    extern PFN_vkMapMemory vkMapMemory;\n    extern PFN_vkUnmapMemory vkUnmapMemory;\n    extern 
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n    extern PFN_vkBindBufferMemory vkBindBufferMemory;\n    extern PFN_vkBindImageMemory vkBindImageMemory;\n    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n    extern PFN_vkCreateBuffer vkCreateBuffer;\n    extern PFN_vkDestroyBuffer vkDestroyBuffer;\n    extern PFN_vkCreateImage vkCreateImage;\n    extern PFN_vkDestroyImage vkDestroyImage;\n    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n    #if VMA_VULKAN_VERSION >= 1001000\n        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\n        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\n        extern PFN_vkBindImageMemory2 vkBindImageMemory2;\n        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\n    #endif // #if VMA_VULKAN_VERSION >= 1001000\n#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES\n\n#if !defined(VMA_DEDICATED_ALLOCATION)\n    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation\n        #define VMA_DEDICATED_ALLOCATION 1\n    #else\n        #define VMA_DEDICATED_ALLOCATION 0\n    #endif\n#endif\n\n#if !defined(VMA_BIND_MEMORY2)\n    #if VK_KHR_bind_memory2\n        #define VMA_BIND_MEMORY2 1\n    #else\n        #define VMA_BIND_MEMORY2 0\n    #endif\n#endif\n\n#if !defined(VMA_MEMORY_BUDGET)\n    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)\n        #define VMA_MEMORY_BUDGET 1\n    #else\n        #define VMA_MEMORY_BUDGET 0\n    #endif\n#endif\n\n// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.\n#if 
!defined(VMA_BUFFER_DEVICE_ADDRESS)\n    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000\n        #define VMA_BUFFER_DEVICE_ADDRESS 1\n    #else\n        #define VMA_BUFFER_DEVICE_ADDRESS 0\n    #endif\n#endif\n\n// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.\n#if !defined(VMA_MEMORY_PRIORITY)\n    #if VK_EXT_memory_priority\n        #define VMA_MEMORY_PRIORITY 1\n    #else\n        #define VMA_MEMORY_PRIORITY 0\n    #endif\n#endif\n\n// Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers.\n#if !defined(VMA_KHR_MAINTENANCE4)\n    #if VK_KHR_maintenance4\n        #define VMA_KHR_MAINTENANCE4 1\n    #else\n        #define VMA_KHR_MAINTENANCE4 0\n    #endif\n#endif\n\n// Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers.\n#if !defined(VMA_KHR_MAINTENANCE5)\n    #if VK_KHR_maintenance5\n        #define VMA_KHR_MAINTENANCE5 1\n    #else\n        #define VMA_KHR_MAINTENANCE5 0\n    #endif\n#endif\n\n\n// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.\n#if !defined(VMA_EXTERNAL_MEMORY)\n    #if VK_KHR_external_memory\n        #define VMA_EXTERNAL_MEMORY 1\n    #else\n        #define VMA_EXTERNAL_MEMORY 0\n    #endif\n#endif\n\n// Defined to 1 when VK_KHR_external_memory_win32 device extension is defined in Vulkan headers.\n#if !defined(VMA_EXTERNAL_MEMORY_WIN32)\n    #if VK_KHR_external_memory_win32\n        #define VMA_EXTERNAL_MEMORY_WIN32 1\n    #else\n        #define VMA_EXTERNAL_MEMORY_WIN32 0\n    #endif\n#endif\n\n// Define these macros to decorate all public functions with additional code,\n// before and after returned type, appropriately. This may be useful for\n// exporting the functions when compiling VMA as a separate library. 
Example:\n// #define VMA_CALL_PRE  __declspec(dllexport)\n// #define VMA_CALL_POST __cdecl\n#ifndef VMA_CALL_PRE\n    #define VMA_CALL_PRE\n#endif\n#ifndef VMA_CALL_POST\n    #define VMA_CALL_POST\n#endif\n\n// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan\n// structure that will be extended via the pNext chain.\n#ifndef VMA_EXTENDS_VK_STRUCT\n    #define VMA_EXTENDS_VK_STRUCT(vkStruct)\n#endif\n\n// Define this macro to decorate pointers with an attribute specifying the\n// length of the array they point to if they are not null.\n//\n// The length may be one of\n// - The name of another parameter in the argument list where the pointer is declared\n// - The name of another member in the struct where the pointer is declared\n// - The name of a member of a struct type, meaning the value of that member in\n//   the context of the call. For example\n//   VMA_LEN_IF_NOT_NULL(\"VkPhysicalDeviceMemoryProperties::memoryHeapCount\"),\n//   this means the number of memory heaps available in the device associated\n//   with the VmaAllocator being dealt with.\n#ifndef VMA_LEN_IF_NOT_NULL\n    #define VMA_LEN_IF_NOT_NULL(len)\n#endif\n\n// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.\n// see: https://clang.llvm.org/docs/AttributeReference.html#nullable\n#ifndef VMA_NULLABLE\n    #ifdef __clang__\n        #define VMA_NULLABLE _Nullable\n    #else\n        #define VMA_NULLABLE\n    #endif\n#endif\n\n// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.\n// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull\n#ifndef VMA_NOT_NULL\n    #ifdef __clang__\n        #define VMA_NOT_NULL _Nonnull\n    #else\n        #define VMA_NOT_NULL\n    #endif\n#endif\n\n// If non-dispatchable handles are represented as pointers then we can give\n// then nullability annotations\n#ifndef VMA_NOT_NULL_NON_DISPATCHABLE\n    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && 
!defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)\n        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL\n    #else\n        #define VMA_NOT_NULL_NON_DISPATCHABLE\n    #endif\n#endif\n\n#ifndef VMA_NULLABLE_NON_DISPATCHABLE\n    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)\n        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE\n    #else\n        #define VMA_NULLABLE_NON_DISPATCHABLE\n    #endif\n#endif\n\n#ifndef VMA_STATS_STRING_ENABLED\n    #define VMA_STATS_STRING_ENABLED 1\n#endif\n\n////////////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////////////\n//\n//    INTERFACE\n//\n////////////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////////////\n\n// Sections for managing code placement in file, only for development purposes e.g. 
for convenient folding inside an IDE.\n#ifndef _VMA_ENUM_DECLARATIONS\n\n/**\n\\addtogroup group_init\n@{\n*/\n\n/// Flags for created #VmaAllocator.\ntypedef enum VmaAllocatorCreateFlagBits\n{\n    /** \\brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.\n\n    Using this flag may increase performance because internal mutexes are not used.\n    */\n    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,\n    /** \\brief Enables usage of VK_KHR_dedicated_allocation extension.\n\n    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.\n    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.\n\n    Using this extension will automatically allocate dedicated blocks of memory for\n    some buffers and images instead of suballocating place for them out of bigger\n    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT\n    flag) when it is recommended by the driver. It may improve performance on some\n    GPUs.\n\n    You may set this flag only if you found out that following device extensions are\n    supported, you enabled them while creating Vulkan device passed as\n    VmaAllocatorCreateInfo::device, and you want them to be used internally by this\n    library:\n\n    - VK_KHR_get_memory_requirements2 (device extension)\n    - VK_KHR_dedicated_allocation (device extension)\n\n    When this flag is set, you can experience following warnings reported by Vulkan\n    validation layer. 
You can ignore them.\n\n    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.\n    */\n    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,\n    /**\n    Enables usage of VK_KHR_bind_memory2 extension.\n\n    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.\n    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.\n\n    You may set this flag only if you found out that this device extension is supported,\n    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,\n    and you want it to be used internally by this library.\n\n    The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,\n    which allow to pass a chain of `pNext` structures while binding.\n    This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().\n    */\n    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,\n    /**\n    Enables usage of VK_EXT_memory_budget extension.\n\n    You may set this flag only if you found out that this device extension is supported,\n    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,\n    and you want it to be used internally by this library, along with another instance extension\n    VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).\n\n    The extension provides query for current memory usage and budget, which will probably\n    be more accurate than an estimation used by the library otherwise.\n    */\n    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,\n    /**\n    Enables usage of VK_AMD_device_coherent_memory extension.\n\n    You may set this flag only if you:\n\n    - found out that this device extension is supported and enabled it while creating Vulkan 
device passed as VmaAllocatorCreateInfo::device,\n    - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,\n    - want it to be used internally by this library.\n\n    The extension and accompanying device feature provide access to memory types with\n    `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.\n    They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.\n\n    When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.\n    To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,\n    returning `VK_ERROR_FEATURE_NOT_PRESENT`.\n    */\n    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,\n    /**\n    Enables usage of \"buffer device address\" feature, which allows you to use function\n    `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.\n\n    You may set this flag only if you:\n\n    1. (For Vulkan version < 1.2) Found as available and enabled device extension\n    VK_KHR_buffer_device_address.\n    This extension is promoted to core Vulkan 1.2.\n    2. 
Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.\n\n    When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.\n    The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to\n    allocated memory blocks wherever it might be needed.\n\n    For more information, see documentation chapter \\ref enabling_buffer_device_address.\n    */\n    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,\n    /**\n    Enables usage of VK_EXT_memory_priority extension in the library.\n\n    You may set this flag only if you found available and enabled this device extension,\n    along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,\n    while creating Vulkan device passed as VmaAllocatorCreateInfo::device.\n\n    When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority\n    are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.\n\n    A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.\n    Larger values are higher priority. 
The granularity of the priorities is implementation-dependent.\n    It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.\n    The value to be used for default priority is 0.5.\n    For more details, see the documentation of the VK_EXT_memory_priority extension.\n    */\n    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,\n    /**\n    Enables usage of VK_KHR_maintenance4 extension in the library.\n\n    You may set this flag only if you found available and enabled this device extension,\n    while creating Vulkan device passed as VmaAllocatorCreateInfo::device.\n    */\n    VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080,\n    /**\n    Enables usage of VK_KHR_maintenance5 extension in the library.\n\n    You should set this flag if you found available and enabled this device extension,\n    while creating Vulkan device passed as VmaAllocatorCreateInfo::device.\n    */\n    VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100,\n\n    /**\n    Enables usage of VK_KHR_external_memory_win32 extension in the library.\n\n    You should set this flag if you found available and enabled this device extension,\n    while creating Vulkan device passed as VmaAllocatorCreateInfo::device.\n    For more information, see \\ref vk_khr_external_memory_win32.\n    */\n    VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT = 0x00000200,\n\n    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaAllocatorCreateFlagBits;\n/// See #VmaAllocatorCreateFlagBits.\ntypedef VkFlags VmaAllocatorCreateFlags;\n\n/** @} */\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/// \\brief Intended usage of the allocated memory.\ntypedef enum VmaMemoryUsage\n{\n    /** No intended memory usage specified.\n    Use other members of VmaAllocationCreateInfo to specify your requirements.\n    */\n    VMA_MEMORY_USAGE_UNKNOWN = 0,\n    /**\n    \\deprecated Obsolete, preserved for backward compatibility.\n    
Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.\n    */\n    VMA_MEMORY_USAGE_GPU_ONLY = 1,\n    /**\n    \\deprecated Obsolete, preserved for backward compatibility.\n    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.\n    */\n    VMA_MEMORY_USAGE_CPU_ONLY = 2,\n    /**\n    \\deprecated Obsolete, preserved for backward compatibility.\n    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.\n    */\n    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,\n    /**\n    \\deprecated Obsolete, preserved for backward compatibility.\n    Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.\n    */\n    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,\n    /**\n    \\deprecated Obsolete, preserved for backward compatibility.\n    Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.\n    */\n    VMA_MEMORY_USAGE_CPU_COPY = 5,\n    /**\n    Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.\n    Exists mostly on mobile platforms. 
Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.\n\n    Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.\n\n    Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n    */\n    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,\n    /**\n    Selects best memory type automatically.\n    This flag is recommended for most common use cases.\n\n    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),\n    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT\n    in VmaAllocationCreateInfo::flags.\n\n    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.\n    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()\n    and not with generic memory allocation functions.\n    */\n    VMA_MEMORY_USAGE_AUTO = 7,\n    /**\n    Selects best memory type automatically with preference for GPU (device) memory.\n\n    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),\n    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT\n    in VmaAllocationCreateInfo::flags.\n\n    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.\n    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()\n    and not with generic memory allocation functions.\n    */\n    VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,\n    /**\n    Selects best memory type automatically with preference for 
CPU (host) memory.\n\n    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),\n    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT\n    in VmaAllocationCreateInfo::flags.\n\n    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.\n    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()\n    and not with generic memory allocation functions.\n    */\n    VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,\n\n    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF\n} VmaMemoryUsage;\n\n/// Flags to be passed as VmaAllocationCreateInfo::flags.\ntypedef enum VmaAllocationCreateFlagBits\n{\n    /** \\brief Set this flag if the allocation should have its own memory block.\n\n    Use it for special, big resources, like fullscreen images used as attachments.\n\n    If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo`\n    structure is applied if possible.\n    */\n    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,\n\n    /** \\brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.\n\n    If new allocation cannot be placed in any of the existing blocks, allocation\n    fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.\n\n    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and\n    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. 
It makes no sense.\n    */\n    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,\n    /** \\brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.\n\n    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.\n\n    It is valid to use this flag for allocation made from memory type that is not\n    `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is\n    useful if you need an allocation that is efficient to use on GPU\n    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that\n    support it (e.g. Intel GPU).\n    */\n    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,\n    /** \\deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.\n\n    Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a\n    null-terminated string. Instead of copying pointer value, a local copy of the\n    string is made and stored in allocation's `pName`. The string is automatically\n    freed together with the allocation. It is also used in vmaBuildStatsString().\n    */\n    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,\n    /** Allocation will be created from upper stack in a double stack pool.\n\n    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.\n    */\n    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,\n    /** Create both buffer/image and allocation, but don't bind them together.\n    It is useful when you want to bind yourself to do some more advanced binding, e.g. 
using some extensions.\n    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().\n    Otherwise it is ignored.\n\n    If you want to make sure the new buffer/image is not tied to the new memory allocation\n    through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,\n    use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.\n    */\n    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,\n    /** Create allocation only if additional device memory required for it, if any, won't exceed\n    memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\n    */\n    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,\n    /** \\brief Set this flag if the allocated memory will have aliasing resources.\n\n    Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.\n    Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.\n    */\n    VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,\n    /**\n    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).\n\n    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,\n      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.\n    - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.\n      This includes allocations created in \\ref custom_memory_pools.\n\n    Declares that mapped memory will only be written sequentially, e.g. 
using `memcpy()` or a loop writing number-by-number,\n    never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.\n\n    \\warning Violating this declaration may work correctly, but will likely be very slow.\n    Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`\n    Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.\n    */\n    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,\n    /**\n    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).\n\n    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,\n      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.\n    - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.\n      This includes allocations created in \\ref custom_memory_pools.\n\n    Declares that mapped memory can be read, written, and accessed in random order,\n    so a `HOST_CACHED` memory type is preferred.\n    */\n    VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,\n    /**\n    Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,\n    it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected\n    if it may improve performance.\n\n    By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type\n    (e.g. 
using vmaGetAllocationMemoryProperties()) and if not, you will create some \"staging\" buffer and\n    issue an explicit transfer to write/read your data.\n    To prepare for this possibility, don't forget to add appropriate flags like\n    `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.\n    */\n    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,\n    /** Allocation strategy that chooses smallest possible free range for the allocation\n    to minimize memory usage and fragmentation, possibly at the expense of allocation time.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,\n    /** Allocation strategy that chooses first suitable free range for the allocation -\n    not necessarily in terms of the smallest offset but the one that is easiest and fastest to find\n    to minimize allocation time, possibly at the expense of allocation quality.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,\n    /** Allocation strategy that chooses always the lowest offset in available space.\n    This is not the most efficient strategy but achieves highly packed data.\n    Used internally by defragmentation, not recommended in typical usage.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT  = 0x00040000,\n    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,\n    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,\n    /** A bit mask to extract only `STRATEGY` bits from entire set of flags.\n    */\n    VMA_ALLOCATION_CREATE_STRATEGY_MASK =\n        VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |\n        VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |\n        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,\n\n 
   VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaAllocationCreateFlagBits;\n/// See #VmaAllocationCreateFlagBits.\ntypedef VkFlags VmaAllocationCreateFlags;\n\n/// Flags to be passed as VmaPoolCreateInfo::flags.\ntypedef enum VmaPoolCreateFlagBits\n{\n    /** \\brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.\n\n    This is an optional optimization flag.\n\n    If you always allocate using vmaCreateBuffer(), vmaCreateImage(),\n    vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator\n    knows exact type of your allocations so it can handle Buffer-Image Granularity\n    in the optimal way.\n\n    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),\n    exact type of such allocations is not known, so allocator must be conservative\n    in handling Buffer-Image Granularity, which can lead to suboptimal allocation\n    (wasted memory). In that case, if you can make sure you always allocate only\n    buffers and linear images or only optimal images out of this pool, use this flag\n    to make allocator disregard Buffer-Image Granularity and so make allocations\n    faster and more optimal.\n    */\n    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,\n\n    /** \\brief Enables alternative, linear allocation algorithm in this pool.\n\n    Specify this flag to enable linear allocation algorithm, which always creates\n    new allocations after last one and doesn't reuse space from allocations freed in\n    between. 
It trades memory consumption for simplified algorithm and data\n    structure, which has better performance and uses less memory for metadata.\n\n    By using this flag, you can achieve behavior of free-at-once, stack,\n    ring buffer, and double stack.\n    For details, see documentation chapter \ref linear_algorithm.\n    */\n    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,\n\n    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.\n    */\n    VMA_POOL_CREATE_ALGORITHM_MASK =\n        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,\n\n    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaPoolCreateFlagBits;\n/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.\ntypedef VkFlags VmaPoolCreateFlags;\n\n/// Flags to be passed as VmaDefragmentationInfo::flags.\ntypedef enum VmaDefragmentationFlagBits\n{\n    /** \brief Use simple but fast algorithm for defragmentation.\n    May not achieve best results but will require least time to compute and least allocations to copy.\n    */\n    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,\n    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.\n    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.\n    */\n    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,\n    /** \brief Perform full defragmentation of memory.\n    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.\n    */\n    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,\n    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.\n    Only available when bufferImageGranularity is greater than 1, since it aims to reduce\n    alignment issues between different types of resources.\n    Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.\n    */\n    
VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,\n\n    /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.\n    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =\n        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |\n        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |\n        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |\n        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,\n\n    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaDefragmentationFlagBits;\n/// See #VmaDefragmentationFlagBits.\ntypedef VkFlags VmaDefragmentationFlags;\n\n/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.\ntypedef enum VmaDefragmentationMoveOperation\n{\n    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().\n    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,\n    /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.\n    VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,\n    /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.\n    VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,\n} VmaDefragmentationMoveOperation;\n\n/** @} */\n\n/**\n\\addtogroup group_virtual\n@{\n*/\n\n/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.\ntypedef enum VmaVirtualBlockCreateFlagBits\n{\n    /** \\brief Enables alternative, linear allocation algorithm in this virtual block.\n\n    Specify this flag to enable linear allocation algorithm, which always creates\n    new allocations after last one and doesn't reuse space from allocations freed in\n    between. 
It trades memory consumption for simplified algorithm and data\n    structure, which has better performance and uses less memory for metadata.\n\n    By using this flag, you can achieve behavior of free-at-once, stack,\n    ring buffer, and double stack.\n    For details, see documentation chapter \\ref linear_algorithm.\n    */\n    VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,\n\n    /** \\brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.\n    */\n    VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =\n        VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,\n\n    VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaVirtualBlockCreateFlagBits;\n/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.\ntypedef VkFlags VmaVirtualBlockCreateFlags;\n\n/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.\ntypedef enum VmaVirtualAllocationCreateFlagBits\n{\n    /** \\brief Allocation will be created from upper stack in a double stack pool.\n\n    This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.\n    */\n    VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,\n    /** \\brief Allocation strategy that tries to minimize memory usage.\n    */\n    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,\n    /** \\brief Allocation strategy that tries to minimize allocation time.\n    */\n    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,\n    /** Allocation strategy that chooses always the lowest offset in available space.\n    This is not the most efficient strategy but achieves highly packed data.\n    */\n    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,\n    /** \\brief A bit mask to extract only `STRATEGY` bits from entire set of 
flags.\n\n    These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.\n    */\n    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,\n\n    VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VmaVirtualAllocationCreateFlagBits;\n/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.\ntypedef VkFlags VmaVirtualAllocationCreateFlags;\n\n/** @} */\n\n#endif // _VMA_ENUM_DECLARATIONS\n\n#ifndef _VMA_DATA_TYPES_DECLARATIONS\n\n/**\n\\addtogroup group_init\n@{ */\n\n/** \\struct VmaAllocator\n\\brief Represents main object of this library initialized.\n\nFill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.\nCall function vmaDestroyAllocator() to destroy it.\n\nIt is recommended to create just one object of this type per `VkDevice` object,\nright after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.\n*/\nVK_DEFINE_HANDLE(VmaAllocator)\n\n/** @} */\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/** \\struct VmaPool\n\\brief Represents custom memory pool\n\nFill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.\nCall function vmaDestroyPool() to destroy it.\n\nFor more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).\n*/\nVK_DEFINE_HANDLE(VmaPool)\n\n/** \\struct VmaAllocation\n\\brief Represents single memory allocation.\n\nIt may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type\nplus unique offset.\n\nThere are multiple ways to create such object.\nYou need to fill structure VmaAllocationCreateInfo.\nFor more information see [Choosing memory type](@ref choosing_memory_type).\n\nAlthough the library provides convenience functions that create Vulkan buffer or image,\nallocate memory for it and bind them together,\nbinding of the allocation to a buffer or an 
image is out of scope of the allocation itself.\nAllocation object can exist without buffer/image bound,\nbinding can be done manually by the user, and destruction of it can be done\nindependently of destruction of the allocation.\n\nThe object also remembers its size and some other information.\nTo retrieve this information, use function vmaGetAllocationInfo() and inspect\nreturned structure VmaAllocationInfo.\n*/\nVK_DEFINE_HANDLE(VmaAllocation)\n\n/** \\struct VmaDefragmentationContext\n\\brief An opaque object that represents started defragmentation process.\n\nFill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.\nCall function vmaEndDefragmentation() to destroy it.\n*/\nVK_DEFINE_HANDLE(VmaDefragmentationContext)\n\n/** @} */\n\n/**\n\\addtogroup group_virtual\n@{\n*/\n\n/** \\struct VmaVirtualAllocation\n\\brief Represents single memory allocation done inside VmaVirtualBlock.\n\nUse it as a unique identifier to virtual allocation within the single block.\n\nUse value `VK_NULL_HANDLE` to represent a null/invalid allocation.\n*/\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)\n\n/** @} */\n\n/**\n\\addtogroup group_virtual\n@{\n*/\n\n/** \\struct VmaVirtualBlock\n\\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory.\n\nFill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. 
Use vmaDestroyVirtualBlock() to destroy it.\nFor more information, see documentation chapter \\ref virtual_allocator.\n\nThis object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.\n*/\nVK_DEFINE_HANDLE(VmaVirtualBlock)\n\n/** @} */\n\n/**\n\\addtogroup group_init\n@{\n*/\n\n/// Callback function called after successful vkAllocateMemory.\ntypedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(\n    VmaAllocator VMA_NOT_NULL                    allocator,\n    uint32_t                                     memoryType,\n    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,\n    VkDeviceSize                                 size,\n    void* VMA_NULLABLE                           pUserData);\n\n/// Callback function called before vkFreeMemory.\ntypedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(\n    VmaAllocator VMA_NOT_NULL                    allocator,\n    uint32_t                                     memoryType,\n    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,\n    VkDeviceSize                                 size,\n    void* VMA_NULLABLE                           pUserData);\n\n/** \\brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.\n\nProvided for informative purpose, e.g. 
to gather statistics about number of\nallocations or total amount of memory allocated in Vulkan.\n\nUsed in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.\n*/\ntypedef struct VmaDeviceMemoryCallbacks\n{\n    /// Optional, can be null.\n    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;\n    /// Optional, can be null.\n    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;\n    /// Optional, can be null.\n    void* VMA_NULLABLE pUserData;\n} VmaDeviceMemoryCallbacks;\n\n/** \\brief Pointers to some Vulkan functions - a subset used by the library.\n\nUsed in VmaAllocatorCreateInfo::pVulkanFunctions.\n*/\ntypedef struct VmaVulkanFunctions\n{\n    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.\n    PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;\n    /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.\n    PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;\n    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;\n    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;\n    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;\n    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;\n    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;\n    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;\n    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;\n    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;\n    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;\n    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;\n    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;\n    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;\n    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;\n    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;\n    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;\n    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;\n    PFN_vkCmdCopyBuffer VMA_NULLABLE 
vkCmdCopyBuffer;\n#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    /// Fetch \"vkGetBufferMemoryRequirements2\" on Vulkan >= 1.1, fetch \"vkGetBufferMemoryRequirements2KHR\" when using VK_KHR_dedicated_allocation extension.\n    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;\n    /// Fetch \"vkGetImageMemoryRequirements2\" on Vulkan >= 1.1, fetch \"vkGetImageMemoryRequirements2KHR\" when using VK_KHR_dedicated_allocation extension.\n    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;\n#endif\n#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000\n    /// Fetch \"vkBindBufferMemory2\" on Vulkan >= 1.1, fetch \"vkBindBufferMemory2KHR\" when using VK_KHR_bind_memory2 extension.\n    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;\n    /// Fetch \"vkBindImageMemory2\" on Vulkan >= 1.1, fetch \"vkBindImageMemory2KHR\" when using VK_KHR_bind_memory2 extension.\n    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;\n#endif\n#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000\n    /// Fetch from \"vkGetPhysicalDeviceMemoryProperties2\" on Vulkan >= 1.1, but you can also fetch it from \"vkGetPhysicalDeviceMemoryProperties2KHR\" if you enabled extension VK_KHR_get_physical_device_properties2.\n    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;\n#endif\n#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000\n    /// Fetch from \"vkGetDeviceBufferMemoryRequirements\" on Vulkan >= 1.3, but you can also fetch it from \"vkGetDeviceBufferMemoryRequirementsKHR\" if you enabled extension VK_KHR_maintenance4.\n    PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;\n    /// Fetch from \"vkGetDeviceImageMemoryRequirements\" on Vulkan >= 1.3, but you can also fetch it from \"vkGetDeviceImageMemoryRequirementsKHR\" if you enabled extension VK_KHR_maintenance4.\n    
PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements;\n#endif\n#if VMA_EXTERNAL_MEMORY_WIN32\n    PFN_vkGetMemoryWin32HandleKHR VMA_NULLABLE vkGetMemoryWin32HandleKHR;\n#else\n    void* VMA_NULLABLE vkGetMemoryWin32HandleKHR;\n#endif\n} VmaVulkanFunctions;\n\n/// Description of a Allocator to be created.\ntypedef struct VmaAllocatorCreateInfo\n{\n    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.\n    VmaAllocatorCreateFlags flags;\n    /// Vulkan physical device.\n    /** It must be valid throughout whole lifetime of created allocator. */\n    VkPhysicalDevice VMA_NOT_NULL physicalDevice;\n    /// Vulkan device.\n    /** It must be valid throughout whole lifetime of created allocator. */\n    VkDevice VMA_NOT_NULL device;\n    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.\n    /** Set to 0 to use default, which is currently 256 MiB. */\n    VkDeviceSize preferredLargeHeapBlockSize;\n    /// Custom CPU memory allocation callbacks. Optional.\n    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */\n    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;\n    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.\n    /** Optional, can be null. */\n    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;\n    /** \\brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.\n\n    If not NULL, it must be a pointer to an array of\n    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on\n    maximum number of bytes that can be allocated out of particular Vulkan memory\n    heap.\n\n    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that\n    heap. 
This is also the default in case of `pHeapSizeLimit` = NULL.\n\n    If there is a limit defined for a heap:\n\n    - If user tries to allocate more memory from that heap using this allocator,\n      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\n    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the\n      value of this limit will be reported instead when using vmaGetMemoryProperties().\n\n    Warning! Using this feature may not be equivalent to installing a GPU with\n    smaller amount of memory, because graphics driver doesn't necessary fail new\n    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is\n    exceeded. It may return success and just silently migrate some device memory\n    blocks to system RAM. This driver behavior can also be controlled using\n    VK_AMD_memory_overallocation_behavior extension.\n    */\n    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(\"VkPhysicalDeviceMemoryProperties::memoryHeapCount\") pHeapSizeLimit;\n\n    /** \\brief Pointers to Vulkan functions. Can be null.\n\n    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).\n    */\n    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;\n    /** \\brief Handle to Vulkan instance object.\n\n    Starting from version 3.0.0 this member is no longer optional, it must be set!\n    */\n    VkInstance VMA_NOT_NULL instance;\n    /** \\brief Optional. Vulkan version that the application uses.\n\n    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.\n    The patch version number specified is ignored. 
Only the major and minor versions are considered.\n    Only versions 1.0...1.4 are supported by the current implementation.\n    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.\n    It must match the Vulkan version used by the application and supported on the selected physical device,\n    so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance`\n    and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used.\n    */\n    uint32_t vulkanApiVersion;\n#if VMA_EXTERNAL_MEMORY\n    /** \\brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.\n\n    If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`\n    elements, defining external memory handle types of particular Vulkan memory type,\n    to be passed using `VkExportMemoryAllocateInfoKHR`.\n\n    Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.\n    This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.\n    */\n    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(\"VkPhysicalDeviceMemoryProperties::memoryTypeCount\") pTypeExternalMemoryHandleTypes;\n#endif // #if VMA_EXTERNAL_MEMORY\n} VmaAllocatorCreateInfo;\n\n/// Information about existing #VmaAllocator object.\ntypedef struct VmaAllocatorInfo\n{\n    /** \\brief Handle to Vulkan instance object.\n\n    This is the same value as has been passed through VmaAllocatorCreateInfo::instance.\n    */\n    VkInstance VMA_NOT_NULL instance;\n    /** \\brief Handle to Vulkan physical device object.\n\n    This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.\n    */\n    VkPhysicalDevice VMA_NOT_NULL physicalDevice;\n    /** \\brief Handle to Vulkan device object.\n\n    This is the same value as has been passed through 
VmaAllocatorCreateInfo::device.\n    */\n    VkDevice VMA_NOT_NULL device;\n} VmaAllocatorInfo;\n\n/** @} */\n\n/**\n\\addtogroup group_stats\n@{\n*/\n\n/** \\brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.\n\nThese are fast to calculate.\nSee functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().\n*/\ntypedef struct VmaStatistics\n{\n    /** \\brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.\n    */\n    uint32_t blockCount;\n    /** \\brief Number of #VmaAllocation objects allocated.\n\n    Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.\n    */\n    uint32_t allocationCount;\n    /** \\brief Number of bytes allocated in `VkDeviceMemory` blocks.\n\n    \\note To avoid confusion, please be aware that what Vulkan calls an \"allocation\" - a whole `VkDeviceMemory` object\n    (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a \"block\" in VMA, while VMA calls\n    \"allocation\" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.\n    */\n    VkDeviceSize blockBytes;\n    /** \\brief Total number of bytes occupied by all #VmaAllocation objects.\n\n    Always less or equal than `blockBytes`.\n    Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan\n    but unused by any #VmaAllocation.\n    */\n    VkDeviceSize allocationBytes;\n} VmaStatistics;\n\n/** \\brief More detailed statistics than #VmaStatistics.\n\nThese are slower to calculate. 
Use for debugging purposes.\nSee functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().\n\nPrevious version of the statistics API provided averages, but they have been removed\nbecause they can be easily calculated as:\n\n\\code\nVkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;\nVkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;\nVkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;\n\\endcode\n*/\ntypedef struct VmaDetailedStatistics\n{\n    /// Basic statistics.\n    VmaStatistics statistics;\n    /// Number of free ranges of memory between allocations.\n    uint32_t unusedRangeCount;\n    /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.\n    VkDeviceSize allocationSizeMin;\n    /// Largest allocation size. 0 if there are 0 allocations.\n    VkDeviceSize allocationSizeMax;\n    /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.\n    VkDeviceSize unusedRangeSizeMin;\n    /// Largest empty range size. 0 if there are 0 empty ranges.\n    VkDeviceSize unusedRangeSizeMax;\n} VmaDetailedStatistics;\n\n/** \\brief  General statistics from current state of the Allocator -\ntotal memory usage across all memory heaps and types.\n\nThese are slower to calculate. 
Use for debugging purposes.\nSee function vmaCalculateStatistics().\n*/\ntypedef struct VmaTotalStatistics\n{\n    VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];\n    VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];\n    VmaDetailedStatistics total;\n} VmaTotalStatistics;\n\n/** \\brief Statistics of current memory usage and available budget for a specific memory heap.\n\nThese are fast to calculate.\nSee function vmaGetHeapBudgets().\n*/\ntypedef struct VmaBudget\n{\n    /** \\brief Statistics fetched from the library.\n    */\n    VmaStatistics statistics;\n    /** \\brief Estimated current memory usage of the program, in bytes.\n\n    Fetched from system using VK_EXT_memory_budget extension if enabled.\n\n    It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects\n    also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or\n    `VkDeviceMemory` blocks allocated outside of this library, if any.\n    */\n    VkDeviceSize usage;\n    /** \\brief Estimated amount of memory available to the program, in bytes.\n\n    Fetched from system using VK_EXT_memory_budget extension if enabled.\n\n    It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors\n    external to the program, decided by the operating system.\n    Difference `budget - usage` is the amount of additional memory that can probably\n    be allocated without problems. 
Exceeding the budget may result in various problems.\n    */\n    VkDeviceSize budget;\n} VmaBudget;\n\n/** @} */\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/** \\brief Parameters of new #VmaAllocation.\n\nTo be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.\n*/\ntypedef struct VmaAllocationCreateInfo\n{\n    /// Use #VmaAllocationCreateFlagBits enum.\n    VmaAllocationCreateFlags flags;\n    /** \\brief Intended usage of memory.\n\n    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \\n\n    If `pool` is not null, this member is ignored.\n    */\n    VmaMemoryUsage usage;\n    /** \\brief Flags that must be set in a Memory Type chosen for an allocation.\n\n    Leave 0 if you specify memory requirements in other way. \\n\n    If `pool` is not null, this member is ignored.*/\n    VkMemoryPropertyFlags requiredFlags;\n    /** \\brief Flags that preferably should be set in a memory type chosen for an allocation.\n\n    Set to 0 if no additional flags are preferred. \\n\n    If `pool` is not null, this member is ignored. */\n    VkMemoryPropertyFlags preferredFlags;\n    /** \\brief Bitmask containing one bit set for every memory type acceptable for this allocation.\n\n    Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if\n    it meets other requirements specified by this structure, with no further\n    restrictions on memory type index. \\n\n    If `pool` is not null, this member is ignored.\n    */\n    uint32_t memoryTypeBits;\n    /** \\brief Pool that this allocation should be created in.\n\n    Leave `VK_NULL_HANDLE` to allocate from default pool. 
If not null, members:\n    `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.\n    */\n    VmaPool VMA_NULLABLE pool;\n    /** \\brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().\n\n    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either\n    null or pointer to a null-terminated string. The string will be then copied to\n    internal buffer, so it doesn't need to be valid after allocation call.\n    */\n    void* VMA_NULLABLE pUserData;\n    /** \\brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.\n\n    It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object\n    and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n    Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.\n    */\n    float priority;\n} VmaAllocationCreateInfo;\n\n/// Describes parameter of created #VmaPool.\ntypedef struct VmaPoolCreateInfo\n{\n    /** \\brief Vulkan memory type index to allocate this pool from.\n    */\n    uint32_t memoryTypeIndex;\n    /** \\brief Use combination of #VmaPoolCreateFlagBits.\n    */\n    VmaPoolCreateFlags flags;\n    /** \\brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. 
Optional.\n\n    Specify nonzero to set explicit, constant size of memory blocks used by this\n    pool.\n\n    Leave 0 to use default and let the library manage block sizes automatically.\n    Sizes of particular blocks may vary.\n    In this case, the pool will also support dedicated allocations.\n    */\n    VkDeviceSize blockSize;\n    /** \\brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.\n\n    Set to 0 to have no preallocated blocks and allow the pool be completely empty.\n    */\n    size_t minBlockCount;\n    /** \\brief Maximum number of blocks that can be allocated in this pool. Optional.\n\n    Set to 0 to use default, which is `SIZE_MAX`, which means no limit.\n\n    Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated\n    throughout whole lifetime of this pool.\n    */\n    size_t maxBlockCount;\n    /** \\brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.\n\n    It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.\n    Otherwise, this variable is ignored.\n    */\n    float priority;\n    /** \\brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.\n\n    Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.\n    It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,\n    e.g. when doing interop with OpenGL.\n    */\n    VkDeviceSize minAllocationAlignment;\n    /** \\brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.\n\n    Optional, can be null. 
If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.\n    It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.\n    Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.\n\n    Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,\n    can be attached automatically by this library when using other, more convenient of its features.\n    */\n    void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;\n} VmaPoolCreateInfo;\n\n/** @} */\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/**\nParameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().\n\nThere is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2.\n*/\ntypedef struct VmaAllocationInfo\n{\n    /** \\brief Memory type index that this allocation was allocated from.\n\n    It never changes.\n    */\n    uint32_t memoryType;\n    /** \\brief Handle to Vulkan memory object.\n\n    Same memory object can be shared by multiple allocations.\n\n    It can change after the allocation is moved during \\ref defragmentation.\n    */\n    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;\n    /** \\brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.\n\n    You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function\n    vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,\n    not entire device memory block. 
Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation\n    and apply this offset automatically.\n\n    It can change after the allocation is moved during \\ref defragmentation.\n    */\n    VkDeviceSize offset;\n    /** \\brief Size of this allocation, in bytes.\n\n    It never changes.\n\n    \\note Allocation size returned in this variable may be greater than the size\n    requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the\n    allocation is accessible for operations on memory e.g. using a pointer after\n    mapping with vmaMapMemory(), but operations on the resource e.g. using\n    `vkCmdCopyBuffer` must be limited to the size of the resource.\n    */\n    VkDeviceSize size;\n    /** \\brief Pointer to the beginning of this allocation as mapped data.\n\n    If the allocation hasn't been mapped using vmaMapMemory() and hasn't been\n    created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.\n\n    It can change after call to vmaMapMemory(), vmaUnmapMemory().\n    It can also change after the allocation is moved during \\ref defragmentation.\n    */\n    void* VMA_NULLABLE pMappedData;\n    /** \\brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().\n\n    It can change after call to vmaSetAllocationUserData() for this allocation.\n    */\n    void* VMA_NULLABLE pUserData;\n    /** \\brief Custom allocation name that was set with vmaSetAllocationName().\n\n    It can change after call to vmaSetAllocationName() for this allocation.\n\n    Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with\n    additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].\n    */\n    const char* VMA_NULLABLE pName;\n} VmaAllocationInfo;\n\n/// Extended parameters of a #VmaAllocation object that can be retrieved using function 
vmaGetAllocationInfo2().\ntypedef struct VmaAllocationInfo2\n{\n    /** \\brief Basic parameters of the allocation.\n    \n    If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead.\n    */\n    VmaAllocationInfo allocationInfo;\n    /** \\brief Size of the `VkDeviceMemory` block that the allocation belongs to.\n    \n    In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`.\n    */\n    VkDeviceSize blockSize;\n    /** \\brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block.\n    \n    When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation\n    (if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled).\n    */\n    VkBool32 dedicatedMemory;\n} VmaAllocationInfo2;\n\n/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.\n\nShould return true if the defragmentation needs to stop current pass.\n*/\ntypedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);\n\n/** \\brief Parameters for defragmentation.\n\nTo be used with function vmaBeginDefragmentation().\n*/\ntypedef struct VmaDefragmentationInfo\n{\n    /// \\brief Use combination of #VmaDefragmentationFlagBits.\n    VmaDefragmentationFlags flags;\n    /** \\brief Custom pool to be defragmented.\n\n    If null then default pools will undergo defragmentation process.\n    */\n    VmaPool VMA_NULLABLE pool;\n    /** \\brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.\n\n    `0` means no limit.\n    */\n    VkDeviceSize maxBytesPerPass;\n    /** \\brief Maximum number of allocations that can be moved during single pass to a different place.\n\n    `0` means no limit.\n    */\n    uint32_t maxAllocationsPerPass;\n    
/** \\brief Optional custom callback for stopping vmaBeginDefragmentation().\n\n    Have to return true for breaking current defragmentation pass.\n    */\n    PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;\n    /// \\brief Optional data to pass to custom callback for stopping pass of defragmentation.\n    void* VMA_NULLABLE pBreakCallbackUserData;\n} VmaDefragmentationInfo;\n\n/// Single move of an allocation to be done for defragmentation.\ntypedef struct VmaDefragmentationMove\n{\n    /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.\n    VmaDefragmentationMoveOperation operation;\n    /// Allocation that should be moved.\n    VmaAllocation VMA_NOT_NULL srcAllocation;\n    /** \\brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.\n\n    \\warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,\n    to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().\n    vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.\n    */\n    VmaAllocation VMA_NOT_NULL dstTmpAllocation;\n} VmaDefragmentationMove;\n\n/** \\brief Parameters for incremental defragmentation steps.\n\nTo be used with function vmaBeginDefragmentationPass().\n*/\ntypedef struct VmaDefragmentationPassMoveInfo\n{\n    /// Number of elements in the `pMoves` array.\n    uint32_t moveCount;\n    /** \\brief Array of moves to be performed by the user in the current defragmentation pass.\n\n    Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().\n\n    For each element, you should:\n\n    1. 
Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset.\n    2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.\n    3. Make sure these commands finished executing on the GPU.\n    4. Destroy the old buffer/image.\n\n    Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().\n    After this call, the allocation will point to the new place in memory.\n\n    Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.\n\n    Alternatively, if you decide you want to completely remove the allocation:\n\n    1. Destroy its buffer/image.\n    2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.\n\n    Then, after vmaEndDefragmentationPass() the allocation will be freed.\n    */\n    VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;\n} VmaDefragmentationPassMoveInfo;\n\n/// Statistics returned for defragmentation process in function vmaEndDefragmentation().\ntypedef struct VmaDefragmentationStats\n{\n    /// Total number of bytes that have been copied while moving allocations to different places.\n    VkDeviceSize bytesMoved;\n    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.\n    VkDeviceSize bytesFreed;\n    /// Number of allocations that have been moved to different places.\n    uint32_t allocationsMoved;\n    /// Number of empty `VkDeviceMemory` objects that have been released to the system.\n    uint32_t deviceMemoryBlocksFreed;\n} VmaDefragmentationStats;\n\n/** @} */\n\n/**\n\\addtogroup group_virtual\n@{\n*/\n\n/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().\ntypedef struct VmaVirtualBlockCreateInfo\n{\n    /** \\brief Total size of the virtual block.\n\n    Sizes 
can be expressed in bytes or any units you want as long as you are consistent in using them.\n    For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.\n    */\n    VkDeviceSize size;\n\n    /** \\brief Use combination of #VmaVirtualBlockCreateFlagBits.\n    */\n    VmaVirtualBlockCreateFlags flags;\n\n    /** \\brief Custom CPU memory allocation callbacks. Optional.\n\n    Optional, can be null. When specified, they will be used for all CPU-side memory allocations.\n    */\n    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;\n} VmaVirtualBlockCreateInfo;\n\n/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().\ntypedef struct VmaVirtualAllocationCreateInfo\n{\n    /** \\brief Size of the allocation.\n\n    Cannot be zero.\n    */\n    VkDeviceSize size;\n    /** \\brief Required alignment of the allocation. Optional.\n\n    Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset.\n    */\n    VkDeviceSize alignment;\n    /** \\brief Use combination of #VmaVirtualAllocationCreateFlagBits.\n    */\n    VmaVirtualAllocationCreateFlags flags;\n    /** \\brief Custom pointer to be associated with the allocation. Optional.\n\n    It can be any value and can be used for user-defined purposes. 
It can be fetched or changed later.\n    */\n    void* VMA_NULLABLE pUserData;\n} VmaVirtualAllocationCreateInfo;\n\n/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().\ntypedef struct VmaVirtualAllocationInfo\n{\n    /** \\brief Offset of the allocation.\n\n    Offset at which the allocation was made.\n    */\n    VkDeviceSize offset;\n    /** \\brief Size of the allocation.\n\n    Same value as passed in VmaVirtualAllocationCreateInfo::size.\n    */\n    VkDeviceSize size;\n    /** \\brief Custom pointer associated with the allocation.\n\n    Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().\n    */\n    void* VMA_NULLABLE pUserData;\n} VmaVirtualAllocationInfo;\n\n/** @} */\n\n#endif // _VMA_DATA_TYPES_DECLARATIONS\n\n#ifndef _VMA_FUNCTION_HEADERS\n\n/**\n\\addtogroup group_init\n@{\n*/\n\n/// Creates #VmaAllocator object.\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(\n    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);\n\n/// Destroys allocator object.\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(\n    VmaAllocator VMA_NULLABLE allocator);\n\n/** \\brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.\n\nIt might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to\n`VkPhysicalDevice`, `VkDevice` etc. 
every time using this function.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);\n\n/**\nPhysicalDeviceProperties are fetched from physicalDevice by the allocator.\nYou can access it here, without fetching it again on your own.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);\n\n/**\nPhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.\nYou can access it here, without fetching it again on your own.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);\n\n/**\n\\brief Given Memory Type Index, returns Property Flags of this memory type.\n\nThis is just a convenience function. Same information can be obtained using\nvmaGetMemoryProperties().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t memoryTypeIndex,\n    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);\n\n/** \\brief Sets index of the current frame.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t frameIndex);\n\n/** @} */\n\n/**\n\\addtogroup group_stats\n@{\n*/\n\n/** \\brief Retrieves statistics from current state of the Allocator.\n\nThis function is called \"calculate\" not \"get\" because it has to traverse all\ninternal data structures, so it may be quite slow. 
Use it for debugging purposes.\nFor faster but more brief statistics suitable to be called every frame or every allocation,\nuse vmaGetHeapBudgets().\n\nNote that when using allocator from multiple threads, returned information may immediately\nbecome outdated.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaTotalStatistics* VMA_NOT_NULL pStats);\n\n/** \\brief Retrieves information about current memory usage and budget for all memory heaps.\n\n\\param allocator\n\\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.\n\nThis function is called \"get\" not \"calculate\" because it is very fast, suitable to be called\nevery frame or every allocation. For more detailed statistics use vmaCalculateStatistics().\n\nNote that when using allocator from multiple threads, returned information may immediately\nbecome outdated.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(\"VkPhysicalDeviceMemoryProperties::memoryHeapCount\") pBudgets);\n\n/** @} */\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/**\n\\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.\n\nThis algorithm tries to find a memory type that:\n\n- Is allowed by memoryTypeBits.\n- Contains all the flags from pAllocationCreateInfo->requiredFlags.\n- Matches intended usage.\n- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.\n\n\\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result\nfrom this function or any other allocating function probably means that your\ndevice doesn't support any memory type with requested features for the specific\ntype of resource you want to use it for. 
Please check parameters of your\nresource, like image layout (OPTIMAL versus LINEAR) or mip level count.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t memoryTypeBits,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);\n\n/**\n\\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.\n\nIt can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.\nIt internally creates a temporary, dummy buffer that never has memory bound.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);\n\n/**\n\\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.\n\nIt can be useful e.g. 
to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.\nIt internally creates a temporary, dummy image that never has memory bound.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);\n\n/** \\brief Allocates Vulkan device memory and creates #VmaPool object.\n\n\\param allocator Allocator object.\n\\param pCreateInfo Parameters of pool to create.\n\\param[out] pPool Handle to created pool.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);\n\n/** \\brief Destroys #VmaPool object and frees Vulkan device memory.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NULLABLE pool);\n\n/** @} */\n\n/**\n\\addtogroup group_stats\n@{\n*/\n\n/** \\brief Retrieves statistics of existing #VmaPool object.\n\n\\param allocator Allocator object.\n\\param pool Pool object.\n\\param[out] pPoolStats Statistics of specified pool.\n\nNote that when using the pool from multiple threads, returned information may immediately\nbecome outdated.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NOT_NULL pool,\n    VmaStatistics* VMA_NOT_NULL pPoolStats);\n\n/** \\brief Retrieves detailed statistics of existing #VmaPool object.\n\n\\param allocator Allocator object.\n\\param pool Pool object.\n\\param[out] pPoolStats Statistics of specified pool.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NOT_NULL pool,\n    VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);\n\n/** @} 
*/\n\n/**\n\\addtogroup group_alloc\n@{\n*/\n\n/** \\brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.\n\nCorruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,\n`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is\n`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).\n\nPossible return values:\n\n- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.\n- `VK_SUCCESS` - corruption detection has been performed and succeeded.\n- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.\n  `VMA_ASSERT` is also fired in that case.\n- Other value: Error returned by Vulkan, e.g. memory mapping failure.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NOT_NULL pool);\n\n/** \\brief Retrieves name of a custom pool.\n\nAfter the call `ppName` is either null or points to an internally-owned null-terminated string\ncontaining name of the pool that was previously set. 
The pointer becomes invalid when the pool is\ndestroyed or its name is changed using vmaSetPoolName().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NOT_NULL pool,\n    const char* VMA_NULLABLE* VMA_NOT_NULL ppName);\n\n/** \\brief Sets name of a custom pool.\n\n`pName` can be either null or pointer to a null-terminated string with new name for the pool.\nFunction makes internal copy of the string, so it can be changed or freed immediately after this call.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaPool VMA_NOT_NULL pool,\n    const char* VMA_NULLABLE pName);\n\n/** \\brief General purpose memory allocation.\n\n\\param allocator\n\\param pVkMemoryRequirements\n\\param pCreateInfo\n\\param[out] pAllocation Handle to allocated memory.\n\\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().\n\nYou should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().\n\nIt is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),\nvmaCreateBuffer(), vmaCreateImage() instead whenever possible.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/** \\brief General purpose memory allocation for multiple allocation objects at once.\n\n\\param allocator Allocator object.\n\\param pVkMemoryRequirements Memory requirements for each allocation.\n\\param pCreateInfo Creation parameters for each allocation.\n\\param allocationCount Number of allocations to make.\n\\param[out] pAllocations Pointer to array that will be filled with handles to created 
allocations.\n\\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.\n\nYou should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().\n\nWord \"pages\" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.\nIt is just a general purpose allocation function able to make multiple allocations at once.\nIt may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.\n\nAll allocations are made using same parameters. All of them are created out of the same memory pool and type.\nIf any allocation fails, all allocations already made within this function call are also freed, so that when\nreturned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,\n    size_t allocationCount,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,\n    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);\n\n/** \\brief Allocates memory suitable for given `VkBuffer`.\n\n\\param allocator\n\\param buffer\n\\param pCreateInfo\n\\param[out] pAllocation Handle to allocated memory.\n\\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().\n\nIt only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().\n\nThis is a special-purpose function. 
In most cases you should use vmaCreateBuffer().\n\nYou must free the allocation using vmaFreeMemory() when no longer needed.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/** \\brief Allocates memory suitable for given `VkImage`.\n\n\\param allocator\n\\param image\n\\param pCreateInfo\n\\param[out] pAllocation Handle to allocated memory.\n\\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().\n\nIt only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().\n\nThis is a special-purpose function. In most cases you should use vmaCreateImage().\n\nYou must free the allocation using vmaFreeMemory() when no longer needed.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/** \\brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().\n\nPassing `VK_NULL_HANDLE` as `allocation` is valid. 
Such function call is just skipped.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VmaAllocation VMA_NULLABLE allocation);\n\n/** \\brief Frees memory and destroys multiple allocations.\n\nWord \"pages\" is just a suggestion to use this function to free pieces of memory used for sparse binding.\nIt is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),\nvmaAllocateMemoryPages() and other functions.\nIt may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.\n\nAllocations in `pAllocations` array can come from any memory pools and types.\nPassing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(\n    VmaAllocator VMA_NOT_NULL allocator,\n    size_t allocationCount,\n    const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);\n\n/** \\brief Returns current information about specified allocation.\n\nCurrent parameters of given allocation are returned in `pAllocationInfo`.\n\nAlthough this function doesn't lock any mutex, so it should be quite efficient,\nyou should avoid calling it too often.\nYou can retrieve same VmaAllocationInfo structure while creating your resource, from function\nvmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change\n(e.g. 
due to defragmentation).\n\nThere is also a new function vmaGetAllocationInfo2() that offers extended information\nabout the allocation, returned using new structure #VmaAllocationInfo2.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);\n\n/** \\brief Returns extended information about specified allocation.\n\nCurrent parameters of given allocation are returned in `pAllocationInfo`.\nExtended parameters in structure #VmaAllocationInfo2 include memory block size\nand a flag telling whether the allocation has dedicated memory.\nIt can be useful e.g. for interop with OpenGL.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo);\n\n/** \\brief Sets pUserData in given allocation to new value.\n\nThe value of pointer `pUserData` is copied to allocation's `pUserData`.\nIt is opaque, so you can use it however you want - e.g.\nas a pointer, ordinal number or some handle to your own data.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    void* VMA_NULLABLE pUserData);\n\n/** \\brief Sets pName in given allocation to new value.\n\n`pName` must be either null, or pointer to a null-terminated string. The function\nmakes local copy of the string and sets it as allocation's `pName`. String\npassed as pName doesn't need to be valid for whole lifetime of the allocation -\nyou can free it after this call. 
String previously pointed by allocation's\n`pName` is freed from memory.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const char* VMA_NULLABLE pName);\n\n/**\n\\brief Given an allocation, returns Property Flags of its memory type.\n\nThis is just a convenience function. Same information can be obtained using\nvmaGetAllocationInfo() + vmaGetMemoryProperties().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);\n\n\n#if VMA_EXTERNAL_MEMORY_WIN32\n/**\n\\brief Given an allocation, returns Win32 handle that may be imported by other processes or APIs.\n\n\\param hTargetProcess Must be a valid handle to target process or null. If it's null, the function returns\n    handle for the current process.\n\\param[out] pHandle Output parameter that returns the handle.\n\nThe function fills `pHandle` with handle that can be used in target process.\nThe handle is fetched using function `vkGetMemoryWin32HandleKHR`.\nWhen no longer needed, you must close it using:\n\n\\code\nCloseHandle(handle);\n\\endcode\n\nYou can close it any time, before or after destroying the allocation object.\nIt is reference-counted internally by Windows.\n\nNote the handle is returned for the entire `VkDeviceMemory` block that the allocation belongs to.\nIf the allocation is sub-allocated from a larger block, you may need to consider the offset of the allocation\n(VmaAllocationInfo::offset).\n\nIf the function fails with `VK_ERROR_FEATURE_NOT_PRESENT` error code, please double-check\nthat VmaVulkanFunctions::vkGetMemoryWin32HandleKHR function pointer is set, e.g. 
either by using `VMA_DYNAMIC_VULKAN_FUNCTIONS`\nor by manually passing it through VmaAllocatorCreateInfo::pVulkanFunctions.\n\nFor more information, see chapter \\ref vk_khr_external_memory_win32.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle);\n#endif // VMA_EXTERNAL_MEMORY_WIN32\n\n/** \\brief Maps memory represented by given allocation and returns pointer to it.\n\nMaps memory represented by given allocation to make it accessible to CPU code.\nWhen succeeded, `*ppData` contains pointer to first byte of this memory.\n\n\\warning\nIf the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is\ncorrectly offsetted to the beginning of region assigned to this particular allocation.\nUnlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.\nYou should not add VmaAllocationInfo::offset to it!\n\nMapping is internally reference-counted and synchronized, so despite raw Vulkan\nfunction `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`\nmultiple times simultaneously, it is safe to call this function on allocations\nassigned to the same memory block. Actual Vulkan memory will be mapped on first\nmapping and unmapped on last unmapping.\n\nIf the function succeeded, you must call vmaUnmapMemory() to unmap the\nallocation when mapping is no longer needed or before freeing the allocation, at\nthe latest.\n\nIt is also safe to call this function multiple times on the same allocation. You\nmust call vmaUnmapMemory() same number of times as you called vmaMapMemory().\n\nIt is also safe to call this function on allocation created with\n#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.\nYou must still call vmaUnmapMemory() same number of times as you called\nvmaMapMemory(). 
You must not call vmaUnmapMemory() additional time to free the\n\"0-th\" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.\n\nThis function fails when used on allocation made in memory type that is not\n`HOST_VISIBLE`.\n\nThis function doesn't automatically flush or invalidate caches.\nIf the allocation is made from a memory type that is not `HOST_COHERENT`,\nyou also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    void* VMA_NULLABLE* VMA_NOT_NULL ppData);\n\n/** \\brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().\n\nFor details, see description of vmaMapMemory().\n\nThis function doesn't automatically flush or invalidate caches.\nIf the allocation is made from a memory type that is not `HOST_COHERENT`,\nyou also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation);\n\n/** \\brief Flushes memory of given allocation.\n\nCalls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.\nIt needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.\nUnmap operation doesn't do that automatically.\n\n- `offset` must be relative to the beginning of allocation.\n- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.\n- `offset` and `size` don't have to be aligned.\n  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.\n- If `size` is 0, this call is ignored.\n- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,\n  this call is ignored.\n\nWarning! 
`offset` and `size` are relative to the contents of given `allocation`.\nIf you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.\nDo not pass allocation's offset as `offset`!!!\n\nThis function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is\ncalled, otherwise `VK_SUCCESS`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize offset,\n    VkDeviceSize size);\n\n/** \\brief Invalidates memory of given allocation.\n\nCalls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.\nIt needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.\nMap operation doesn't do that automatically.\n\n- `offset` must be relative to the beginning of allocation.\n- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.\n- `offset` and `size` don't have to be aligned.\n  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.\n- If `size` is 0, this call is ignored.\n- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,\n  this call is ignored.\n\nWarning! 
`offset` and `size` are relative to the contents of given `allocation`.\nIf you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.\nDo not pass allocation's offset as `offset`!!!\n\nThis function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if\nit is called, otherwise `VK_SUCCESS`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize offset,\n    VkDeviceSize size);\n\n/** \\brief Flushes memory of given set of allocations.\n\nCalls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.\nFor more information, see documentation of vmaFlushAllocation().\n\n\\param allocator\n\\param allocationCount\n\\param allocations\n\\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.\n\\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. 
Null means `VK_WHOLE_SIZE` for all allocations.\n\nThis function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is\ncalled, otherwise `VK_SUCCESS`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t allocationCount,\n    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,\n    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,\n    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);\n\n/** \\brief Invalidates memory of given set of allocations.\n\nCalls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.\nFor more information, see documentation of vmaInvalidateAllocation().\n\n\\param allocator\n\\param allocationCount\n\\param allocations\n\\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.\n\\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. 
Null means `VK_WHOLE_SIZE` for all allocations.\n\nThis function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is\ncalled, otherwise `VK_SUCCESS`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t allocationCount,\n    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,\n    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,\n    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);\n\n/** \\brief Maps the allocation temporarily if needed, copies data from specified host pointer to it, and flushes the memory from the host caches if needed.\n\n\\param allocator\n\\param pSrcHostPointer Pointer to the host data that becomes source of the copy.\n\\param dstAllocation   Handle to the allocation that becomes destination of the copy.\n\\param dstAllocationLocalOffset  Offset within `dstAllocation` where to write copied data, in bytes.\n\\param size            Number of bytes to copy.\n\nThis is a convenience function that allows to copy data from a host pointer to an allocation easily.\nSame behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation().\n\nThis function can be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.\nIt can be ensured e.g. 
by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\nOtherwise, the function will fail and generate a Validation Layers error.\n\n`dstAllocationLocalOffset` is relative to the contents of given `dstAllocation`.\nIf you mean whole allocation, you should pass 0.\nDo not pass allocation's offset within device memory block as this parameter!\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer,\n    VmaAllocation VMA_NOT_NULL dstAllocation,\n    VkDeviceSize dstAllocationLocalOffset,\n    VkDeviceSize size);\n\n/** \\brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer.\n\n\\param allocator\n\\param srcAllocation   Handle to the allocation that becomes source of the copy.\n\\param srcAllocationLocalOffset  Offset within `srcAllocation` where to read copied data, in bytes.\n\\param pDstHostPointer Pointer to the host memory that becomes destination of the copy.\n\\param size            Number of bytes to copy.\n\nThis is a convenience function that allows to copy data from an allocation to a host pointer easily.\nSame behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory().\n\nThis function should be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`\nand `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flag.\nIt can be ensured e.g. 
by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\nOtherwise, the function may fail and generate a Validation Layers error.\nIt may also work very slowly when reading from an uncached memory.\n\n`srcAllocationLocalOffset` is relative to the contents of given `srcAllocation`.\nIf you mean whole allocation, you should pass 0.\nDo not pass allocation's offset within device memory block as this parameter!\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL srcAllocation,\n    VkDeviceSize srcAllocationLocalOffset,\n    void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer,\n    VkDeviceSize size);\n\n/** \\brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.\n\n\\param allocator\n\\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.\n\nCorruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,\n`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are\n`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).\n\nPossible return values:\n\n- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.\n- `VK_SUCCESS` - corruption detection has been performed and succeeded.\n- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.\n  `VMA_ASSERT` is also fired in that case.\n- Other value: Error returned by Vulkan, e.g. 
memory mapping failure.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(\n    VmaAllocator VMA_NOT_NULL allocator,\n    uint32_t memoryTypeBits);\n\n/** \\brief Begins defragmentation process.\n\n\\param allocator Allocator object.\n\\param pInfo Structure filled with parameters of defragmentation.\n\\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.\n\\returns\n- `VK_SUCCESS` if defragmentation can begin.\n- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.\n\nFor more information about defragmentation, see documentation chapter:\n[Defragmentation](@ref defragmentation).\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,\n    VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);\n\n/** \\brief Ends defragmentation process.\n\n\\param allocator Allocator object.\n\\param context Context object that has been created by vmaBeginDefragmentation().\n\\param[out] pStats Optional stats for the defragmentation. Can be null.\n\nUse this function to finish defragmentation started by vmaBeginDefragmentation().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaDefragmentationContext VMA_NOT_NULL context,\n    VmaDefragmentationStats* VMA_NULLABLE pStats);\n\n/** \\brief Starts single defragmentation pass.\n\n\\param allocator Allocator object.\n\\param context Context object that has been created by vmaBeginDefragmentation().\n\\param[out] pPassInfo Computed information for current pass.\n\\returns\n- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.\n- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. 
You need to perform them, call vmaEndDefragmentationPass(),\n  and then preferably try another pass with vmaBeginDefragmentationPass().\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaDefragmentationContext VMA_NOT_NULL context,\n    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);\n\n/** \\brief Ends single defragmentation pass.\n\n\\param allocator Allocator object.\n\\param context Context object that has been created by vmaBeginDefragmentation().\n\\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.\n\nReturns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.\n\nEnds incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.\nAfter this call:\n\n- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY\n  (which is the default) will be pointing to the new destination place.\n- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY\n  will be freed.\n\nIf no more moves are possible you can end whole defragmentation.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaDefragmentationContext VMA_NOT_NULL context,\n    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);\n\n/** \\brief Binds buffer to allocation.\n\nBinds specified buffer to region of memory represented by specified allocation.\nGets `VkDeviceMemory` handle and offset from the allocation.\nIf you want to create a buffer, allocate memory for it and bind them together separately,\nyou should use this function for binding instead of standard `vkBindBufferMemory()`,\nbecause it ensures proper synchronization so that when a `VkDeviceMemory` object is used by 
multiple\nallocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously\n(which is illegal in Vulkan).\n\nIt is recommended to use function vmaCreateBuffer() instead of this one.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);\n\n/** \\brief Binds buffer to allocation with additional parameters.\n\n\\param allocator\n\\param allocation\n\\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.\n\\param buffer\n\\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.\n\nThis function is similar to vmaBindBufferMemory(), but it provides additional parameters.\n\nIf `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag\nor with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. 
Otherwise the call fails.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,\n    const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);\n\n/** \\brief Binds image to allocation.\n\nBinds specified image to region of memory represented by specified allocation.\nGets `VkDeviceMemory` handle and offset from the allocation.\nIf you want to create an image, allocate memory for it and bind them together separately,\nyou should use this function for binding instead of standard `vkBindImageMemory()`,\nbecause it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple\nallocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously\n(which is illegal in Vulkan).\n\nIt is recommended to use function vmaCreateImage() instead of this one.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);\n\n/** \\brief Binds image to allocation with additional parameters.\n\n\\param allocator\n\\param allocation\n\\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.\n\\param image\n\\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.\n\nThis function is similar to vmaBindImageMemory(), but it provides additional parameters.\n\nIf `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag\nor with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. 
Otherwise the call fails.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,\n    const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);\n\n/** \\brief Creates a new `VkBuffer`, allocates and binds memory for it.\n\n\\param allocator\n\\param pBufferCreateInfo\n\\param pAllocationCreateInfo\n\\param[out] pBuffer Buffer that was created.\n\\param[out] pAllocation Allocation that was created.\n\\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().\n\nThis function automatically:\n\n-# Creates buffer.\n-# Allocates appropriate memory for it.\n-# Binds the buffer with the memory.\n\nIf any of these operations fail, buffer and allocation are not created,\nreturned value is negative error code, `*pBuffer` and `*pAllocation` are null.\n\nIf the function succeeded, you must destroy both buffer and allocation when you\nno longer need them using either convenience function vmaDestroyBuffer() or\nseparately, using `vkDestroyBuffer()` and vmaFreeMemory().\n\nIf #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,\nVK_KHR_dedicated_allocation extension is used internally to query driver whether\nit requires or prefers the new buffer to have dedicated allocation. If yes,\nand if dedicated allocation is possible\n(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated\nallocation for this buffer, just like when using\n#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n\n\\note This function creates a new `VkBuffer`. 
Sub-allocation of parts of one large buffer,\nalthough recommended as a good practice, is out of scope of this library and could be implemented\nby the user as a higher-level logic on top of VMA.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/** \\brief Creates a buffer with additional minimum alignment.\n\nSimilar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,\nminimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.\nfor interop with OpenGL.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    VkDeviceSize minAlignment,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/** \\brief Creates a new `VkBuffer`, binds already created memory for it.\n\n\\param allocator\n\\param allocation Allocation that provides memory to be used for binding new buffer to it.\n\\param pBufferCreateInfo\n\\param[out] pBuffer Buffer that was created.\n\nThis function automatically:\n\n-# Creates buffer.\n-# Binds the buffer with the supplied memory.\n\nIf any of these operations fail, buffer is not created,\nreturned value is negative error code and `*pBuffer` is null.\n\nIf the function succeeded, you must destroy the buffer when you\nno longer need it using `vkDestroyBuffer()`. 
If you want to also destroy the corresponding\nallocation you can use convenience function vmaDestroyBuffer().\n\n\\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);\n\n/** \\brief Creates a new `VkBuffer`, binds already created memory for it.\n\n\\param allocator\n\\param allocation Allocation that provides memory to be used for binding new buffer to it.\n\\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.\n\\param pBufferCreateInfo \n\\param[out] pBuffer Buffer that was created.\n\nThis function automatically:\n\n-# Creates buffer.\n-# Binds the buffer with the supplied memory.\n\nIf any of these operations fail, buffer is not created,\nreturned value is negative error code and `*pBuffer` is null.\n\nIf the function succeeded, you must destroy the buffer when you\nno longer need it using `vkDestroyBuffer()`. 
If you want to also destroy the corresponding\nallocation you can use convenience function vmaDestroyBuffer().\n\n\\note This is a new version of the function augmented with parameter `allocationLocalOffset`.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);\n\n/** \\brief Destroys Vulkan buffer and frees allocated memory.\n\nThis is just a convenience function equivalent to:\n\n\\code\nvkDestroyBuffer(device, buffer, allocationCallbacks);\nvmaFreeMemory(allocator, allocation);\n\\endcode\n\nIt is safe to pass null as buffer and/or allocation.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,\n    VmaAllocation VMA_NULLABLE allocation);\n\n/// Function similar to vmaCreateBuffer().\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,\n    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,\n    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);\n\n/// Function similar to vmaCreateAliasingBuffer() but for images.\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);\n\n/// Function similar to vmaCreateAliasingBuffer2() but for images.\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    
VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);\n\n/** \\brief Destroys Vulkan image and frees allocated memory.\n\nThis is just a convenience function equivalent to:\n\n\\code\nvkDestroyImage(device, image, allocationCallbacks);\nvmaFreeMemory(allocator, allocation);\n\\endcode\n\nIt is safe to pass null as image and/or allocation.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,\n    VmaAllocation VMA_NULLABLE allocation);\n\n/** @} */\n\n/**\n\\addtogroup group_virtual\n@{\n*/\n\n/** \\brief Creates new #VmaVirtualBlock object.\n\n\\param pCreateInfo Parameters for creation.\n\\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(\n    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);\n\n/** \\brief Destroys #VmaVirtualBlock object.\n\nPlease note that you should consciously handle virtual allocations that could remain unfreed in the block.\nYou should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()\nif you are sure this is what you want. 
If you do neither, an assert is called.\n\nIf you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,\ndon't forget to free them.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(\n    VmaVirtualBlock VMA_NULLABLE virtualBlock);\n\n/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.\n*/\nVMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock);\n\n/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);\n\n/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.\n\nIf the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned\n(even though the function doesn't ever allocate actual GPU memory).\n`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.\n\n\param virtualBlock Virtual block\n\param pCreateInfo Parameters for the allocation\n\param[out] pAllocation Returned handle of the new allocation\n\param[out] pOffset Returned offset of the new allocation. 
Optional, can be null.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,\n    VkDeviceSize* VMA_NULLABLE pOffset);\n\n/** \brief Frees virtual allocation inside given #VmaVirtualBlock.\n\nIt is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);\n\n/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.\n\nYou must either call this function or free each virtual allocation individually with vmaVirtualFree()\nbefore destroying a virtual block. Otherwise, an assert is called.\n\nIf you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,\ndon't forget to free it as well.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock);\n\n/** \brief Changes custom pointer associated with given virtual allocation.\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,\n    void* VMA_NULLABLE pUserData);\n\n/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.\n\nThis function is fast to call. 
For more detailed statistics, see vmaCalculateVirtualBlockStatistics().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaStatistics* VMA_NOT_NULL pStats);\n\n/** \\brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.\n\nThis function is slow to call. Use for debugging purposes.\nFor less detailed statistics, see vmaGetVirtualBlockStatistics().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaDetailedStatistics* VMA_NOT_NULL pStats);\n\n/** @} */\n\n#if VMA_STATS_STRING_ENABLED\n/**\n\\addtogroup group_stats\n@{\n*/\n\n/** \\brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.\n\\param virtualBlock Virtual block.\n\\param[out] ppStatsString Returned string.\n\\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). 
Pass `VK_TRUE` to also obtain full list of allocations and free spaces.\n\nReturned string must be freed using vmaFreeVirtualBlockStatsString().\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,\n    VkBool32 detailedMap);\n\n/// Frees a string returned by vmaBuildVirtualBlockStatsString().\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(\n    VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    char* VMA_NULLABLE pStatsString);\n\n/** \\brief Builds and returns statistics as a null-terminated string in JSON format.\n\\param allocator\n\\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.\n\\param detailedMap\n*/\nVMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(\n    VmaAllocator VMA_NOT_NULL allocator,\n    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,\n    VkBool32 detailedMap);\n\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(\n    VmaAllocator VMA_NOT_NULL allocator,\n    char* VMA_NULLABLE pStatsString);\n\n/** @} */\n\n#endif // VMA_STATS_STRING_ENABLED\n\n#endif // _VMA_FUNCTION_HEADERS\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H\n\n////////////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////////////\n//\n//    IMPLEMENTATION\n//\n////////////////////////////////////////////////////////////////////////////////\n////////////////////////////////////////////////////////////////////////////////\n\n// For Visual Studio IntelliSense.\n#if defined(__cplusplus) && defined(__INTELLISENSE__)\n#define VMA_IMPLEMENTATION\n#endif\n\n#ifdef VMA_IMPLEMENTATION\n#undef VMA_IMPLEMENTATION\n\n#include <cstdint>\n#include <cstdlib>\n#include <cstring>\n#include <cinttypes>\n#include <utility>\n#include <type_traits>\n\n#if !defined(VMA_CPP20)\n    #if __cplusplus >= 202002L || _MSVC_LANG 
>= 202002L // C++20\n        #define VMA_CPP20 1\n    #else\n        #define VMA_CPP20 0\n    #endif\n#endif\n\n#ifdef _MSC_VER\n    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.\n#endif\n#if VMA_CPP20\n    #include <bit>\n#endif\n\n#if VMA_STATS_STRING_ENABLED\n    #include <cstdio> // For snprintf\n#endif\n\n/*******************************************************************************\nCONFIGURATION SECTION\n\nDefine some of these macros before each #include of this header or change them\nhere if you need other than the default behavior depending on your environment.\n*/\n#ifndef _VMA_CONFIGURATION\n\n/*\nDefine this macro to 1 to make the library fetch pointers to Vulkan functions\ninternally, like:\n\n    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;\n*/\n#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)\n    #define VMA_STATIC_VULKAN_FUNCTIONS 1\n#endif\n\n/*\nDefine this macro to 1 to make the library fetch pointers to Vulkan functions\ninternally, like:\n\n    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, \"vkAllocateMemory\");\n\nTo use this feature in new versions of VMA you now have to pass\nVmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as\nVmaAllocatorCreateInfo::pVulkanFunctions. 
Other members can be null.\n*/\n#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)\n    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1\n#endif\n\n#ifndef VMA_USE_STL_SHARED_MUTEX\n    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17\n        #define VMA_USE_STL_SHARED_MUTEX 1\n    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus\n    // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.\n    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L\n        #define VMA_USE_STL_SHARED_MUTEX 1\n    #else\n        #define VMA_USE_STL_SHARED_MUTEX 0\n    #endif\n#endif\n\n/*\nDefine this macro to include custom header files without having to edit this file directly, e.g.:\n\n    // Inside of \"my_vma_configuration_user_includes.h\":\n\n    #include \"my_custom_assert.h\" // for MY_CUSTOM_ASSERT\n    #include \"my_custom_min.h\" // for my_custom_min\n    #include <algorithm>\n    #include <mutex>\n\n    // Inside a different file, which includes \"vk_mem_alloc.h\":\n\n    #define VMA_CONFIGURATION_USER_INCLUDES_H \"my_vma_configuration_user_includes.h\"\n    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)\n    #define VMA_MIN(v1, v2)  (my_custom_min(v1, v2))\n    #include \"vk_mem_alloc.h\"\n    ...\n\nThe following headers are used in this CONFIGURATION section only, so feel free to\nremove them if not needed.\n*/\n#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)\n    #include <cassert> // for assert\n    #include <algorithm> // for min, max, swap\n    #include <mutex>\n#else\n    #include VMA_CONFIGURATION_USER_INCLUDES_H\n#endif\n\n#ifndef VMA_NULL\n   // Value used as null pointer. 
Define it to e.g.: nullptr, NULL, 0, (void*)0.\n   #define VMA_NULL   nullptr\n#endif\n\n#ifndef VMA_FALLTHROUGH\n    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17\n        #define VMA_FALLTHROUGH [[fallthrough]]\n    #else\n        #define VMA_FALLTHROUGH\n    #endif\n#endif\n\n// Normal assert to check for programmer's errors, especially in Debug configuration.\n#ifndef VMA_ASSERT\n   #ifdef NDEBUG\n       #define VMA_ASSERT(expr)\n   #else\n       #define VMA_ASSERT(expr)         assert(expr)\n   #endif\n#endif\n\n// Assert that will be called very often, like inside data structures e.g. operator[].\n// Making it non-empty can make program slow.\n#ifndef VMA_HEAVY_ASSERT\n   #ifdef NDEBUG\n       #define VMA_HEAVY_ASSERT(expr)\n   #else\n       #define VMA_HEAVY_ASSERT(expr)   //VMA_ASSERT(expr)\n   #endif\n#endif\n\n// Assert used for reporting memory leaks - unfreed allocations.\n#ifndef VMA_ASSERT_LEAK\n    #define VMA_ASSERT_LEAK(expr)   VMA_ASSERT(expr)\n#endif\n\n// If your compiler is not compatible with C++17 and definition of\n// aligned_alloc() function is missing, uncommenting following line may help:\n\n//#include <malloc.h>\n\n#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)\n#include <cstdlib>\nstatic void* vma_aligned_alloc(size_t alignment, size_t size)\n{\n    // alignment must be >= sizeof(void*)\n    if(alignment < sizeof(void*))\n    {\n        alignment = sizeof(void*);\n    }\n\n    return memalign(alignment, size);\n}\n#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))\n#include <cstdlib>\n\n#if defined(__APPLE__)\n#include <AvailabilityMacros.h>\n#endif\n\nstatic void* vma_aligned_alloc(size_t alignment, size_t size)\n{\n    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. 
(At least under 11.4)\n    // Therefore, for now disable this specific exception until a proper solution is found.\n    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))\n    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0\n    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only\n    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds\n    //    // MAC_OS_X_VERSION_10_16), even though the function is marked\n    //    // available for 10.15. That is why the preprocessor checks for 10.16 but\n    //    // the __builtin_available checks for 10.15.\n    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.\n    //    if (__builtin_available(macOS 10.15, iOS 13, *))\n    //        return aligned_alloc(alignment, size);\n    //#endif\n    //#endif\n\n    // alignment must be >= sizeof(void*)\n    if(alignment < sizeof(void*))\n    {\n        alignment = sizeof(void*);\n    }\n\n    void *pointer;\n    if(posix_memalign(&pointer, alignment, size) == 0)\n        return pointer;\n    return VMA_NULL;\n}\n#elif defined(_WIN32)\nstatic void* vma_aligned_alloc(size_t alignment, size_t size)\n{\n    return _aligned_malloc(size, alignment);\n}\n#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17\nstatic void* vma_aligned_alloc(size_t alignment, size_t size)\n{\n    return aligned_alloc(alignment, size);\n}\n#else\nstatic void* vma_aligned_alloc(size_t alignment, size_t size)\n{\n    VMA_ASSERT(0 && \"Could not implement aligned_alloc automatically. 
Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.\");\n    return VMA_NULL;\n}\n#endif\n\n#if defined(_WIN32)\nstatic void vma_aligned_free(void* ptr)\n{\n    _aligned_free(ptr);\n}\n#else\nstatic void vma_aligned_free(void* VMA_NULLABLE ptr)\n{\n    free(ptr);\n}\n#endif\n\n#ifndef VMA_ALIGN_OF\n   #define VMA_ALIGN_OF(type)       (alignof(type))\n#endif\n\n#ifndef VMA_SYSTEM_ALIGNED_MALLOC\n   #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))\n#endif\n\n#ifndef VMA_SYSTEM_ALIGNED_FREE\n   // VMA_SYSTEM_FREE is the old name, but might have been defined by the user\n   #if defined(VMA_SYSTEM_FREE)\n      #define VMA_SYSTEM_ALIGNED_FREE(ptr)     VMA_SYSTEM_FREE(ptr)\n   #else\n      #define VMA_SYSTEM_ALIGNED_FREE(ptr)     vma_aligned_free(ptr)\n    #endif\n#endif\n\n#ifndef VMA_COUNT_BITS_SET\n    // Returns number of bits set to 1 in (v)\n    #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)\n#endif\n\n#ifndef VMA_BITSCAN_LSB\n    // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX\n    #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)\n#endif\n\n#ifndef VMA_BITSCAN_MSB\n    // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX\n    #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)\n#endif\n\n#ifndef VMA_MIN\n   #define VMA_MIN(v1, v2)    ((std::min)((v1), (v2)))\n#endif\n\n#ifndef VMA_MAX\n   #define VMA_MAX(v1, v2)    ((std::max)((v1), (v2)))\n#endif\n\n#ifndef VMA_SORT\n   #define VMA_SORT(beg, end, cmp)  std::sort(beg, end, cmp)\n#endif\n\n#ifndef VMA_DEBUG_LOG_FORMAT\n   #define VMA_DEBUG_LOG_FORMAT(format, ...)\n   /*\n   #define VMA_DEBUG_LOG_FORMAT(format, ...) 
do { \\\n       printf((format), __VA_ARGS__); \\\n       printf(\"\\n\"); \\\n   } while(false)\n   */\n#endif\n\n#ifndef VMA_DEBUG_LOG\n    #define VMA_DEBUG_LOG(str)   VMA_DEBUG_LOG_FORMAT(\"%s\", (str))\n#endif\n\n#ifndef VMA_LEAK_LOG_FORMAT\n    #define VMA_LEAK_LOG_FORMAT(format, ...)   VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__)\n#endif\n\n#ifndef VMA_CLASS_NO_COPY\n    #define VMA_CLASS_NO_COPY(className) \\\n        private: \\\n            className(const className&) = delete; \\\n            className& operator=(const className&) = delete;\n#endif\n#ifndef VMA_CLASS_NO_COPY_NO_MOVE\n    #define VMA_CLASS_NO_COPY_NO_MOVE(className) \\\n        private: \\\n            className(const className&) = delete; \\\n            className(className&&) = delete; \\\n            className& operator=(const className&) = delete; \\\n            className& operator=(className&&) = delete;\n#endif\n\n// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.\n#if VMA_STATS_STRING_ENABLED\n    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)\n    {\n        snprintf(outStr, strLen, \"%\" PRIu32, num);\n    }\n    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)\n    {\n        snprintf(outStr, strLen, \"%\" PRIu64, num);\n    }\n    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)\n    {\n        snprintf(outStr, strLen, \"%p\", ptr);\n    }\n#endif\n\n#ifndef VMA_MUTEX\n    class VmaMutex\n    {\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)\n    public:\n        VmaMutex() { }\n        void Lock() { m_Mutex.lock(); }\n        void Unlock() { m_Mutex.unlock(); }\n        bool TryLock() { return m_Mutex.try_lock(); }\n    private:\n        std::mutex m_Mutex;\n    };\n    #define VMA_MUTEX VmaMutex\n#endif\n\n// Read-write mutex, where \"read\" is shared access, \"write\" is exclusive access.\n#ifndef VMA_RW_MUTEX\n    #if 
VMA_USE_STL_SHARED_MUTEX\n        // Use std::shared_mutex from C++17.\n        #include <shared_mutex>\n        class VmaRWMutex\n        {\n        public:\n            void LockRead() { m_Mutex.lock_shared(); }\n            void UnlockRead() { m_Mutex.unlock_shared(); }\n            bool TryLockRead() { return m_Mutex.try_lock_shared(); }\n            void LockWrite() { m_Mutex.lock(); }\n            void UnlockWrite() { m_Mutex.unlock(); }\n            bool TryLockWrite() { return m_Mutex.try_lock(); }\n        private:\n            std::shared_mutex m_Mutex;\n        };\n        #define VMA_RW_MUTEX VmaRWMutex\n    #elif defined(_WIN32) && defined(WINVER) && defined(SRWLOCK_INIT) && WINVER >= 0x0600\n        // Use SRWLOCK from WinAPI.\n        // Minimum supported client = Windows Vista, server = Windows Server 2008.\n        class VmaRWMutex\n        {\n        public:\n            VmaRWMutex() { InitializeSRWLock(&m_Lock); }\n            void LockRead() { AcquireSRWLockShared(&m_Lock); }\n            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }\n            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }\n            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }\n            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }\n            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }\n        private:\n            SRWLOCK m_Lock;\n        };\n        #define VMA_RW_MUTEX VmaRWMutex\n    #else\n        // Less efficient fallback: Use normal mutex.\n        class VmaRWMutex\n        {\n        public:\n            void LockRead() { m_Mutex.Lock(); }\n            void UnlockRead() { m_Mutex.Unlock(); }\n            bool TryLockRead() { return m_Mutex.TryLock(); }\n            void LockWrite() { m_Mutex.Lock(); }\n            void UnlockWrite() { m_Mutex.Unlock(); }\n            bool TryLockWrite() { return m_Mutex.TryLock(); }\n        private:\n            VMA_MUTEX m_Mutex;\n       
 };\n        #define VMA_RW_MUTEX VmaRWMutex\n    #endif // #if VMA_USE_STL_SHARED_MUTEX\n#endif // #ifndef VMA_RW_MUTEX\n\n/*\nIf providing your own implementation, you need to implement a subset of std::atomic.\n*/\n#ifndef VMA_ATOMIC_UINT32\n    #include <atomic>\n    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>\n#endif\n\n#ifndef VMA_ATOMIC_UINT64\n    #include <atomic>\n    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>\n#endif\n\n#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY\n    /**\n    Every allocation will have its own memory block.\n    Define to 1 for debugging purposes only.\n    */\n    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)\n#endif\n\n#ifndef VMA_MIN_ALIGNMENT\n    /**\n    Minimum alignment of all allocations, in bytes.\n    Set to more than 1 for debugging purposes. Must be power of two.\n    */\n    #ifdef VMA_DEBUG_ALIGNMENT // Old name\n        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT\n    #else\n        #define VMA_MIN_ALIGNMENT (1)\n    #endif\n#endif\n\n#ifndef VMA_DEBUG_MARGIN\n    /**\n    Minimum margin after every allocation, in bytes.\n    Set nonzero for debugging purposes only.\n    */\n    #define VMA_DEBUG_MARGIN (0)\n#endif\n\n#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS\n    /**\n    Define this macro to 1 to automatically fill new allocations and destroyed\n    allocations with some bit pattern.\n    */\n    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)\n#endif\n\n#ifndef VMA_DEBUG_DETECT_CORRUPTION\n    /**\n    Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to\n    enable writing magic value to the margin after every allocation and\n    validating it, so that memory corruptions (out-of-bounds writes) are detected.\n    */\n    #define VMA_DEBUG_DETECT_CORRUPTION (0)\n#endif\n\n#ifndef VMA_DEBUG_GLOBAL_MUTEX\n    /**\n    Set this to 1 for debugging purposes only, to enable single mutex protecting all\n    entry calls to the library. 
Can be useful for debugging multithreading issues.\n    */\n    #define VMA_DEBUG_GLOBAL_MUTEX (0)\n#endif\n\n#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY\n    /**\n    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.\n    Set to more than 1 for debugging purposes only. Must be power of two.\n    */\n    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)\n#endif\n\n#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT\n    /*\n    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount\n    and return error instead of leaving up to Vulkan implementation what to do in such cases.\n    */\n    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)\n#endif\n\n#ifndef VMA_SMALL_HEAP_MAX_SIZE\n   /// Maximum size of a memory heap in Vulkan to consider it \"small\".\n   #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)\n#endif\n\n#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE\n   /// Default size of a block allocated as single VkDeviceMemory from a \"large\" heap.\n   #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)\n#endif\n\n/*\nMapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called\nor a persistently mapped allocation is created and destroyed several times in a row.\nIt keeps additional +1 mapping of a device memory block to prevent calling actual\nvkMapMemory/vkUnmapMemory too many times, which may improve performance and help\ntools like RenderDoc.\n*/\n#ifndef VMA_MAPPING_HYSTERESIS_ENABLED\n    #define VMA_MAPPING_HYSTERESIS_ENABLED 1\n#endif\n\n#define VMA_VALIDATE(cond) do { if(!(cond)) { \\\n        VMA_ASSERT(0 && \"Validation failed: \" #cond); \\\n        return false; \\\n    } } while(false)\n\n/*******************************************************************************\nEND OF CONFIGURATION\n*/\n#endif // _VMA_CONFIGURATION\n\n\nstatic const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;\nstatic const uint8_t 
VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;\n// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.\nstatic const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;\n\n// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.\nstatic const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;\nstatic const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;\nstatic const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;\nstatic const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;\nstatic const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;\nstatic const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;\nstatic const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;\nstatic const uint32_t VMA_VENDOR_ID_AMD = 4098;\n\n// This one is tricky. Vulkan specification defines this code as available since\n// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.\n// See pull request #207.\n#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)\n\n\n#if VMA_STATS_STRING_ENABLED\n// Correspond to values of enum VmaSuballocationType.\nstatic const char* VMA_SUBALLOCATION_TYPE_NAMES[] =\n{\n    \"FREE\",\n    \"UNKNOWN\",\n    \"BUFFER\",\n    \"IMAGE_UNKNOWN\",\n    \"IMAGE_LINEAR\",\n    \"IMAGE_OPTIMAL\",\n};\n#endif\n\nstatic VkAllocationCallbacks VmaEmptyAllocationCallbacks =\n    { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };\n\n\n#ifndef _VMA_ENUM_DECLARATIONS\n\nenum VmaSuballocationType\n{\n    VMA_SUBALLOCATION_TYPE_FREE = 0,\n    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,\n    VMA_SUBALLOCATION_TYPE_BUFFER = 2,\n    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,\n    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,\n    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,\n    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF\n};\n\nenum VMA_CACHE_OPERATION\n{\n    VMA_CACHE_FLUSH,\n    
VMA_CACHE_INVALIDATE\n};\n\nenum class VmaAllocationRequestType\n{\n    Normal,\n    TLSF,\n    // Used by \"Linear\" algorithm.\n    UpperAddress,\n    EndOf1st,\n    EndOf2nd,\n};\n\n#endif // _VMA_ENUM_DECLARATIONS\n\n#ifndef _VMA_FORWARD_DECLARATIONS\n// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);\n\nstruct VmaMutexLock;\nstruct VmaMutexLockRead;\nstruct VmaMutexLockWrite;\n\ntemplate<typename T>\nstruct AtomicTransactionalIncrement;\n\ntemplate<typename T>\nstruct VmaStlAllocator;\n\ntemplate<typename T, typename AllocatorT>\nclass VmaVector;\n\ntemplate<typename T, typename AllocatorT, size_t N>\nclass VmaSmallVector;\n\ntemplate<typename T>\nclass VmaPoolAllocator;\n\ntemplate<typename T>\nstruct VmaListItem;\n\ntemplate<typename T>\nclass VmaRawList;\n\ntemplate<typename T, typename AllocatorT>\nclass VmaList;\n\ntemplate<typename ItemTypeTraits>\nclass VmaIntrusiveLinkedList;\n\n#if VMA_STATS_STRING_ENABLED\nclass VmaStringBuilder;\nclass VmaJsonWriter;\n#endif\n\nclass VmaDeviceMemoryBlock;\n\nstruct VmaDedicatedAllocationListItemTraits;\nclass VmaDedicatedAllocationList;\n\nstruct VmaSuballocation;\nstruct VmaSuballocationOffsetLess;\nstruct VmaSuballocationOffsetGreater;\nstruct VmaSuballocationItemSizeLess;\n\ntypedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;\n\nstruct VmaAllocationRequest;\n\nclass VmaBlockMetadata;\nclass VmaBlockMetadata_Linear;\nclass VmaBlockMetadata_TLSF;\n\nclass VmaBlockVector;\n\nstruct VmaPoolListItemTraits;\n\nstruct VmaCurrentBudgetData;\n\nclass VmaAllocationObjectAllocator;\n\n#endif // _VMA_FORWARD_DECLARATIONS\n\n\n#ifndef _VMA_FUNCTIONS\n\n/*\nReturns number of bits set to 1 in (v).\n\nOn specific platforms and compilers you can use intrinsics like:\n\nVisual Studio:\n    return __popcnt(v);\nGCC, Clang:\n    return static_cast<uint32_t>(__builtin_popcount(v));\n\nDefine 
macro VMA_COUNT_BITS_SET to provide your optimized implementation.\nBut you need to check in runtime whether user's CPU supports these, as some old processors don't.\n*/\nstatic inline uint32_t VmaCountBitsSet(uint32_t v)\n{\n#if VMA_CPP20\n    return std::popcount(v);\n#else\n    uint32_t c = v - ((v >> 1) & 0x55555555);\n    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);\n    c = ((c >> 4) + c) & 0x0F0F0F0F;\n    c = ((c >> 8) + c) & 0x00FF00FF;\n    c = ((c >> 16) + c) & 0x0000FFFF;\n    return c;\n#endif\n}\n\nstatic inline uint8_t VmaBitScanLSB(uint64_t mask)\n{\n#if defined(_MSC_VER) && defined(_WIN64)\n    unsigned long pos;\n    if (_BitScanForward64(&pos, mask))\n        return static_cast<uint8_t>(pos);\n    return UINT8_MAX;\n#elif VMA_CPP20\n    if(mask)\n        return static_cast<uint8_t>(std::countr_zero(mask));\n    return UINT8_MAX;\n#elif defined __GNUC__ || defined __clang__\n    return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;\n#else\n    uint8_t pos = 0;\n    uint64_t bit = 1;\n    do\n    {\n        if (mask & bit)\n            return pos;\n        bit <<= 1;\n    } while (pos++ < 63);\n    return UINT8_MAX;\n#endif\n}\n\nstatic inline uint8_t VmaBitScanLSB(uint32_t mask)\n{\n#ifdef _MSC_VER\n    unsigned long pos;\n    if (_BitScanForward(&pos, mask))\n        return static_cast<uint8_t>(pos);\n    return UINT8_MAX;\n#elif VMA_CPP20\n    if(mask)\n        return static_cast<uint8_t>(std::countr_zero(mask));\n    return UINT8_MAX;\n#elif defined __GNUC__ || defined __clang__\n    return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;\n#else\n    uint8_t pos = 0;\n    uint32_t bit = 1;\n    do\n    {\n        if (mask & bit)\n            return pos;\n        bit <<= 1;\n    } while (pos++ < 31);\n    return UINT8_MAX;\n#endif\n}\n\nstatic inline uint8_t VmaBitScanMSB(uint64_t mask)\n{\n#if defined(_MSC_VER) && defined(_WIN64)\n    unsigned long pos;\n    if (_BitScanReverse64(&pos, mask))\n        return 
static_cast<uint8_t>(pos);\n#elif VMA_CPP20\n    if(mask)\n        return 63 - static_cast<uint8_t>(std::countl_zero(mask));\n#elif defined __GNUC__ || defined __clang__\n    if (mask)\n        return 63 - static_cast<uint8_t>(__builtin_clzll(mask));\n#else\n    uint8_t pos = 63;\n    uint64_t bit = 1ULL << 63;\n    do\n    {\n        if (mask & bit)\n            return pos;\n        bit >>= 1;\n    } while (pos-- > 0);\n#endif\n    return UINT8_MAX;\n}\n\n// Returns the index of the most significant set bit, or UINT8_MAX if mask == 0.\nstatic inline uint8_t VmaBitScanMSB(uint32_t mask)\n{\n#ifdef _MSC_VER\n    unsigned long pos;\n    if (_BitScanReverse(&pos, mask))\n        return static_cast<uint8_t>(pos);\n#elif VMA_CPP20\n    if(mask)\n        return 31 - static_cast<uint8_t>(std::countl_zero(mask));\n#elif defined __GNUC__ || defined __clang__\n    if (mask)\n        return 31 - static_cast<uint8_t>(__builtin_clz(mask));\n#else\n    uint8_t pos = 31;\n    uint32_t bit = 1UL << 31;\n    do\n    {\n        if (mask & bit)\n            return pos;\n        bit >>= 1;\n    } while (pos-- > 0);\n#endif\n    return UINT8_MAX;\n}\n\n/*\nReturns true if given number is a power of two.\nT must be unsigned integer number or signed integer but always nonnegative.\nFor 0 returns true.\n*/\ntemplate <typename T>\ninline bool VmaIsPow2(T x)\n{\n    return (x & (x - 1)) == 0;\n}\n\n// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.\n// Use types like uint32_t, uint64_t as T.\ntemplate <typename T>\nstatic inline T VmaAlignUp(T val, T alignment)\n{\n    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));\n    return (val + alignment - 1) & ~(alignment - 1);\n}\n\n// Aligns given value down to nearest multiple of align value. 
For example: VmaAlignDown(11, 8) = 8.\n// Use types like uint32_t, uint64_t as T.\ntemplate <typename T>\nstatic inline T VmaAlignDown(T val, T alignment)\n{\n    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));\n    return val & ~(alignment - 1);\n}\n\n// Division with mathematical rounding to nearest number.\ntemplate <typename T>\nstatic inline T VmaRoundDiv(T x, T y)\n{\n    return (x + (y / (T)2)) / y;\n}\n\n// Divide by 'y' and round up to nearest integer.\ntemplate <typename T>\nstatic inline T VmaDivideRoundingUp(T x, T y)\n{\n    return (x + y - (T)1) / y;\n}\n\n// Returns smallest power of 2 greater or equal to v.\nstatic inline uint32_t VmaNextPow2(uint32_t v)\n{\n    v--;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    v++;\n    return v;\n}\n\nstatic inline uint64_t VmaNextPow2(uint64_t v)\n{\n    v--;\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    v |= v >> 32;\n    v++;\n    return v;\n}\n\n// Returns largest power of 2 less or equal to v.\nstatic inline uint32_t VmaPrevPow2(uint32_t v)\n{\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    v = v ^ (v >> 1);\n    return v;\n}\n\nstatic inline uint64_t VmaPrevPow2(uint64_t v)\n{\n    v |= v >> 1;\n    v |= v >> 2;\n    v |= v >> 4;\n    v |= v >> 8;\n    v |= v >> 16;\n    v |= v >> 32;\n    v = v ^ (v >> 1);\n    return v;\n}\n\nstatic inline bool VmaStrIsEmpty(const char* pStr)\n{\n    return pStr == VMA_NULL || *pStr == '\\0';\n}\n\n/*\nReturns true if two memory blocks occupy overlapping pages.\nResourceA must be in less memory offset than ResourceB.\n\nAlgorithm is based on \"Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)\"\nchapter 11.6 \"Resource Memory Association\", paragraph \"Buffer-Image Granularity\".\n*/\nstatic inline bool VmaBlocksOnSamePage(\n    VkDeviceSize resourceAOffset,\n    VkDeviceSize resourceASize,\n    VkDeviceSize 
resourceBOffset,\n    VkDeviceSize pageSize)\n{\n    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);\n    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;\n    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);\n    VkDeviceSize resourceBStart = resourceBOffset;\n    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);\n    return resourceAEndPage == resourceBStartPage;\n}\n\n/*\nReturns true if given suballocation types could conflict and must respect\nVkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer\nor linear image and another one is optimal image. If type is unknown, behave\nconservatively.\n*/\nstatic inline bool VmaIsBufferImageGranularityConflict(\n    VmaSuballocationType suballocType1,\n    VmaSuballocationType suballocType2)\n{\n    if (suballocType1 > suballocType2)\n    {\n        std::swap(suballocType1, suballocType2);\n    }\n\n    switch (suballocType1)\n    {\n    case VMA_SUBALLOCATION_TYPE_FREE:\n        return false;\n    case VMA_SUBALLOCATION_TYPE_UNKNOWN:\n        return true;\n    case VMA_SUBALLOCATION_TYPE_BUFFER:\n        return\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;\n    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:\n        return\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;\n    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:\n        return\n            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;\n    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:\n        return false;\n    default:\n        VMA_ASSERT(0);\n        return true;\n    }\n}\n\nstatic void VmaWriteMagicValue(void* pData, VkDeviceSize offset)\n{\n#if VMA_DEBUG_MARGIN > 0 && 
VMA_DEBUG_DETECT_CORRUPTION\n    uint32_t* pDst = (uint32_t*)((char*)pData + offset);\n    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);\n    for (size_t i = 0; i < numberCount; ++i, ++pDst)\n    {\n        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;\n    }\n#else\n    // no-op\n#endif\n}\n\nstatic bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)\n{\n#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION\n    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);\n    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);\n    for (size_t i = 0; i < numberCount; ++i, ++pSrc)\n    {\n        if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)\n        {\n            return false;\n        }\n    }\n#endif\n    return true;\n}\n\n/*\nFills structure with parameters of an example buffer to be used for transfers\nduring GPU memory defragmentation.\n*/\nstatic void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)\n{\n    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));\n    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;\n    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.\n}\n\n\n/*\nPerforms binary search and returns iterator to first element that is greater or\nequal to (key), according to comparison (cmp).\n\nCmp should return true if first argument is less than second argument.\n\nReturned value is the found element, if present in the collection or place where\nnew element with value (key) should be inserted.\n*/\ntemplate <typename CmpLess, typename IterT, typename KeyT>\nstatic IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)\n{\n    size_t down = 0, up = size_t(end - beg);\n    while (down < up)\n    {\n        const size_t mid = down + (up - down) / 2;  // 
Overflow-safe midpoint calculation\n        if (cmp(*(beg + mid), key))\n        {\n            down = mid + 1;\n        }\n        else\n        {\n            up = mid;\n        }\n    }\n    return beg + down;\n}\n\ntemplate<typename CmpLess, typename IterT, typename KeyT>\nIterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)\n{\n    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(\n        beg, end, value, cmp);\n    if (it == end ||\n        (!cmp(*it, value) && !cmp(value, *it)))\n    {\n        return it;\n    }\n    return end;\n}\n\n/*\nReturns true if all pointers in the array are not-null and unique.\nWarning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.\nT must be pointer type, e.g. VmaAllocation, VmaPool.\n*/\ntemplate<typename T>\nstatic bool VmaValidatePointerArray(uint32_t count, const T* arr)\n{\n    for (uint32_t i = 0; i < count; ++i)\n    {\n        const T iPtr = arr[i];\n        if (iPtr == VMA_NULL)\n        {\n            return false;\n        }\n        for (uint32_t j = i + 1; j < count; ++j)\n        {\n            if (iPtr == arr[j])\n            {\n                return false;\n            }\n        }\n    }\n    return true;\n}\n\ntemplate<typename MainT, typename NewT>\nstatic inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)\n{\n    newStruct->pNext = mainStruct->pNext;\n    mainStruct->pNext = newStruct;\n}\n// Finds structure with s->sType == sType in mainStruct->pNext chain.\n// Returns pointer to it. 
If not found, returns null.\ntemplate<typename FindT, typename MainT>\nstatic inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)\n{\n    for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;\n        s != VMA_NULL; s = s->pNext)\n    {\n        if(s->sType == sType)\n        {\n            return (const FindT*)s;\n        }\n    }\n    return VMA_NULL;\n}\n\n// An abstraction over buffer or image `usage` flags, depending on available extensions.\nstruct VmaBufferImageUsage\n{\n#if VMA_KHR_MAINTENANCE5\n    typedef uint64_t BaseType; // VkFlags64\n#else\n    typedef uint32_t BaseType; // VkFlags32\n#endif\n\n    static const VmaBufferImageUsage UNKNOWN;\n\n    BaseType Value;\n\n    VmaBufferImageUsage() { *this = UNKNOWN; }\n    explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { }\n    VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5);\n    explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo);\n\n    bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; }\n    bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; }\n\n    bool Contains(BaseType flag) const { return (Value & flag) != 0; }\n    bool ContainsDeviceAccess() const\n    {\n        // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*.\n        return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;\n    }\n};\n\nconst VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0);\n\nVmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo,\n    bool useKhrMaintenance5)\n{\n#if VMA_KHR_MAINTENANCE5\n    if(useKhrMaintenance5)\n    {\n        // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR,\n        // take usage from it and ignore VkBufferCreateInfo::usage, per specification\n        
// of the VK_KHR_maintenance5 extension.\n        const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 =\n            VmaPnextChainFind<VkBufferUsageFlags2CreateInfoKHR>(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);\n        if(usageFlags2)\n        {\n            this->Value = usageFlags2->usage;\n            return;\n        }\n    }\n#endif\n\n    this->Value = (BaseType)createInfo.usage;\n}\n\nVmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo)\n{\n    // Maybe in the future there will be VK_KHR_maintenanceN extension with structure\n    // VkImageUsageFlags2CreateInfoKHR, like the one for buffers...\n\n    this->Value = (BaseType)createInfo.usage;\n}\n\n// This is the main algorithm that guides the selection of a memory type best for an allocation -\n// converts usage to required/preferred/not preferred flags.\nstatic bool FindMemoryPreferences(\n    bool isIntegratedGPU,\n    const VmaAllocationCreateInfo& allocCreateInfo,\n    VmaBufferImageUsage bufImgUsage,\n    VkMemoryPropertyFlags& outRequiredFlags,\n    VkMemoryPropertyFlags& outPreferredFlags,\n    VkMemoryPropertyFlags& outNotPreferredFlags)\n{\n    outRequiredFlags = allocCreateInfo.requiredFlags;\n    outPreferredFlags = allocCreateInfo.preferredFlags;\n    outNotPreferredFlags = 0;\n\n    switch(allocCreateInfo.usage)\n    {\n    case VMA_MEMORY_USAGE_UNKNOWN:\n        break;\n    case VMA_MEMORY_USAGE_GPU_ONLY:\n        if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)\n        {\n            outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n        }\n        break;\n    case VMA_MEMORY_USAGE_CPU_ONLY:\n        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;\n        break;\n    case VMA_MEMORY_USAGE_CPU_TO_GPU:\n        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n        if(!isIntegratedGPU || (outPreferredFlags & 
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)\n        {\n            outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n        }\n        break;\n    case VMA_MEMORY_USAGE_GPU_TO_CPU:\n        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n        outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;\n        break;\n    case VMA_MEMORY_USAGE_CPU_COPY:\n        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n        break;\n    case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:\n        outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;\n        break;\n    case VMA_MEMORY_USAGE_AUTO:\n    case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:\n    case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:\n    {\n        if(bufImgUsage == VmaBufferImageUsage::UNKNOWN)\n        {\n            VMA_ASSERT(0 && \"VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.\"\n                \" Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?\" );\n            return false;\n        }\n\n        const bool deviceAccess = bufImgUsage.ContainsDeviceAccess();\n        const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;\n        const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;\n        const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;\n        const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;\n        const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;\n\n        // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.\n        if(hostAccessRandom)\n        {\n            // Prefer cached. 
Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)!\n            outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;\n\n            if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)\n            {\n                // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.\n                // Omitting HOST_VISIBLE here is intentional.\n                // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.\n                // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.\n                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n            }\n            else\n            {\n                // Always CPU memory.\n                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n            }\n        }\n        // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.\n        else if(hostAccessSequentialWrite)\n        {\n            // Want uncached and write-combined.\n            outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;\n\n            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)\n            {\n                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n            }\n            else\n            {\n                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n                // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)\n                if(deviceAccess)\n                {\n                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. 
If no preference, choose GPU memory.\n                    if(preferHost)\n                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n                    else\n                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n                }\n                // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)\n                else\n                {\n                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.\n                    if(preferDevice)\n                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n                    else\n                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n                }\n            }\n        }\n        // No CPU access\n        else\n        {\n            // if(deviceAccess)\n            //\n            // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,\n            // unless there is a clear preference from the user not to do so.\n            //\n            // else:\n            //\n            // No direct GPU access, no CPU access, just transfers.\n            // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or\n            // a \"swap file\" copy to free some GPU memory (then better CPU memory).\n            // Up to the user to decide. 
If no preference, assume the former and choose GPU memory.\n\n            if(preferHost)\n                outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n            else\n                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n        }\n        break;\n    }\n    default:\n        VMA_ASSERT(0);\n    }\n\n    // Avoid DEVICE_COHERENT unless explicitly requested.\n    if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &\n        (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)\n    {\n        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;\n    }\n\n    return true;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Memory allocation\n\n// Allocates CPU-side memory using the user-provided allocation callback when available,\n// falling back to the system aligned allocator otherwise. Asserts on allocation failure.\nstatic void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)\n{\n    void* result = VMA_NULL;\n    if ((pAllocationCallbacks != VMA_NULL) &&\n        (pAllocationCallbacks->pfnAllocation != VMA_NULL))\n    {\n        result = (*pAllocationCallbacks->pfnAllocation)(\n            pAllocationCallbacks->pUserData,\n            size,\n            alignment,\n            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);\n    }\n    else\n    {\n        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);\n    }\n    VMA_ASSERT(result != VMA_NULL && \"CPU memory allocation failed.\");\n    return result;\n}\n\n// Frees CPU-side memory allocated by VmaMalloc, using the matching user callback\n// or the system aligned free.\nstatic void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)\n{\n    if ((pAllocationCallbacks != VMA_NULL) &&\n        (pAllocationCallbacks->pfnFree != VMA_NULL))\n    {\n        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);\n    }\n    else\n    {\n        VMA_SYSTEM_ALIGNED_FREE(ptr);\n    }\n}\n\ntemplate<typename T>\nstatic T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)\n{\n    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), 
VMA_ALIGN_OF(T));\n}\n\ntemplate<typename T>\nstatic T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)\n{\n    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));\n}\n\n#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)\n\n#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)\n\ntemplate<typename T>\nstatic void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)\n{\n    ptr->~T();\n    VmaFree(pAllocationCallbacks, ptr);\n}\n\ntemplate<typename T>\nstatic void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)\n{\n    if (ptr != VMA_NULL)\n    {\n        for (size_t i = count; i--; )\n        {\n            ptr[i].~T();\n        }\n        VmaFree(pAllocationCallbacks, ptr);\n    }\n}\n\nstatic char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)\n{\n    if (srcStr != VMA_NULL)\n    {\n        const size_t len = strlen(srcStr);\n        char* const result = vma_new_array(allocs, char, len + 1);\n        memcpy(result, srcStr, len + 1);\n        return result;\n    }\n    return VMA_NULL;\n}\n\n#if VMA_STATS_STRING_ENABLED\nstatic char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)\n{\n    if (srcStr != VMA_NULL)\n    {\n        char* const result = vma_new_array(allocs, char, strLen + 1);\n        memcpy(result, srcStr, strLen);\n        result[strLen] = '\\0';\n        return result;\n    }\n    return VMA_NULL;\n}\n#endif // VMA_STATS_STRING_ENABLED\n\nstatic void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)\n{\n    if (str != VMA_NULL)\n    {\n        const size_t len = strlen(str);\n        vma_delete_array(allocs, str, len + 1);\n    }\n}\n\ntemplate<typename CmpLess, typename VectorT>\nsize_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& 
value)\n{\n    const size_t indexToInsert = VmaBinaryFindFirstNotLess(\n        vector.data(),\n        vector.data() + vector.size(),\n        value,\n        CmpLess()) - vector.data();\n    VmaVectorInsert(vector, indexToInsert, value);\n    return indexToInsert;\n}\n\ntemplate<typename CmpLess, typename VectorT>\nbool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)\n{\n    CmpLess comparator;\n    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(\n        vector.begin(),\n        vector.end(),\n        value,\n        comparator);\n    if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))\n    {\n        size_t indexToRemove = it - vector.begin();\n        VmaVectorRemove(vector, indexToRemove);\n        return true;\n    }\n    return false;\n}\n#endif // _VMA_FUNCTIONS\n\n#ifndef _VMA_STATISTICS_FUNCTIONS\n\nstatic void VmaClearStatistics(VmaStatistics& outStats)\n{\n    outStats.blockCount = 0;\n    outStats.allocationCount = 0;\n    outStats.blockBytes = 0;\n    outStats.allocationBytes = 0;\n}\n\nstatic void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)\n{\n    inoutStats.blockCount += src.blockCount;\n    inoutStats.allocationCount += src.allocationCount;\n    inoutStats.blockBytes += src.blockBytes;\n    inoutStats.allocationBytes += src.allocationBytes;\n}\n\nstatic void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)\n{\n    VmaClearStatistics(outStats.statistics);\n    outStats.unusedRangeCount = 0;\n    outStats.allocationSizeMin = VK_WHOLE_SIZE;\n    outStats.allocationSizeMax = 0;\n    outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;\n    outStats.unusedRangeSizeMax = 0;\n}\n\nstatic void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)\n{\n    inoutStats.statistics.allocationCount++;\n    inoutStats.statistics.allocationBytes += size;\n    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, 
size);\n    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);\n}\n\nstatic void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)\n{\n    inoutStats.unusedRangeCount++;\n    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);\n    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);\n}\n\nstatic void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)\n{\n    VmaAddStatistics(inoutStats.statistics, src.statistics);\n    inoutStats.unusedRangeCount += src.unusedRangeCount;\n    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);\n    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);\n    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);\n    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);\n}\n\n#endif // _VMA_STATISTICS_FUNCTIONS\n\n#ifndef _VMA_MUTEX_LOCK\n// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).\nstruct VmaMutexLock\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)\npublic:\n    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :\n        m_pMutex(useMutex ? &mutex : VMA_NULL)\n    {\n        if (m_pMutex) { m_pMutex->Lock(); }\n    }\n    ~VmaMutexLock() {  if (m_pMutex) { m_pMutex->Unlock(); } }\n\nprivate:\n    VMA_MUTEX* m_pMutex;\n};\n\n// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.\nstruct VmaMutexLockRead\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)\npublic:\n    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :\n        m_pMutex(useMutex ? 
&mutex : VMA_NULL)\n    {\n        if (m_pMutex) { m_pMutex->LockRead(); }\n    }\n    ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }\n\nprivate:\n    VMA_RW_MUTEX* m_pMutex;\n};\n\n// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.\nstruct VmaMutexLockWrite\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)\npublic:\n    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)\n        : m_pMutex(useMutex ? &mutex : VMA_NULL)\n    {\n        if (m_pMutex) { m_pMutex->LockWrite(); }\n    }\n    ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }\n\nprivate:\n    VMA_RW_MUTEX* m_pMutex;\n};\n\n#if VMA_DEBUG_GLOBAL_MUTEX\n    static VMA_MUTEX gDebugGlobalMutex;\n    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);\n#else\n    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK\n#endif\n#endif // _VMA_MUTEX_LOCK\n\n#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT\n// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.\ntemplate<typename AtomicT>\nstruct AtomicTransactionalIncrement\n{\npublic:\n    using T = decltype(AtomicT().load());\n\n    ~AtomicTransactionalIncrement()\n    {\n        if(m_Atomic)\n            --(*m_Atomic);\n    }\n\n    void Commit() { m_Atomic = VMA_NULL; }\n    T Increment(AtomicT* atomic)\n    {\n        m_Atomic = atomic;\n        return m_Atomic->fetch_add(1);\n    }\n\nprivate:\n    AtomicT* m_Atomic = VMA_NULL;\n};\n#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT\n\n#ifndef _VMA_STL_ALLOCATOR\n// STL-compatible allocator.\ntemplate<typename T>\nstruct VmaStlAllocator\n{\n    const VkAllocationCallbacks* const m_pCallbacks;\n    typedef T value_type;\n\n    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}\n    template<typename U>\n    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}\n    
VmaStlAllocator(const VmaStlAllocator&) = default;\n    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;\n\n    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }\n    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }\n\n    template<typename U>\n    bool operator==(const VmaStlAllocator<U>& rhs) const\n    {\n        return m_pCallbacks == rhs.m_pCallbacks;\n    }\n    template<typename U>\n    bool operator!=(const VmaStlAllocator<U>& rhs) const\n    {\n        return m_pCallbacks != rhs.m_pCallbacks;\n    }\n};\n#endif // _VMA_STL_ALLOCATOR\n\n#ifndef _VMA_VECTOR\n/* Class with interface compatible with subset of std::vector.\nT must be POD because constructors and destructors are not called and memcpy is\nused for these objects. */\ntemplate<typename T, typename AllocatorT>\nclass VmaVector\n{\npublic:\n    typedef T value_type;\n    typedef T* iterator;\n    typedef const T* const_iterator;\n\n    VmaVector(const AllocatorT& allocator);\n    VmaVector(size_t count, const AllocatorT& allocator);\n    // This version of the constructor is here for compatibility with pre-C++14 std::vector.\n    // value is unused.\n    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}\n    VmaVector(const VmaVector<T, AllocatorT>& src);\n    VmaVector& operator=(const VmaVector& rhs);\n    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }\n\n    bool empty() const { return m_Count == 0; }\n    size_t size() const { return m_Count; }\n    T* data() { return m_pArray; }\n    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }\n    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }\n    const T* data() const { return m_pArray; }\n    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }\n    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }\n\n    iterator begin() { return 
m_pArray; }\n    iterator end() { return m_pArray + m_Count; }\n    const_iterator cbegin() const { return m_pArray; }\n    const_iterator cend() const { return m_pArray + m_Count; }\n    const_iterator begin() const { return cbegin(); }\n    const_iterator end() const { return cend(); }\n\n    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }\n    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }\n    void push_front(const T& src) { insert(0, src); }\n\n    void push_back(const T& src);\n    void reserve(size_t newCapacity, bool freeMemory = false);\n    void resize(size_t newCount);\n    void clear() { resize(0); }\n    void shrink_to_fit();\n    void insert(size_t index, const T& src);\n    void remove(size_t index);\n\n    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }\n    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }\n\nprivate:\n    AllocatorT m_Allocator;\n    T* m_pArray;\n    size_t m_Count;\n    size_t m_Capacity;\n};\n\n#ifndef _VMA_VECTOR_FUNCTIONS\ntemplate<typename T, typename AllocatorT>\nVmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)\n    : m_Allocator(allocator),\n    m_pArray(VMA_NULL),\n    m_Count(0),\n    m_Capacity(0) {}\n\ntemplate<typename T, typename AllocatorT>\nVmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)\n    : m_Allocator(allocator),\n    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),\n    m_Count(count),\n    m_Capacity(count) {}\n\ntemplate<typename T, typename AllocatorT>\nVmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)\n    : m_Allocator(src.m_Allocator),\n    m_pArray(src.m_Count ? 
(T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),\n    m_Count(src.m_Count),\n    m_Capacity(src.m_Count)\n{\n    if (m_Count != 0)\n    {\n        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));\n    }\n}\n\ntemplate<typename T, typename AllocatorT>\nVmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)\n{\n    if (&rhs != this)\n    {\n        resize(rhs.m_Count);\n        if (m_Count != 0)\n        {\n            memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));\n        }\n    }\n    return *this;\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::push_back(const T& src)\n{\n    const size_t newIndex = size();\n    resize(newIndex + 1);\n    m_pArray[newIndex] = src;\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)\n{\n    newCapacity = VMA_MAX(newCapacity, m_Count);\n\n    if ((newCapacity < m_Capacity) && !freeMemory)\n    {\n        newCapacity = m_Capacity;\n    }\n\n    if (newCapacity != m_Capacity)\n    {\n        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;\n        if (m_Count != 0)\n        {\n            memcpy(newArray, m_pArray, m_Count * sizeof(T));\n        }\n        VmaFree(m_Allocator.m_pCallbacks, m_pArray);\n        m_Capacity = newCapacity;\n        m_pArray = newArray;\n    }\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::resize(size_t newCount)\n{\n    size_t newCapacity = m_Capacity;\n    if (newCount > m_Capacity)\n    {\n        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));\n    }\n\n    if (newCapacity != m_Capacity)\n    {\n        T* const newArray = newCapacity ? 
VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;\n        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);\n        if (elementsToCopy != 0)\n        {\n            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));\n        }\n        VmaFree(m_Allocator.m_pCallbacks, m_pArray);\n        m_Capacity = newCapacity;\n        m_pArray = newArray;\n    }\n\n    m_Count = newCount;\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::shrink_to_fit()\n{\n    if (m_Capacity > m_Count)\n    {\n        T* newArray = VMA_NULL;\n        if (m_Count > 0)\n        {\n            newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);\n            memcpy(newArray, m_pArray, m_Count * sizeof(T));\n        }\n        VmaFree(m_Allocator.m_pCallbacks, m_pArray);\n        m_Capacity = m_Count;\n        m_pArray = newArray;\n    }\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::insert(size_t index, const T& src)\n{\n    VMA_HEAVY_ASSERT(index <= m_Count);\n    const size_t oldCount = size();\n    resize(oldCount + 1);\n    if (index < oldCount)\n    {\n        memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));\n    }\n    m_pArray[index] = src;\n}\n\ntemplate<typename T, typename AllocatorT>\nvoid VmaVector<T, AllocatorT>::remove(size_t index)\n{\n    VMA_HEAVY_ASSERT(index < m_Count);\n    const size_t oldCount = size();\n    if (index < oldCount - 1)\n    {\n        memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));\n    }\n    resize(oldCount - 1);\n}\n#endif // _VMA_VECTOR_FUNCTIONS\n\ntemplate<typename T, typename allocatorT>\nstatic void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)\n{\n    vec.insert(index, item);\n}\n\ntemplate<typename T, typename allocatorT>\nstatic void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)\n{\n    vec.remove(index);\n}\n#endif // 
_VMA_VECTOR\n\n#ifndef _VMA_SMALL_VECTOR\n/*\nThis is a vector (a variable-sized array), optimized for the case when the array is small.\n\nIt contains some number of elements in-place, which allows it to avoid heap allocation\nwhen the actual number of elements is below that threshold. This allows normal \"small\"\ncases to be fast without losing generality for large inputs.\n*/\ntemplate<typename T, typename AllocatorT, size_t N>\nclass VmaSmallVector\n{\npublic:\n    typedef T value_type;\n    typedef T* iterator;\n\n    VmaSmallVector(const AllocatorT& allocator);\n    VmaSmallVector(size_t count, const AllocatorT& allocator);\n    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>\n    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;\n    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>\n    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;\n    ~VmaSmallVector() = default;\n\n    bool empty() const { return m_Count == 0; }\n    size_t size() const { return m_Count; }\n    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }\n    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }\n    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }\n    const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; }\n    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }\n    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }\n\n    iterator begin() { return data(); }\n    iterator end() { return data() + m_Count; }\n\n    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }\n    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }\n    void push_front(const T& src) { insert(0, src); }\n\n    void push_back(const T& src);\n    void resize(size_t newCount, bool freeMemory = false);\n    void clear(bool freeMemory = false);\n    void insert(size_t index, const T& src);\n    void remove(size_t index);\n\n    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }\n    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }\n\nprivate:\n    size_t m_Count;\n    T m_StaticArray[N]; // Used when m_Size <= N\n    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N\n};\n\n#ifndef _VMA_SMALL_VECTOR_FUNCTIONS\ntemplate<typename T, typename AllocatorT, size_t N>\nVmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)\n    : m_Count(0),\n    m_DynamicArray(allocator) {}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nVmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)\n    : m_Count(count),\n    m_DynamicArray(count > N ? 
count : 0, allocator) {}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nvoid VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)\n{\n    const size_t newIndex = size();\n    resize(newIndex + 1);\n    data()[newIndex] = src;\n}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nvoid VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)\n{\n    if (newCount > N && m_Count > N)\n    {\n        // Any direction, staying in m_DynamicArray\n        m_DynamicArray.resize(newCount);\n        if (freeMemory)\n        {\n            m_DynamicArray.shrink_to_fit();\n        }\n    }\n    else if (newCount > N && m_Count <= N)\n    {\n        // Growing, moving from m_StaticArray to m_DynamicArray\n        m_DynamicArray.resize(newCount);\n        if (m_Count > 0)\n        {\n            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));\n        }\n    }\n    else if (newCount <= N && m_Count > N)\n    {\n        // Shrinking, moving from m_DynamicArray to m_StaticArray\n        if (newCount > 0)\n        {\n            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));\n        }\n        m_DynamicArray.resize(0);\n        if (freeMemory)\n        {\n            m_DynamicArray.shrink_to_fit();\n        }\n    }\n    else\n    {\n        // Any direction, staying in m_StaticArray - nothing to do here\n    }\n    m_Count = newCount;\n}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nvoid VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)\n{\n    m_DynamicArray.clear();\n    if (freeMemory)\n    {\n        m_DynamicArray.shrink_to_fit();\n    }\n    m_Count = 0;\n}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nvoid VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)\n{\n    VMA_HEAVY_ASSERT(index <= m_Count);\n    const size_t oldCount = size();\n    resize(oldCount + 1);\n    T* const dataPtr = data();\n    if (index < oldCount)\n    {\n        //  I know, this 
could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.\n        memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));\n    }\n    dataPtr[index] = src;\n}\n\ntemplate<typename T, typename AllocatorT, size_t N>\nvoid VmaSmallVector<T, AllocatorT, N>::remove(size_t index)\n{\n    VMA_HEAVY_ASSERT(index < m_Count);\n    const size_t oldCount = size();\n    if (index < oldCount - 1)\n    {\n        //  I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.\n        T* const dataPtr = data();\n        memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));\n    }\n    resize(oldCount - 1);\n}\n#endif // _VMA_SMALL_VECTOR_FUNCTIONS\n#endif // _VMA_SMALL_VECTOR\n\n#ifndef _VMA_POOL_ALLOCATOR\n/*\nAllocator for objects of type T using a list of arrays (pools) to speed up\nallocation. Number of elements that can be allocated is not bounded because\nallocator can create multiple blocks.\n*/\ntemplate<typename T>\nclass VmaPoolAllocator\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)\npublic:\n    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);\n    ~VmaPoolAllocator();\n    template<typename... Types> T* Alloc(Types&&... 
args);\n    void Free(T* ptr);\n\nprivate:\n    union Item\n    {\n        uint32_t NextFreeIndex;\n        alignas(T) char Value[sizeof(T)];\n    };\n    struct ItemBlock\n    {\n        Item* pItems;\n        uint32_t Capacity;\n        uint32_t FirstFreeIndex;\n    };\n\n    const VkAllocationCallbacks* m_pAllocationCallbacks;\n    const uint32_t m_FirstBlockCapacity;\n    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;\n\n    ItemBlock& CreateNewBlock();\n};\n\n#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS\ntemplate<typename T>\nVmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)\n    : m_pAllocationCallbacks(pAllocationCallbacks),\n    m_FirstBlockCapacity(firstBlockCapacity),\n    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))\n{\n    VMA_ASSERT(m_FirstBlockCapacity > 1);\n}\n\ntemplate<typename T>\nVmaPoolAllocator<T>::~VmaPoolAllocator()\n{\n    for (size_t i = m_ItemBlocks.size(); i--;)\n        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);\n    m_ItemBlocks.clear();\n}\n\ntemplate<typename T>\ntemplate<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... 
args)\n{\n    for (size_t i = m_ItemBlocks.size(); i--; )\n    {\n        ItemBlock& block = m_ItemBlocks[i];\n        // This block has some free items: Use first one.\n        if (block.FirstFreeIndex != UINT32_MAX)\n        {\n            Item* const pItem = &block.pItems[block.FirstFreeIndex];\n            block.FirstFreeIndex = pItem->NextFreeIndex;\n            T* result = (T*)&pItem->Value;\n            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.\n            return result;\n        }\n    }\n\n    // No block has free item: Create new one and use it.\n    ItemBlock& newBlock = CreateNewBlock();\n    Item* const pItem = &newBlock.pItems[0];\n    newBlock.FirstFreeIndex = pItem->NextFreeIndex;\n    T* result = (T*)&pItem->Value;\n    new(result) T(std::forward<Types>(args)...); // Explicit constructor call.\n    return result;\n}\n\ntemplate<typename T>\nvoid VmaPoolAllocator<T>::Free(T* ptr)\n{\n    // Search all memory blocks to find ptr.\n    for (size_t i = m_ItemBlocks.size(); i--; )\n    {\n        ItemBlock& block = m_ItemBlocks[i];\n\n        // Casting to union.\n        Item* pItemPtr;\n        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));\n\n        // Check if pItemPtr is in address range of this block.\n        if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))\n        {\n            ptr->~T(); // Explicit destructor call.\n            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);\n            pItemPtr->NextFreeIndex = block.FirstFreeIndex;\n            block.FirstFreeIndex = index;\n            return;\n        }\n    }\n    VMA_ASSERT(0 && \"Pointer doesn't belong to this memory pool.\");\n}\n\ntemplate<typename T>\ntypename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()\n{\n    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?\n        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;\n\n    const ItemBlock newBlock =\n    
{\n        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),\n        newBlockCapacity,\n        0\n    };\n\n    m_ItemBlocks.push_back(newBlock);\n\n    // Setup singly-linked list of all free items in this block.\n    for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)\n        newBlock.pItems[i].NextFreeIndex = i + 1;\n    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;\n    return m_ItemBlocks.back();\n}\n#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS\n#endif // _VMA_POOL_ALLOCATOR\n\n#ifndef _VMA_RAW_LIST\ntemplate<typename T>\nstruct VmaListItem\n{\n    VmaListItem* pPrev;\n    VmaListItem* pNext;\n    T Value;\n};\n\n// Doubly linked list.\ntemplate<typename T>\nclass VmaRawList\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)\npublic:\n    typedef VmaListItem<T> ItemType;\n\n    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);\n    // Intentionally not calling Clear, because that would be unnecessary\n    // computations to return all items to m_ItemAllocator as free.\n    ~VmaRawList() = default;\n\n    size_t GetCount() const { return m_Count; }\n    bool IsEmpty() const { return m_Count == 0; }\n\n    ItemType* Front() { return m_pFront; }\n    ItemType* Back() { return m_pBack; }\n    const ItemType* Front() const { return m_pFront; }\n    const ItemType* Back() const { return m_pBack; }\n\n    ItemType* PushFront();\n    ItemType* PushBack();\n    ItemType* PushFront(const T& value);\n    ItemType* PushBack(const T& value);\n    void PopFront();\n    void PopBack();\n\n    // Item can be null - it means PushBack.\n    ItemType* InsertBefore(ItemType* pItem);\n    // Item can be null - it means PushFront.\n    ItemType* InsertAfter(ItemType* pItem);\n    ItemType* InsertBefore(ItemType* pItem, const T& value);\n    ItemType* InsertAfter(ItemType* pItem, const T& value);\n\n    void Clear();\n    void Remove(ItemType* pItem);\n\nprivate:\n    const VkAllocationCallbacks* const m_pAllocationCallbacks;\n    
VmaPoolAllocator<ItemType> m_ItemAllocator;\n    ItemType* m_pFront;\n    ItemType* m_pBack;\n    size_t m_Count;\n};\n\n#ifndef _VMA_RAW_LIST_FUNCTIONS\ntemplate<typename T>\nVmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)\n    : m_pAllocationCallbacks(pAllocationCallbacks),\n    m_ItemAllocator(pAllocationCallbacks, 128),\n    m_pFront(VMA_NULL),\n    m_pBack(VMA_NULL),\n    m_Count(0) {}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::PushFront()\n{\n    ItemType* const pNewItem = m_ItemAllocator.Alloc();\n    pNewItem->pPrev = VMA_NULL;\n    if (IsEmpty())\n    {\n        pNewItem->pNext = VMA_NULL;\n        m_pFront = pNewItem;\n        m_pBack = pNewItem;\n        m_Count = 1;\n    }\n    else\n    {\n        pNewItem->pNext = m_pFront;\n        m_pFront->pPrev = pNewItem;\n        m_pFront = pNewItem;\n        ++m_Count;\n    }\n    return pNewItem;\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::PushBack()\n{\n    ItemType* const pNewItem = m_ItemAllocator.Alloc();\n    pNewItem->pNext = VMA_NULL;\n    if(IsEmpty())\n    {\n        pNewItem->pPrev = VMA_NULL;\n        m_pFront = pNewItem;\n        m_pBack = pNewItem;\n        m_Count = 1;\n    }\n    else\n    {\n        pNewItem->pPrev = m_pBack;\n        m_pBack->pNext = pNewItem;\n        m_pBack = pNewItem;\n        ++m_Count;\n    }\n    return pNewItem;\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::PushFront(const T& value)\n{\n    ItemType* const pNewItem = PushFront();\n    pNewItem->Value = value;\n    return pNewItem;\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::PushBack(const T& value)\n{\n    ItemType* const pNewItem = PushBack();\n    pNewItem->Value = value;\n    return pNewItem;\n}\n\ntemplate<typename T>\nvoid VmaRawList<T>::PopFront()\n{\n    VMA_HEAVY_ASSERT(m_Count > 0);\n    ItemType* const pFrontItem = m_pFront;\n    ItemType* const pNextItem = pFrontItem->pNext;\n    if (pNextItem != VMA_NULL)\n    {\n        
pNextItem->pPrev = VMA_NULL;\n    }\n    m_pFront = pNextItem;\n    m_ItemAllocator.Free(pFrontItem);\n    --m_Count;\n}\n\ntemplate<typename T>\nvoid VmaRawList<T>::PopBack()\n{\n    VMA_HEAVY_ASSERT(m_Count > 0);\n    ItemType* const pBackItem = m_pBack;\n    ItemType* const pPrevItem = pBackItem->pPrev;\n    if(pPrevItem != VMA_NULL)\n    {\n        pPrevItem->pNext = VMA_NULL;\n    }\n    m_pBack = pPrevItem;\n    m_ItemAllocator.Free(pBackItem);\n    --m_Count;\n}\n\ntemplate<typename T>\nvoid VmaRawList<T>::Clear()\n{\n    if (IsEmpty() == false)\n    {\n        ItemType* pItem = m_pBack;\n        while (pItem != VMA_NULL)\n        {\n            ItemType* const pPrevItem = pItem->pPrev;\n            m_ItemAllocator.Free(pItem);\n            pItem = pPrevItem;\n        }\n        m_pFront = VMA_NULL;\n        m_pBack = VMA_NULL;\n        m_Count = 0;\n    }\n}\n\ntemplate<typename T>\nvoid VmaRawList<T>::Remove(ItemType* pItem)\n{\n    VMA_HEAVY_ASSERT(pItem != VMA_NULL);\n    VMA_HEAVY_ASSERT(m_Count > 0);\n\n    if(pItem->pPrev != VMA_NULL)\n    {\n        pItem->pPrev->pNext = pItem->pNext;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(m_pFront == pItem);\n        m_pFront = pItem->pNext;\n    }\n\n    if(pItem->pNext != VMA_NULL)\n    {\n        pItem->pNext->pPrev = pItem->pPrev;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(m_pBack == pItem);\n        m_pBack = pItem->pPrev;\n    }\n\n    m_ItemAllocator.Free(pItem);\n    --m_Count;\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)\n{\n    if(pItem != VMA_NULL)\n    {\n        ItemType* const prevItem = pItem->pPrev;\n        ItemType* const newItem = m_ItemAllocator.Alloc();\n        newItem->pPrev = prevItem;\n        newItem->pNext = pItem;\n        pItem->pPrev = newItem;\n        if(prevItem != VMA_NULL)\n        {\n            prevItem->pNext = newItem;\n        }\n        else\n        {\n            VMA_HEAVY_ASSERT(m_pFront == pItem);\n            
m_pFront = newItem;\n        }\n        ++m_Count;\n        return newItem;\n    }\n    else\n        return PushBack();\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)\n{\n    if(pItem != VMA_NULL)\n    {\n        ItemType* const nextItem = pItem->pNext;\n        ItemType* const newItem = m_ItemAllocator.Alloc();\n        newItem->pNext = nextItem;\n        newItem->pPrev = pItem;\n        pItem->pNext = newItem;\n        if(nextItem != VMA_NULL)\n        {\n            nextItem->pPrev = newItem;\n        }\n        else\n        {\n            VMA_HEAVY_ASSERT(m_pBack == pItem);\n            m_pBack = newItem;\n        }\n        ++m_Count;\n        return newItem;\n    }\n    else\n        return PushFront();\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)\n{\n    ItemType* const newItem = InsertBefore(pItem);\n    newItem->Value = value;\n    return newItem;\n}\n\ntemplate<typename T>\nVmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)\n{\n    ItemType* const newItem = InsertAfter(pItem);\n    newItem->Value = value;\n    return newItem;\n}\n#endif // _VMA_RAW_LIST_FUNCTIONS\n#endif // _VMA_RAW_LIST\n\n#ifndef _VMA_LIST\ntemplate<typename T, typename AllocatorT>\nclass VmaList\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaList)\npublic:\n    class reverse_iterator;\n    class const_iterator;\n    class const_reverse_iterator;\n\n    class iterator\n    {\n        friend class const_iterator;\n        friend class VmaList<T, AllocatorT>;\n    public:\n        iterator() :  m_pList(VMA_NULL), m_pItem(VMA_NULL) {}\n        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n\n        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }\n        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }\n\n        bool operator==(const iterator& rhs) const 
{ VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }\n        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }\n\n        iterator operator++(int) { iterator result = *this; ++*this; return result; }\n        iterator operator--(int) { iterator result = *this; --*this; return result; }\n\n        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }\n        iterator& operator--();\n\n    private:\n        VmaRawList<T>* m_pList;\n        VmaListItem<T>* m_pItem;\n\n        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList),  m_pItem(pItem) {}\n    };\n    class reverse_iterator\n    {\n        friend class const_reverse_iterator;\n        friend class VmaList<T, AllocatorT>;\n    public:\n        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}\n        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n\n        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }\n        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }\n\n        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }\n        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }\n\n        reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; }\n        reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; }\n\n        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }\n        reverse_iterator& operator--();\n\n    private:\n        VmaRawList<T>* m_pList;\n        VmaListItem<T>* m_pItem;\n\n        
reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList),  m_pItem(pItem) {}\n    };\n    class const_iterator\n    {\n        friend class VmaList<T, AllocatorT>;\n    public:\n        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}\n        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n\n        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }\n\n        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }\n        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }\n\n        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }\n        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }\n\n        const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; }\n        const_iterator operator--(int) { const_iterator result = *this; --* this; return result; }\n\n        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }\n        const_iterator& operator--();\n\n    private:\n        const VmaRawList<T>* m_pList;\n        const VmaListItem<T>* m_pItem;\n\n        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}\n    };\n    class const_reverse_iterator\n    {\n        friend class VmaList<T, AllocatorT>;\n    public:\n        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}\n        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n        const_reverse_iterator(const iterator& src) : 
m_pList(src.m_pList), m_pItem(src.m_pItem) {}\n\n        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }\n\n        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }\n        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }\n\n        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }\n        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }\n\n        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; }\n        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; }\n\n        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }\n        const_reverse_iterator& operator--();\n\n    private:\n        const VmaRawList<T>* m_pList;\n        const VmaListItem<T>* m_pItem;\n\n        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}\n    };\n\n    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}\n\n    bool empty() const { return m_RawList.IsEmpty(); }\n    size_t size() const { return m_RawList.GetCount(); }\n\n    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }\n    iterator end() { return iterator(&m_RawList, VMA_NULL); }\n\n    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }\n    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }\n\n    const_iterator begin() const { return cbegin(); }\n    const_iterator end() const { return cend(); }\n\n    reverse_iterator rbegin() { return 
reverse_iterator(&m_RawList, m_RawList.Back()); }\n    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }\n\n    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }\n    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }\n\n    const_reverse_iterator rbegin() const { return crbegin(); }\n    const_reverse_iterator rend() const { return crend(); }\n\n    void push_back(const T& value) { m_RawList.PushBack(value); }\n    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }\n\n    void clear() { m_RawList.Clear(); }\n    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }\n\nprivate:\n    VmaRawList<T> m_RawList;\n};\n\n#ifndef _VMA_LIST_FUNCTIONS\ntemplate<typename T, typename AllocatorT>\ntypename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()\n{\n    if (m_pItem != VMA_NULL)\n    {\n        m_pItem = m_pItem->pPrev;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());\n        m_pItem = m_pList->Back();\n    }\n    return *this;\n}\n\ntemplate<typename T, typename AllocatorT>\ntypename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()\n{\n    if (m_pItem != VMA_NULL)\n    {\n        m_pItem = m_pItem->pNext;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());\n        m_pItem = m_pList->Front();\n    }\n    return *this;\n}\n\ntemplate<typename T, typename AllocatorT>\ntypename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()\n{\n    if (m_pItem != VMA_NULL)\n    {\n        m_pItem = m_pItem->pPrev;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());\n        m_pItem = m_pList->Back();\n    }\n    return *this;\n}\n\ntemplate<typename T, typename AllocatorT>\ntypename VmaList<T, 
AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()\n{\n    if (m_pItem != VMA_NULL)\n    {\n        m_pItem = m_pItem->pNext;\n    }\n    else\n    {\n        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());\n        m_pItem = m_pList->Back();\n    }\n    return *this;\n}\n#endif // _VMA_LIST_FUNCTIONS\n#endif // _VMA_LIST\n\n#ifndef _VMA_INTRUSIVE_LINKED_LIST\n/*\nExpected interface of ItemTypeTraits:\nstruct MyItemTypeTraits\n{\n    typedef MyItem ItemType;\n    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }\n    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }\n    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }\n    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }\n};\n*/\ntemplate<typename ItemTypeTraits>\nclass VmaIntrusiveLinkedList\n{\npublic:\n    typedef typename ItemTypeTraits::ItemType ItemType;\n    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }\n    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }\n\n    // Movable, not copyable.\n    VmaIntrusiveLinkedList() = default;\n    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);\n    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;\n    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);\n    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;\n    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }\n\n    size_t GetCount() const { return m_Count; }\n    bool IsEmpty() const { return m_Count == 0; }\n    ItemType* Front() { return m_Front; }\n    ItemType* Back() { return m_Back; }\n    const ItemType* Front() const { return m_Front; }\n    const ItemType* Back() const { return m_Back; }\n\n    void PushBack(ItemType* item);\n    void PushFront(ItemType* item);\n    ItemType* PopBack();\n    ItemType* PopFront();\n\n    // 
MyItem can be null - it means PushBack.\n    void InsertBefore(ItemType* existingItem, ItemType* newItem);\n    // MyItem can be null - it means PushFront.\n    void InsertAfter(ItemType* existingItem, ItemType* newItem);\n    void Remove(ItemType* item);\n    void RemoveAll();\n\nprivate:\n    ItemType* m_Front = VMA_NULL;\n    ItemType* m_Back = VMA_NULL;\n    size_t m_Count = 0;\n};\n\n#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS\ntemplate<typename ItemTypeTraits>\nVmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)\n    : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)\n{\n    src.m_Front = src.m_Back = VMA_NULL;\n    src.m_Count = 0;\n}\n\ntemplate<typename ItemTypeTraits>\nVmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)\n{\n    if (&src != this)\n    {\n        VMA_HEAVY_ASSERT(IsEmpty());\n        m_Front = src.m_Front;\n        m_Back = src.m_Back;\n        m_Count = src.m_Count;\n        src.m_Front = src.m_Back = VMA_NULL;\n        src.m_Count = 0;\n    }\n    return *this;\n}\n\ntemplate<typename ItemTypeTraits>\nvoid VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)\n{\n    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);\n    if (IsEmpty())\n    {\n        m_Front = item;\n        m_Back = item;\n        m_Count = 1;\n    }\n    else\n    {\n        ItemTypeTraits::AccessPrev(item) = m_Back;\n        ItemTypeTraits::AccessNext(m_Back) = item;\n        m_Back = item;\n        ++m_Count;\n    }\n}\n\ntemplate<typename ItemTypeTraits>\nvoid VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)\n{\n    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);\n    if (IsEmpty())\n    {\n        m_Front = item;\n        m_Back = item;\n        m_Count = 1;\n    }\n    else\n    {\n        
ItemTypeTraits::AccessNext(item) = m_Front;
        ItemTypeTraits::AccessPrev(m_Front) = item;
        m_Front = item;
        ++m_Count;
    }
}

// Detaches and returns the last item. The list must not be empty.
template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const backItem = m_Back;
    ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
    if (prevItem != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
    }
    m_Back = prevItem;
    --m_Count;
    // Clear the detached item's links so it can be pushed into a list again.
    ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
    return backItem;
}

// Detaches and returns the first item. The list must not be empty.
template<typename ItemTypeTraits>
typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const frontItem = m_Front;
    ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
    if (nextItem != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
    }
    m_Front = nextItem;
    --m_Count;
    // Clear the detached item's links so it can be pushed into a list again.
    ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
    ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
    return frontItem;
}

// Links newItem directly before existingItem; existingItem == VMA_NULL appends at the end.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
        ItemTypeTraits::AccessPrev(newItem) = prevItem;
        ItemTypeTraits::AccessNext(newItem) = existingItem;
        ItemTypeTraits::AccessPrev(existingItem) = newItem;
        if (prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = newItem;
        }
        else
        {
            // existingItem was the front - newItem becomes the new front.
            VMA_HEAVY_ASSERT(m_Front == existingItem);
            m_Front = newItem;
        }
        ++m_Count;
    }
    else
        PushBack(newItem);
}

// Links newItem directly after existingItem; existingItem == VMA_NULL prepends at the front.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
        ItemTypeTraits::AccessNext(newItem) = nextItem;
        ItemTypeTraits::AccessPrev(newItem) = existingItem;
        ItemTypeTraits::AccessNext(existingItem) = newItem;
        if (nextItem != VMA_NULL)
        {
            ItemTypeTraits::AccessPrev(nextItem) = newItem;
        }
        else
        {
            // existingItem was the back - newItem becomes the new back.
            VMA_HEAVY_ASSERT(m_Back == existingItem);
            m_Back = newItem;
        }
        ++m_Count;
    }
    else
        return PushFront(newItem);
}

// Unlinks item from the list and clears its prev/next pointers.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
{
    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Front == item);
        m_Front = ItemTypeTraits::GetNext(item);
    }

    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Back == item);
        m_Back = ItemTypeTraits::GetPrev(item);
    }
    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
    ItemTypeTraits::AccessNext(item) = VMA_NULL;
    --m_Count;
}

// Unlinks every item (walking back to front) and empties the list.
// The items themselves are not destroyed.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
{
    if (!IsEmpty())
    {
        ItemType* item = m_Back;
        while (item != VMA_NULL)
        {
            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = VMA_NULL;
            item = prevItem;
        }
        m_Front = VMA_NULL;
        m_Back = VMA_NULL;
        m_Count = 0;
    }
}
#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
#endif // _VMA_INTRUSIVE_LINKED_LIST

#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
// Minimal append-only character buffer used to build the statistics string.
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;

    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }

    void Add(const char* pStr);
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};

#ifndef _VMA_STRING_BUILDER_FUNCTIONS
// Appends the whole null-terminated string (without the terminator).
void VmaStringBuilder::Add(const char* pStr)
{
    const size_t strLen = strlen(pStr);
    if (strLen > 0)
    {
        const size_t oldCount = m_Data.size();
        m_Data.resize(oldCount + strLen);
        memcpy(m_Data.data() + oldCount, pStr, strLen);
    }
}

// Appends num as decimal digits. buf holds max 10 digits of uint32_t + terminator.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    buf[10] = '\0';
    char* p = &buf[10];
    // Write digits right-to-left, then append starting at the first digit.
    do
    {
        *--p = '0' + (char)(num % 10);
        num /= 10;
    } while (num);
    Add(p);
}

// Appends num as decimal digits. buf holds max 20 digits of uint64_t + terminator.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    buf[20] = '\0';
    char* p = &buf[20];
    // Write digits right-to-left, then append starting at the first digit.
    do
    {
        *--p = '0' + (char)(num % 10);
        num /= 10;
    }
while (num);
    Add(p);
}

// Appends the pointer value formatted as hexadecimal text ("%p"-style).
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
#endif //_VMA_STRING_BUILDER_FUNCTIONS
#endif // _VMA_STRING_BUILDER

#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
/*
Allows to conveniently build a correct JSON document to be written to the
VmaStringBuilder passed to the constructor.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();

    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();

    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);

    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);

    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();

private:
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far into this collection.
        uint32_t valueCount;
        bool singleLineMode;
    };

    static const char* const INDENT;

    VmaStringBuilder& m_SB;
    // Stack of currently open objects/arrays; the top element drives separators and indentation.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Writes the separator/indentation that must precede the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = "  ";

#ifndef _VMA_JSON_WRITER_FUNCTIONS
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}

VmaJsonWriter::~VmaJsonWriter()
{
    // The document must be complete: no open string, no open object/array.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}

void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}

void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}

void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}

// Appends pStr with JSON escaping: backslash, double quote, and control characters.
void VmaJsonWriter::ContinueString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);

    const size_t strLen = strlen(pStr);
    for (size_t i = 0; i < strLen; ++i)
    {
        char ch = pStr[i];
        if (ch == '\\')
        {
            m_SB.Add("\\\\");
        }
        else if (ch == '"')
        {
            m_SB.Add("\\\"");
        }
        else if ((uint8_t)ch >= 32)
        {
            // Printable (and any non-ASCII) byte - emitted verbatim.
            m_SB.Add(ch);
        }
        else switch (ch)
        {
        case '\b':
            m_SB.Add("\\b");
            break;
        case '\f':
            m_SB.Add("\\f");
            break;
        case '\n':
            m_SB.Add("\\n");
            break;
        case '\r':
            m_SB.Add("\\r");
            break;
        case '\t':
            m_SB.Add("\\t");
            break;
        default:
            VMA_ASSERT(0 && "Character not currently supported.");
        }
    }
}

void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}

void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if (pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}

void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}

// Emits the separator (": " or ", ") and indentation that must precede the next
// value, and validates key/value alternation inside objects.
void VmaJsonWriter::BeginValue(bool isString)
{
    if (!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, even positions (0, 2, ...) must be string keys.
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if (currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}

// Writes a newline plus one INDENT per open collection (one less when closing a
// collection). No-op when the innermost collection is in single-line mode.
void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();

        size_t count = m_Stack.size();
        if (count > 0 && oneLess)
        {
            --count;
        }
        for (size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
#endif // _VMA_JSON_WRITER_FUNCTIONS

// Writes one VmaDetailedStatistics as a JSON object.
// Min/max fields are emitted only when there is more than one sample.
static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
    json.BeginObject();

    json.WriteString("BlockCount");
    json.WriteNumber(stat.statistics.blockCount);
    json.WriteString("BlockBytes");
    json.WriteNumber(stat.statistics.blockBytes);
    json.WriteString("AllocationCount");
    json.WriteNumber(stat.statistics.allocationCount);
    json.WriteString("AllocationBytes");
    json.WriteNumber(stat.statistics.allocationBytes);
    json.WriteString("UnusedRangeCount");
    json.WriteNumber(stat.unusedRangeCount);

    if (stat.statistics.allocationCount > 1)
    {
        json.WriteString("AllocationSizeMin");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("AllocationSizeMax");
        json.WriteNumber(stat.allocationSizeMax);
    }
    if (stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSizeMin");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("UnusedRangeSizeMax");
        json.WriteNumber(stat.unusedRangeSizeMax);
    }
    json.EndObject();
}
#endif // _VMA_JSON_WRITER

#ifndef _VMA_MAPPING_HYSTERESIS

// Decides whether a memory block should keep an extra +1 mapping reference,
// based on recent Map/Unmap vs Alloc/Free activity (hysteresis avoids
// oscillating between mapping and unmapping).
class VmaMappingHysteresis
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
public:
    VmaMappingHysteresis() = default;

    uint32_t GetExtraMapping() const { return m_ExtraMapping; }

    // Call when Map was called.
    // Returns true if switched to extra +1 mapping reference count.
    bool PostMap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
        {
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
            {
                m_ExtraMapping = 1;
     m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

    // Call when Unmap was called.
    void PostUnmap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
            ++m_MajorCounter;
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was made from the memory block.
    void PostAlloc()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
            ++m_MajorCounter;
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was freed from the memory block.
    // Returns true if switched to extra -1 mapping reference count.
    bool PostFree()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
        {
            ++m_MajorCounter;
            // Drop the extra mapping only when the major counter clearly
            // dominates the minor one, to avoid flip-flopping between states.
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
                m_MajorCounter > m_MinorCounter + 1)
            {
                m_ExtraMapping = 0;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

private:
    static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;

    uint32_t m_MinorCounter = 0;
    uint32_t m_MajorCounter = 0;
    uint32_t m_ExtraMapping = 0; // 0 or 1.

    // Counterweight to the major counter: grows toward it, then pulls it down.
    void PostMinorCounter()
    {
        if(m_MinorCounter < m_MajorCounter)
        {
            ++m_MinorCounter;
        }
        else if(m_MajorCounter > 0)
        {
            --m_MajorCounter;
            --m_MinorCounter;
        }
    }
};

#endif // _VMA_MAPPING_HYSTERESIS

#if VMA_EXTERNAL_MEMORY_WIN32
// RAII owner of a Win32 HANDLE exported from a VkDeviceMemory.
// The handle is created lazily and closed in the destructor.
class VmaWin32Handle
{
public:
    VmaWin32Handle() noexcept : m_hHandle(VMA_NULL) { }
    explicit VmaWin32Handle(HANDLE hHandle) noexcept : m_hHandle(hHandle) { }
    ~VmaWin32Handle() noexcept { if (m_hHandle != VMA_NULL) { ::CloseHandle(m_hHandle); } }
    VMA_CLASS_NO_COPY_NO_MOVE(VmaWin32Handle)

public:
    // Strengthened
    // Lazily creates the cached handle (double-checked under m_Mutex when
    // useMutex), then stores a duplicate for hTargetProcess in *pHandle.
    // The caller owns the returned duplicate.
    VkResult GetHandle(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, bool useMutex, HANDLE* pHandle) noexcept
    {
        *pHandle = VMA_NULL;
        // Try to get handle first.
        if (m_hHandle != VMA_NULL)
        {
            *pHandle = Duplicate(hTargetProcess);
            return VK_SUCCESS;
        }

        VkResult res = VK_SUCCESS;
        // If failed, try to create it.
        {
            VmaMutexLockWrite lock(m_Mutex, useMutex);
            if (m_hHandle == VMA_NULL)
            {
                res = Create(device, memory, pvkGetMemoryWin32HandleKHR, &m_hHandle);
            }
        }

        *pHandle = Duplicate(hTargetProcess);
        return res;
    }

    operator bool() const noexcept { return m_hHandle != VMA_NULL; }
private:
    // Not atomic
    // Exports an opaque Win32 handle for memory via vkGetMemoryWin32HandleKHR.
    // Returns VK_ERROR_FEATURE_NOT_PRESENT when the function pointer is null.
    static VkResult Create(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE* pHandle) noexcept
    {
        VkResult res = VK_ERROR_FEATURE_NOT_PRESENT;
        if (pvkGetMemoryWin32HandleKHR != VMA_NULL)
        {
            VkMemoryGetWin32HandleInfoKHR handleInfo{ };
            handleInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
            handleInfo.memory = memory;
            handleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
            res = pvkGetMemoryWin32HandleKHR(device, &handleInfo, pHandle);
        }
        return
res;
    }
    // Duplicates the cached handle into hTargetProcess (or the current process
    // when hTargetProcess is null). Returns VMA_NULL when nothing is cached.
    HANDLE Duplicate(HANDLE hTargetProcess = VMA_NULL) const noexcept
    {
        if (!m_hHandle)
            return m_hHandle;

        HANDLE hCurrentProcess = ::GetCurrentProcess();
        HANDLE hDupHandle = VMA_NULL;
        if (!::DuplicateHandle(hCurrentProcess, m_hHandle, hTargetProcess ? hTargetProcess : hCurrentProcess, &hDupHandle, 0, FALSE, DUPLICATE_SAME_ACCESS))
        {
            VMA_ASSERT(0 && "Failed to duplicate handle.");
        }
        return hDupHandle;
    }
private:
    HANDLE m_hHandle;
    VMA_RW_MUTEX m_Mutex; // Protects access m_Handle
};
#else 
// Stub with the same storage footprint as the real class above.
class VmaWin32Handle
{
    // ABI compatibility
    void* placeholder = VMA_NULL;
    VMA_RW_MUTEX placeholder2;
};
#endif // VMA_EXTERNAL_MEMORY_WIN32


#ifndef _VMA_DEVICE_MEMORY_BLOCK
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety:
- Access to m_pMetadata must be externally synchronized.
- Map, Unmap, Bind* are synchronized internally.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
public:
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    ~VmaDeviceMemoryBlock();

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm,
        VkDeviceSize bufferImageGranularity);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }
    uint32_t GetMapRefCount() const { return m_MapCount; }

    // Call when allocation/free was made from m_pMetadata.
    // Used for m_MappingHysteresis.
    void PostAlloc(VmaAllocator hAllocator);
    void PostFree(VmaAllocator hAllocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);
#if VMA_EXTERNAL_MEMORY_WIN32
    VkResult CreateWin32Handle(
        const VmaAllocator hAllocator,
        PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR,
        HANDLE hTargetProcess,
        HANDLE* pHandle)noexcept;
#endif // VMA_EXTERNAL_MEMORY_WIN32
private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_MapAndBindMutex;
    VmaMappingHysteresis m_MappingHysteresis;
    uint32_t m_MapCount;
    void* m_pMappedData;

    VmaWin32Handle m_Handle;
};
#endif // _VMA_DEVICE_MEMORY_BLOCK

#ifndef _VMA_ALLOCATION_T
// Optional per-allocation data for dedicated allocations, allocated on demand.
struct VmaAllocationExtraData
{
    void* m_pMappedData = VMA_NULL; // Not null means memory is mapped.
    VmaWin32Handle m_Handle;
};

struct VmaAllocation_T
{
    friend struct VmaDedicatedAllocationListItemTraits;

    enum FLAGS
    {
        FLAG_PERSISTENT_MAP   = 0x01,
        FLAG_MAPPING_ALLOWED  = 0x02,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // This struct is allocated using VmaPoolAllocator.
    VmaAllocation_T(bool mappingAllowed);
    ~VmaAllocation_T();

    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VmaAllocHandle allocHandle,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped);
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        VmaAllocator allocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size);
    void Destroy(VmaAllocator allocator);

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    const char* GetName() const { return
m_pName; }
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
    bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }

    void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
    void SetName(VmaAllocator hAllocator, const char* pName);
    void FreeName(VmaAllocator hAllocator);
    uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
    VmaAllocHandle GetAllocHandle() const;
    VkDeviceSize GetOffset() const;
    VmaPool GetParentPool() const;
    VkDeviceMemory GetMemory() const;
    void* GetMappedData() const;

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; }
    // May be called only once - the usage must still be UNKNOWN.
    void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5)
    {
        VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN);
        m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5);
    }
    // May be called only once - the usage must still be UNKNOWN.
    void InitImageUsage(const VkImageCreateInfo &createInfo)
    {
        VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN);
        m_BufferImageUsage = VmaBufferImageUsage(createInfo);
    }
    void PrintParameters(class VmaJsonWriter& json) const;
#endif

#if VMA_EXTERNAL_MEMORY_WIN32
    VkResult GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* hHandle) noexcept;
#endif // VMA_EXTERNAL_MEMORY_WIN32

private:
    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VmaAllocHandle m_AllocHandle;
    };
    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
        VkDeviceMemory m_hMemory;
        VmaAllocationExtraData* m_ExtraData;
        VmaAllocation_T* m_Prev;
        VmaAllocation_T* m_Next;
    };
    // Active member is selected by m_Type (ALLOCATION_TYPE).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    char* m_pName;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS
#if VMA_STATS_STRING_ENABLED
    VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown.
#endif

    void EnsureExtraData(VmaAllocator hAllocator);
};
#endif // _VMA_ALLOCATION_T

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
// Adapter exposing VmaAllocation_T's intrusive prev/next links (valid only for
// dedicated allocations) to VmaIntrusiveLinkedList.
struct VmaDedicatedAllocationListItemTraits
{
    typedef VmaAllocation_T ItemType;

    static ItemType* GetPrev(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType* GetNext(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
    static ItemType*& AccessPrev(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType*& AccessNext(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
};
#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS

#ifndef _VMA_DEDICATED_ALLOCATION_LIST
/*
Stores linked list of VmaAllocation_T objects.
Thread-safe, synchronized internally.
*/
class VmaDedicatedAllocationList
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
public:
    VmaDedicatedAllocationList() {}
    ~VmaDedicatedAllocationList();

    void Init(bool useMutex) { m_UseMutex = useMutex; }
    bool Validate();

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    void AddStatistics(VmaStatistics& inoutStats);
#if VMA_STATS_STRING_ENABLED
    // Writes JSON array with the list of allocations.
    void BuildStatsString(VmaJsonWriter& json);
#endif

    bool IsEmpty();
    void Register(VmaAllocation alloc);
    void Unregister(VmaAllocation alloc);

private:
    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;

    bool m_UseMutex = true;
    VMA_RW_MUTEX m_Mutex;
    DedicatedAllocationLinkedList m_AllocationList;
};

#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS

VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
{
    VMA_HEAVY_ASSERT(Validate());

    // Remaining entries mean the application leaked dedicated allocations.
    if (!m_AllocationList.IsEmpty())
    {
        VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!");
    }
}

// Checks that the list's declared count matches the number of reachable nodes.
bool VmaDedicatedAllocationList::Validate()
{
    const size_t declaredCount = m_AllocationList.GetCount();
    size_t actualCount = 0;
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
++actualCount;\n    }\n    VMA_VALIDATE(actualCount == declaredCount);\n\n    return true;\n}\n\nvoid VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)\n{\n    for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item))\n    {\n        const VkDeviceSize size = item->GetSize();\n        inoutStats.statistics.blockCount++;\n        inoutStats.statistics.blockBytes += size;\n        VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());\n    }\n}\n\nvoid VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)\n{\n    VmaMutexLockRead lock(m_Mutex, m_UseMutex);\n\n    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();\n    inoutStats.blockCount += allocCount;\n    inoutStats.allocationCount += allocCount;\n\n    for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item))\n    {\n        const VkDeviceSize size = item->GetSize();\n        inoutStats.blockBytes += size;\n        inoutStats.allocationBytes += size;\n    }\n}\n\n#if VMA_STATS_STRING_ENABLED\nvoid VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)\n{\n    VmaMutexLockRead lock(m_Mutex, m_UseMutex);\n    json.BeginArray();\n    for (VmaAllocation alloc = m_AllocationList.Front();\n        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))\n    {\n        json.BeginObject(true);\n        alloc->PrintParameters(json);\n        json.EndObject();\n    }\n    json.EndArray();\n}\n#endif // VMA_STATS_STRING_ENABLED\n\nbool VmaDedicatedAllocationList::IsEmpty()\n{\n    VmaMutexLockRead lock(m_Mutex, m_UseMutex);\n    return m_AllocationList.IsEmpty();\n}\n\nvoid VmaDedicatedAllocationList::Register(VmaAllocation alloc)\n{\n    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);\n    m_AllocationList.PushBack(alloc);\n}\n\nvoid VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)\n{\n    VmaMutexLockWrite 
lock(m_Mutex, m_UseMutex);\n    m_AllocationList.Remove(alloc);\n}\n#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS\n#endif // _VMA_DEDICATED_ALLOCATION_LIST\n\n#ifndef _VMA_SUBALLOCATION\n/*\nRepresents a region of VmaDeviceMemoryBlock that is either assigned and returned as\nallocated memory block or free.\n*/\nstruct VmaSuballocation\n{\n    VkDeviceSize offset;\n    VkDeviceSize size;\n    void* userData;\n    VmaSuballocationType type;\n};\n\n// Comparator for offsets.\nstruct VmaSuballocationOffsetLess\n{\n    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const\n    {\n        return lhs.offset < rhs.offset;\n    }\n};\n\nstruct VmaSuballocationOffsetGreater\n{\n    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const\n    {\n        return lhs.offset > rhs.offset;\n    }\n};\n\nstruct VmaSuballocationItemSizeLess\n{\n    bool operator()(const VmaSuballocationList::iterator lhs,\n        const VmaSuballocationList::iterator rhs) const\n    {\n        return lhs->size < rhs->size;\n    }\n\n    bool operator()(const VmaSuballocationList::iterator lhs,\n        VkDeviceSize rhsSize) const\n    {\n        return lhs->size < rhsSize;\n    }\n};\n#endif // _VMA_SUBALLOCATION\n\n#ifndef _VMA_ALLOCATION_REQUEST\n/*\nParameters of planned allocation inside a VmaDeviceMemoryBlock.\nitem points to a FREE suballocation.\n*/\nstruct VmaAllocationRequest\n{\n    VmaAllocHandle allocHandle;\n    VkDeviceSize size;\n    VmaSuballocationList::iterator item;\n    void* customData;\n    uint64_t algorithmData;\n    VmaAllocationRequestType type;\n};\n#endif // _VMA_ALLOCATION_REQUEST\n\n#ifndef _VMA_BLOCK_METADATA\n/*\nData structure used for bookkeeping of allocations and unused ranges of memory\nin a single VkDeviceMemory block.\n*/\nclass VmaBlockMetadata\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)\npublic:\n    // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole 
lifetime of this object.\n    VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,\n        VkDeviceSize bufferImageGranularity, bool isVirtual);\n    virtual ~VmaBlockMetadata() = default;\n\n    virtual void Init(VkDeviceSize size) { m_Size = size; }\n    bool IsVirtual() const { return m_IsVirtual; }\n    VkDeviceSize GetSize() const { return m_Size; }\n\n    // Validates all data structures inside this object. If not valid, returns false.\n    virtual bool Validate() const = 0;\n    virtual size_t GetAllocationCount() const = 0;\n    virtual size_t GetFreeRegionsCount() const = 0;\n    virtual VkDeviceSize GetSumFreeSize() const = 0;\n    // Returns true if this block is empty - contains only single free suballocation.\n    virtual bool IsEmpty() const = 0;\n    virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;\n    virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;\n    virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;\n\n    virtual VmaAllocHandle GetAllocationListBegin() const = 0;\n    virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;\n    virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;\n\n    // Shouldn't modify blockCount.\n    virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;\n    virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;\n\n#if VMA_STATS_STRING_ENABLED\n    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;\n#endif\n\n    // Tries to find a place for suballocation with given parameters inside this block.\n    // If succeeded, fills pAllocationRequest and returns true.\n    // If failed, returns false.\n    virtual bool CreateAllocationRequest(\n        VkDeviceSize allocSize,\n        VkDeviceSize allocAlignment,\n        bool upperAddress,\n        VmaSuballocationType allocType,\n        // Always one of 
VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.\n        uint32_t strategy,\n        VmaAllocationRequest* pAllocationRequest) = 0;\n\n    virtual VkResult CheckCorruption(const void* pBlockData) = 0;\n\n    // Makes actual allocation based on request. Request must already be checked and valid.\n    virtual void Alloc(\n        const VmaAllocationRequest& request,\n        VmaSuballocationType type,\n        void* userData) = 0;\n\n    // Frees suballocation assigned to given memory region.\n    virtual void Free(VmaAllocHandle allocHandle) = 0;\n\n    // Frees all allocations.\n    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!\n    virtual void Clear() = 0;\n\n    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;\n    virtual void DebugLogAllAllocations() const = 0;\n\nprotected:\n    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }\n    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }\n    VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 
0 : VMA_DEBUG_MARGIN); }\n\n    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;\n#if VMA_STATS_STRING_ENABLED\n    // mapRefCount == UINT32_MAX means unspecified.\n    void PrintDetailedMap_Begin(class VmaJsonWriter& json,\n        VkDeviceSize unusedBytes,\n        size_t allocationCount,\n        size_t unusedRangeCount) const;\n    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,\n        VkDeviceSize offset, VkDeviceSize size, void* userData) const;\n    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,\n        VkDeviceSize offset,\n        VkDeviceSize size) const;\n    void PrintDetailedMap_End(class VmaJsonWriter& json) const;\n#endif\n\nprivate:\n    VkDeviceSize m_Size;\n    const VkAllocationCallbacks* m_pAllocationCallbacks;\n    const VkDeviceSize m_BufferImageGranularity;\n    const bool m_IsVirtual;\n};\n\n#ifndef _VMA_BLOCK_METADATA_FUNCTIONS\nVmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,\n    VkDeviceSize bufferImageGranularity, bool isVirtual)\n    : m_Size(0),\n    m_pAllocationCallbacks(pAllocationCallbacks),\n    m_BufferImageGranularity(bufferImageGranularity),\n    m_IsVirtual(isVirtual) {}\n\nvoid VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const\n{\n    if (IsVirtual())\n    {\n        VMA_LEAK_LOG_FORMAT(\"UNFREED VIRTUAL ALLOCATION; Offset: %\" PRIu64 \"; Size: %\" PRIu64 \"; UserData: %p\", offset, size, userData);\n    }\n    else\n    {\n        VMA_ASSERT(userData != VMA_NULL);\n        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);\n\n        userData = allocation->GetUserData();\n        const char* name = allocation->GetName();\n\n#if VMA_STATS_STRING_ENABLED\n        VMA_LEAK_LOG_FORMAT(\"UNFREED ALLOCATION; Offset: %\" PRIu64 \"; Size: %\" PRIu64 \"; UserData: %p; Name: %s; Type: %s; Usage: %\" PRIu64,\n            offset, size, userData, name ? 
name : \"vma_empty\",\n            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],\n            (uint64_t)allocation->GetBufferImageUsage().Value);\n#else\n        VMA_LEAK_LOG_FORMAT(\"UNFREED ALLOCATION; Offset: %\" PRIu64 \"; Size: %\" PRIu64 \"; UserData: %p; Name: %s; Type: %u\",\n            offset, size, userData, name ? name : \"vma_empty\",\n            (unsigned)allocation->GetSuballocationType());\n#endif // VMA_STATS_STRING_ENABLED\n    }\n\n}\n\n#if VMA_STATS_STRING_ENABLED\nvoid VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,\n    VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const\n{\n    json.WriteString(\"TotalBytes\");\n    json.WriteNumber(GetSize());\n\n    json.WriteString(\"UnusedBytes\");\n    json.WriteNumber(unusedBytes);\n\n    json.WriteString(\"Allocations\");\n    json.WriteNumber((uint64_t)allocationCount);\n\n    json.WriteString(\"UnusedRanges\");\n    json.WriteNumber((uint64_t)unusedRangeCount);\n\n    json.WriteString(\"Suballocations\");\n    json.BeginArray();\n}\n\nvoid VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,\n    VkDeviceSize offset, VkDeviceSize size, void* userData) const\n{\n    json.BeginObject(true);\n\n    json.WriteString(\"Offset\");\n    json.WriteNumber(offset);\n\n    if (IsVirtual())\n    {\n        json.WriteString(\"Size\");\n        json.WriteNumber(size);\n        if (userData)\n        {\n            json.WriteString(\"CustomData\");\n            json.BeginString();\n            json.ContinueString_Pointer(userData);\n            json.EndString();\n        }\n    }\n    else\n    {\n        ((VmaAllocation)userData)->PrintParameters(json);\n    }\n\n    json.EndObject();\n}\n\nvoid VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,\n    VkDeviceSize offset, VkDeviceSize size) const\n{\n    json.BeginObject(true);\n\n    json.WriteString(\"Offset\");\n    json.WriteNumber(offset);\n\n    
json.WriteString(\"Type\");\n    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);\n\n    json.WriteString(\"Size\");\n    json.WriteNumber(size);\n\n    json.EndObject();\n}\n\nvoid VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const\n{\n    json.EndArray();\n}\n#endif // VMA_STATS_STRING_ENABLED\n#endif // _VMA_BLOCK_METADATA_FUNCTIONS\n#endif // _VMA_BLOCK_METADATA\n\n#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY\n// Before deleting object of this class remember to call 'Destroy()'\nclass VmaBlockBufferImageGranularity final\n{\npublic:\n    struct ValidationContext\n    {\n        const VkAllocationCallbacks* allocCallbacks;\n        uint16_t* pageAllocs;\n    };\n\n    VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);\n    ~VmaBlockBufferImageGranularity();\n\n    bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }\n\n    void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);\n    // Before destroying object you must call free it's memory\n    void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);\n\n    void RoundupAllocRequest(VmaSuballocationType allocType,\n        VkDeviceSize& inOutAllocSize,\n        VkDeviceSize& inOutAllocAlignment) const;\n\n    bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,\n        VkDeviceSize allocSize,\n        VkDeviceSize blockOffset,\n        VkDeviceSize blockSize,\n        VmaSuballocationType allocType) const;\n\n    void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);\n    void FreePages(VkDeviceSize offset, VkDeviceSize size);\n    void Clear();\n\n    ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,\n        bool isVirutal) const;\n    bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;\n    bool FinishValidation(ValidationContext& ctx) const;\n\nprivate:\n    static const 
uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;\n\n    struct RegionInfo\n    {\n        uint8_t allocType;\n        uint16_t allocCount;\n    };\n\n    VkDeviceSize m_BufferImageGranularity;\n    uint32_t m_RegionCount;\n    RegionInfo* m_RegionInfo;\n\n    uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }\n    uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }\n\n    uint32_t OffsetToPageIndex(VkDeviceSize offset) const;\n    void AllocPage(RegionInfo& page, uint8_t allocType);\n};\n\n#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS\nVmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)\n    : m_BufferImageGranularity(bufferImageGranularity),\n    m_RegionCount(0),\n    m_RegionInfo(VMA_NULL) {}\n\nVmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()\n{\n    VMA_ASSERT(m_RegionInfo == VMA_NULL && \"Free not called before destroying object!\");\n}\n\nvoid VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)\n{\n    if (IsEnabled())\n    {\n        m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));\n        m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);\n        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));\n    }\n}\n\nvoid VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)\n{\n    if (m_RegionInfo)\n    {\n        vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);\n        m_RegionInfo = VMA_NULL;\n    }\n}\n\nvoid VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,\n    VkDeviceSize& inOutAllocSize,\n    VkDeviceSize& inOutAllocAlignment) const\n{\n    if (m_BufferImageGranularity > 1 &&\n        
m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)\n    {\n        if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||\n            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||\n            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)\n        {\n            inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);\n            inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);\n        }\n    }\n}\n\nbool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,\n    VkDeviceSize allocSize,\n    VkDeviceSize blockOffset,\n    VkDeviceSize blockSize,\n    VmaSuballocationType allocType) const\n{\n    if (IsEnabled())\n    {\n        uint32_t startPage = GetStartPage(inOutAllocOffset);\n        if (m_RegionInfo[startPage].allocCount > 0 &&\n            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))\n        {\n            inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);\n            if (blockSize < allocSize + inOutAllocOffset - blockOffset)\n                return true;\n            ++startPage;\n        }\n        uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);\n        if (endPage != startPage &&\n            m_RegionInfo[endPage].allocCount > 0 &&\n            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))\n        {\n            return true;\n        }\n    }\n    return false;\n}\n\nvoid VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)\n{\n    if (IsEnabled())\n    {\n        uint32_t startPage = GetStartPage(offset);\n        AllocPage(m_RegionInfo[startPage], allocType);\n\n        uint32_t endPage = GetEndPage(offset, size);\n        if (startPage != endPage)\n            AllocPage(m_RegionInfo[endPage], allocType);\n    }\n}\n\nvoid 
VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)\n{\n    if (IsEnabled())\n    {\n        uint32_t startPage = GetStartPage(offset);\n        --m_RegionInfo[startPage].allocCount;\n        if (m_RegionInfo[startPage].allocCount == 0)\n            m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;\n        uint32_t endPage = GetEndPage(offset, size);\n        if (startPage != endPage)\n        {\n            --m_RegionInfo[endPage].allocCount;\n            if (m_RegionInfo[endPage].allocCount == 0)\n                m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;\n        }\n    }\n}\n\nvoid VmaBlockBufferImageGranularity::Clear()\n{\n    if (m_RegionInfo)\n        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));\n}\n\nVmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(\n    const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const\n{\n    ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };\n    if (!isVirutal && IsEnabled())\n    {\n        ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);\n        memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));\n    }\n    return ctx;\n}\n\nbool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,\n    VkDeviceSize offset, VkDeviceSize size) const\n{\n    if (IsEnabled())\n    {\n        uint32_t start = GetStartPage(offset);\n        ++ctx.pageAllocs[start];\n        VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);\n\n        uint32_t end = GetEndPage(offset, size);\n        if (start != end)\n        {\n            ++ctx.pageAllocs[end];\n            VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);\n        }\n    }\n    return true;\n}\n\nbool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const\n{\n    // Check proper page structure\n    if (IsEnabled())\n    {\n        VMA_ASSERT(ctx.pageAllocs != VMA_NULL && 
\"Validation context not initialized!\");\n\n        for (uint32_t page = 0; page < m_RegionCount; ++page)\n        {\n            VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);\n        }\n        vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);\n        ctx.pageAllocs = VMA_NULL;\n    }\n    return true;\n}\n\nuint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const\n{\n    return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));\n}\n\nvoid VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)\n{\n    // When current alloc type is free then it can be overridden by new type\n    if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))\n        page.allocType = allocType;\n\n    ++page.allocCount;\n}\n#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS\n#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY\n\n#ifndef _VMA_BLOCK_METADATA_LINEAR\n/*\nAllocations and their references in internal data structure look like this:\n\nif(m_2ndVectorMode == SECOND_VECTOR_EMPTY):\n\n        0 +-------+\n          |       |\n          |       |\n          |       |\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount]\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount + 1]\n          +-------+\n          |  ...  |\n          +-------+\n          | Alloc |  1st[1st.size() - 1]\n          +-------+\n          |       |\n          |       |\n          |       |\nGetSize() +-------+\n\nif(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):\n\n        0 +-------+\n          | Alloc |  2nd[0]\n          +-------+\n          | Alloc |  2nd[1]\n          +-------+\n          |  ...  
|\n          +-------+\n          | Alloc |  2nd[2nd.size() - 1]\n          +-------+\n          |       |\n          |       |\n          |       |\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount]\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount + 1]\n          +-------+\n          |  ...  |\n          +-------+\n          | Alloc |  1st[1st.size() - 1]\n          +-------+\n          |       |\nGetSize() +-------+\n\nif(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):\n\n        0 +-------+\n          |       |\n          |       |\n          |       |\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount]\n          +-------+\n          | Alloc |  1st[m_1stNullItemsBeginCount + 1]\n          +-------+\n          |  ...  |\n          +-------+\n          | Alloc |  1st[1st.size() - 1]\n          +-------+\n          |       |\n          |       |\n          |       |\n          +-------+\n          | Alloc |  2nd[2nd.size() - 1]\n          +-------+\n          |  ...  
|\n          +-------+\n          | Alloc |  2nd[1]\n          +-------+\n          | Alloc |  2nd[0]\nGetSize() +-------+\n\n*/\nclass VmaBlockMetadata_Linear : public VmaBlockMetadata\n{\n    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)\npublic:\n    VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,\n        VkDeviceSize bufferImageGranularity, bool isVirtual);\n    virtual ~VmaBlockMetadata_Linear() = default;\n\n    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }\n    bool IsEmpty() const override { return GetAllocationCount() == 0; }\n    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }\n\n    void Init(VkDeviceSize size) override;\n    bool Validate() const override;\n    size_t GetAllocationCount() const override;\n    size_t GetFreeRegionsCount() const override;\n\n    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;\n    void AddStatistics(VmaStatistics& inoutStats) const override;\n\n#if VMA_STATS_STRING_ENABLED\n    void PrintDetailedMap(class VmaJsonWriter& json) const override;\n#endif\n\n    bool CreateAllocationRequest(\n        VkDeviceSize allocSize,\n        VkDeviceSize allocAlignment,\n        bool upperAddress,\n        VmaSuballocationType allocType,\n        uint32_t strategy,\n        VmaAllocationRequest* pAllocationRequest) override;\n\n    VkResult CheckCorruption(const void* pBlockData) override;\n\n    void Alloc(\n        const VmaAllocationRequest& request,\n        VmaSuballocationType type,\n        void* userData) override;\n\n    void Free(VmaAllocHandle allocHandle) override;\n    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;\n    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;\n    VmaAllocHandle GetAllocationListBegin() const override;\n    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const 
override;\n    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;\n    void Clear() override;\n    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;\n    void DebugLogAllAllocations() const override;\n\nprivate:\n    /*\n    There are two suballocation vectors, used in ping-pong way.\n    The one with index m_1stVectorIndex is called 1st.\n    The one with index (m_1stVectorIndex ^ 1) is called 2nd.\n    2nd can be non-empty only when 1st is not empty.\n    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.\n    */\n    typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;\n\n    enum SECOND_VECTOR_MODE\n    {\n        SECOND_VECTOR_EMPTY,\n        /*\n        Suballocations in 2nd vector are created later than the ones in 1st, but they\n        all have smaller offset.\n        */\n        SECOND_VECTOR_RING_BUFFER,\n        /*\n        Suballocations in 2nd vector are upper side of double stack.\n        They all have offsets higher than those in 1st vector.\n        Top of this stack means smaller offsets, but higher indices in this vector.\n        */\n        SECOND_VECTOR_DOUBLE_STACK,\n    };\n\n    VkDeviceSize m_SumFreeSize;\n    SuballocationVectorType m_Suballocations0, m_Suballocations1;\n    uint32_t m_1stVectorIndex;\n    SECOND_VECTOR_MODE m_2ndVectorMode;\n    // Number of items in 1st vector with hAllocation = null at the beginning.\n    size_t m_1stNullItemsBeginCount;\n    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.\n    size_t m_1stNullItemsMiddleCount;\n    // Number of items in 2nd vector with hAllocation = null.\n    size_t m_2ndNullItemsCount;\n\n    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }\n    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? 
m_Suballocations0 : m_Suballocations1; }\n    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }\n    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }\n\n    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;\n    bool ShouldCompact1st() const;\n    void CleanupAfterFree();\n\n    bool CreateAllocationRequest_LowerAddress(\n        VkDeviceSize allocSize,\n        VkDeviceSize allocAlignment,\n        VmaSuballocationType allocType,\n        uint32_t strategy,\n        VmaAllocationRequest* pAllocationRequest);\n    bool CreateAllocationRequest_UpperAddress(\n        VkDeviceSize allocSize,\n        VkDeviceSize allocAlignment,\n        VmaSuballocationType allocType,\n        uint32_t strategy,\n        VmaAllocationRequest* pAllocationRequest);\n};\n\n#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS\nVmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,\n    VkDeviceSize bufferImageGranularity, bool isVirtual)\n    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),\n    m_SumFreeSize(0),\n    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),\n    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),\n    m_1stVectorIndex(0),\n    m_2ndVectorMode(SECOND_VECTOR_EMPTY),\n    m_1stNullItemsBeginCount(0),\n    m_1stNullItemsMiddleCount(0),\n    m_2ndNullItemsCount(0) {}\n\nvoid VmaBlockMetadata_Linear::Init(VkDeviceSize size)\n{\n    VmaBlockMetadata::Init(size);\n    m_SumFreeSize = size;\n}\n\nbool VmaBlockMetadata_Linear::Validate() const\n{\n    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n\n    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == 
SECOND_VECTOR_EMPTY));\n    VMA_VALIDATE(!suballocations1st.empty() ||\n        suballocations2nd.empty() ||\n        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);\n\n    if (!suballocations1st.empty())\n    {\n        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.\n        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);\n        // Null item at the end should be just pop_back().\n        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);\n    }\n    if (!suballocations2nd.empty())\n    {\n        // Null item at the end should be just pop_back().\n        VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);\n    }\n\n    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());\n    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());\n\n    VkDeviceSize sumUsedSize = 0;\n    const size_t suballoc1stCount = suballocations1st.size();\n    const VkDeviceSize debugMargin = GetDebugMargin();\n    VkDeviceSize offset = 0;\n\n    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)\n    {\n        const size_t suballoc2ndCount = suballocations2nd.size();\n        size_t nullItem2ndCount = 0;\n        for (size_t i = 0; i < suballoc2ndCount; ++i)\n        {\n            const VmaSuballocation& suballoc = suballocations2nd[i];\n            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);\n\n            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;\n            if (!IsVirtual())\n            {\n                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));\n            }\n            VMA_VALIDATE(suballoc.offset >= offset);\n\n            if (!currFree)\n            {\n                if (!IsVirtual())\n                {\n                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);\n                    
VMA_VALIDATE(alloc->GetSize() == suballoc.size);\n                }\n                sumUsedSize += suballoc.size;\n            }\n            else\n            {\n                ++nullItem2ndCount;\n            }\n\n            offset = suballoc.offset + suballoc.size + debugMargin;\n        }\n\n        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);\n    }\n\n    for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)\n    {\n        const VmaSuballocation& suballoc = suballocations1st[i];\n        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&\n            suballoc.userData == VMA_NULL);\n    }\n\n    size_t nullItem1stCount = m_1stNullItemsBeginCount;\n\n    for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)\n    {\n        const VmaSuballocation& suballoc = suballocations1st[i];\n        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);\n\n        VmaAllocation const alloc = (VmaAllocation)suballoc.userData;\n        if (!IsVirtual())\n        {\n            VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));\n        }\n        VMA_VALIDATE(suballoc.offset >= offset);\n        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);\n\n        if (!currFree)\n        {\n            if (!IsVirtual())\n            {\n                VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);\n                VMA_VALIDATE(alloc->GetSize() == suballoc.size);\n            }\n            sumUsedSize += suballoc.size;\n        }\n        else\n        {\n            ++nullItem1stCount;\n        }\n\n        offset = suballoc.offset + suballoc.size + debugMargin;\n    }\n    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);\n\n    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)\n    {\n        const size_t suballoc2ndCount = suballocations2nd.size();\n        size_t nullItem2ndCount = 0;\n        for (size_t i = suballoc2ndCount; i--; )\n        {\n    
        const VmaSuballocation& suballoc = suballocations2nd[i];\n            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);\n\n            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;\n            if (!IsVirtual())\n            {\n                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));\n            }\n            VMA_VALIDATE(suballoc.offset >= offset);\n\n            if (!currFree)\n            {\n                if (!IsVirtual())\n                {\n                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);\n                    VMA_VALIDATE(alloc->GetSize() == suballoc.size);\n                }\n                sumUsedSize += suballoc.size;\n            }\n            else\n            {\n                ++nullItem2ndCount;\n            }\n\n            offset = suballoc.offset + suballoc.size + debugMargin;\n        }\n\n        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);\n    }\n\n    VMA_VALIDATE(offset <= GetSize());\n    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);\n\n    return true;\n}\n\nsize_t VmaBlockMetadata_Linear::GetAllocationCount() const\n{\n    return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +\n        AccessSuballocations2nd().size() - m_2ndNullItemsCount;\n}\n\nsize_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const\n{\n    // Function only used for defragmentation, which is disabled for this algorithm\n    VMA_ASSERT(0);\n    return SIZE_MAX;\n}\n\nvoid VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const\n{\n    const VkDeviceSize size = GetSize();\n    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n    const size_t suballoc1stCount = suballocations1st.size();\n    const size_t suballoc2ndCount = suballocations2nd.size();\n\n    
inoutStats.statistics.blockCount++;\n    inoutStats.statistics.blockBytes += size;\n\n    VkDeviceSize lastOffset = 0;\n\n    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)\n    {\n        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;\n        size_t nextAlloc2ndIndex = 0;\n        while (lastOffset < freeSpace2ndTo1stEnd)\n        {\n            // Find next non-null allocation or move nextAllocIndex to the end.\n            while (nextAlloc2ndIndex < suballoc2ndCount &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                ++nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex < suballoc2ndCount)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n                }\n\n                // 2. Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);\n\n                // 3. 
Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                ++nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                // There is free space from lastOffset to freeSpace2ndTo1stEnd.\n                if (lastOffset < freeSpace2ndTo1stEnd)\n                {\n                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;\n                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n                }\n\n                // End of loop.\n                lastOffset = freeSpace2ndTo1stEnd;\n            }\n        }\n    }\n\n    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;\n    const VkDeviceSize freeSpace1stTo2ndEnd =\n        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;\n    while (lastOffset < freeSpace1stTo2ndEnd)\n    {\n        // Find next non-null allocation or move nextAllocIndex to the end.\n        while (nextAlloc1stIndex < suballoc1stCount &&\n            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)\n        {\n            ++nextAlloc1stIndex;\n        }\n\n        // Found non-null allocation.\n        if (nextAlloc1stIndex < suballoc1stCount)\n        {\n            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];\n\n            // 1. Process free space before this allocation.\n            if (lastOffset < suballoc.offset)\n            {\n                // There is free space from lastOffset to suballoc.offset.\n                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n            }\n\n            // 2. Process this allocation.\n            // There is allocation with suballoc.offset, suballoc.size.\n            VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);\n\n            // 3. 
Prepare for next iteration.\n            lastOffset = suballoc.offset + suballoc.size;\n            ++nextAlloc1stIndex;\n        }\n        // We are at the end.\n        else\n        {\n            // There is free space from lastOffset to freeSpace1stTo2ndEnd.\n            if (lastOffset < freeSpace1stTo2ndEnd)\n            {\n                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;\n                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n            }\n\n            // End of loop.\n            lastOffset = freeSpace1stTo2ndEnd;\n        }\n    }\n\n    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)\n    {\n        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;\n        while (lastOffset < size)\n        {\n            // Find next non-null allocation or move nextAllocIndex to the end.\n            while (nextAlloc2ndIndex != SIZE_MAX &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                --nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex != SIZE_MAX)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n                }\n\n                // 2. Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);\n\n                // 3. 
Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                --nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                // There is free space from lastOffset to size.\n                if (lastOffset < size)\n                {\n                    const VkDeviceSize unusedRangeSize = size - lastOffset;\n                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);\n                }\n\n                // End of loop.\n                lastOffset = size;\n            }\n        }\n    }\n}\n\nvoid VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const\n{\n    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n    const VkDeviceSize size = GetSize();\n    const size_t suballoc1stCount = suballocations1st.size();\n    const size_t suballoc2ndCount = suballocations2nd.size();\n\n    inoutStats.blockCount++;\n    inoutStats.blockBytes += size;\n    inoutStats.allocationBytes += size - m_SumFreeSize;\n\n    VkDeviceSize lastOffset = 0;\n\n    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)\n    {\n        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;\n        size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;\n        while (lastOffset < freeSpace2ndTo1stEnd)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex < suballoc2ndCount &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                ++nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex < suballoc2ndCount)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n         
       // Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                ++inoutStats.allocationCount;\n\n                // Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                ++nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                // End of loop.\n                lastOffset = freeSpace2ndTo1stEnd;\n            }\n        }\n    }\n\n    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;\n    const VkDeviceSize freeSpace1stTo2ndEnd =\n        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;\n    while (lastOffset < freeSpace1stTo2ndEnd)\n    {\n        // Find next non-null allocation or move nextAllocIndex to the end.\n        while (nextAlloc1stIndex < suballoc1stCount &&\n            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)\n        {\n            ++nextAlloc1stIndex;\n        }\n\n        // Found non-null allocation.\n        if (nextAlloc1stIndex < suballoc1stCount)\n        {\n            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];\n\n            // Process this allocation.\n            // There is allocation with suballoc.offset, suballoc.size.\n            ++inoutStats.allocationCount;\n\n            // Prepare for next iteration.\n            lastOffset = suballoc.offset + suballoc.size;\n            ++nextAlloc1stIndex;\n        }\n        // We are at the end.\n        else\n        {\n            // End of loop.\n            lastOffset = freeSpace1stTo2ndEnd;\n        }\n    }\n\n    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)\n    {\n        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;\n        while (lastOffset < size)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex != SIZE_MAX &&\n      
          suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                --nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex != SIZE_MAX)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                ++inoutStats.allocationCount;\n\n                // Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                --nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                // End of loop.\n                lastOffset = size;\n            }\n        }\n    }\n}\n\n#if VMA_STATS_STRING_ENABLED\nvoid VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const\n{\n    const VkDeviceSize size = GetSize();\n    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n    const size_t suballoc1stCount = suballocations1st.size();\n    const size_t suballoc2ndCount = suballocations2nd.size();\n\n    // FIRST PASS\n\n    size_t unusedRangeCount = 0;\n    VkDeviceSize usedBytes = 0;\n\n    VkDeviceSize lastOffset = 0;\n\n    size_t alloc2ndCount = 0;\n    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)\n    {\n        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;\n        size_t nextAlloc2ndIndex = 0;\n        while (lastOffset < freeSpace2ndTo1stEnd)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex < suballoc2ndCount &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                ++nextAlloc2ndIndex;\n      
      }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex < suballoc2ndCount)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    ++unusedRangeCount;\n                }\n\n                // 2. Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                ++alloc2ndCount;\n                usedBytes += suballoc.size;\n\n                // 3. Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                ++nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                if (lastOffset < freeSpace2ndTo1stEnd)\n                {\n                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.\n                    ++unusedRangeCount;\n                }\n\n                // End of loop.\n                lastOffset = freeSpace2ndTo1stEnd;\n            }\n        }\n    }\n\n    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;\n    size_t alloc1stCount = 0;\n    const VkDeviceSize freeSpace1stTo2ndEnd =\n        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? 
suballocations2nd.back().offset : size;\n    while (lastOffset < freeSpace1stTo2ndEnd)\n    {\n        // Find next non-null allocation or move nextAllocIndex to the end.\n        while (nextAlloc1stIndex < suballoc1stCount &&\n            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)\n        {\n            ++nextAlloc1stIndex;\n        }\n\n        // Found non-null allocation.\n        if (nextAlloc1stIndex < suballoc1stCount)\n        {\n            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];\n\n            // 1. Process free space before this allocation.\n            if (lastOffset < suballoc.offset)\n            {\n                // There is free space from lastOffset to suballoc.offset.\n                ++unusedRangeCount;\n            }\n\n            // 2. Process this allocation.\n            // There is allocation with suballoc.offset, suballoc.size.\n            ++alloc1stCount;\n            usedBytes += suballoc.size;\n\n            // 3. 
Prepare for next iteration.\n            lastOffset = suballoc.offset + suballoc.size;\n            ++nextAlloc1stIndex;\n        }\n        // We are at the end.\n        else\n        {\n            if (lastOffset < freeSpace1stTo2ndEnd)\n            {\n                // There is free space from lastOffset to freeSpace1stTo2ndEnd.\n                ++unusedRangeCount;\n            }\n\n            // End of loop.\n            lastOffset = freeSpace1stTo2ndEnd;\n        }\n    }\n\n    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)\n    {\n        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;\n        while (lastOffset < size)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex != SIZE_MAX &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                --nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex != SIZE_MAX)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    ++unusedRangeCount;\n                }\n\n                // 2. Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                ++alloc2ndCount;\n                usedBytes += suballoc.size;\n\n                // 3. 
Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                --nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                if (lastOffset < size)\n                {\n                    // There is free space from lastOffset to size.\n                    ++unusedRangeCount;\n                }\n\n                // End of loop.\n                lastOffset = size;\n            }\n        }\n    }\n\n    const VkDeviceSize unusedBytes = size - usedBytes;\n    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);\n\n    // SECOND PASS\n    lastOffset = 0;\n\n    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)\n    {\n        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;\n        size_t nextAlloc2ndIndex = 0;\n        while (lastOffset < freeSpace2ndTo1stEnd)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex < suballoc2ndCount &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                ++nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex < suballoc2ndCount)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n                }\n\n                // 2. 
Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);\n\n                // 3. Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                ++nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                if (lastOffset < freeSpace2ndTo1stEnd)\n                {\n                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.\n                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;\n                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n                }\n\n                // End of loop.\n                lastOffset = freeSpace2ndTo1stEnd;\n            }\n        }\n    }\n\n    nextAlloc1stIndex = m_1stNullItemsBeginCount;\n    while (lastOffset < freeSpace1stTo2ndEnd)\n    {\n        // Find next non-null allocation or move nextAllocIndex to the end.\n        while (nextAlloc1stIndex < suballoc1stCount &&\n            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)\n        {\n            ++nextAlloc1stIndex;\n        }\n\n        // Found non-null allocation.\n        if (nextAlloc1stIndex < suballoc1stCount)\n        {\n            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];\n\n            // 1. Process free space before this allocation.\n            if (lastOffset < suballoc.offset)\n            {\n                // There is free space from lastOffset to suballoc.offset.\n                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n            }\n\n            // 2. 
Process this allocation.\n            // There is allocation with suballoc.offset, suballoc.size.\n            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);\n\n            // 3. Prepare for next iteration.\n            lastOffset = suballoc.offset + suballoc.size;\n            ++nextAlloc1stIndex;\n        }\n        // We are at the end.\n        else\n        {\n            if (lastOffset < freeSpace1stTo2ndEnd)\n            {\n                // There is free space from lastOffset to freeSpace1stTo2ndEnd.\n                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;\n                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n            }\n\n            // End of loop.\n            lastOffset = freeSpace1stTo2ndEnd;\n        }\n    }\n\n    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)\n    {\n        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;\n        while (lastOffset < size)\n        {\n            // Find next non-null allocation or move nextAlloc2ndIndex to the end.\n            while (nextAlloc2ndIndex != SIZE_MAX &&\n                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)\n            {\n                --nextAlloc2ndIndex;\n            }\n\n            // Found non-null allocation.\n            if (nextAlloc2ndIndex != SIZE_MAX)\n            {\n                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];\n\n                // 1. Process free space before this allocation.\n                if (lastOffset < suballoc.offset)\n                {\n                    // There is free space from lastOffset to suballoc.offset.\n                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;\n                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n                }\n\n                // 2. 
Process this allocation.\n                // There is allocation with suballoc.offset, suballoc.size.\n                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);\n\n                // 3. Prepare for next iteration.\n                lastOffset = suballoc.offset + suballoc.size;\n                --nextAlloc2ndIndex;\n            }\n            // We are at the end.\n            else\n            {\n                if (lastOffset < size)\n                {\n                    // There is free space from lastOffset to size.\n                    const VkDeviceSize unusedRangeSize = size - lastOffset;\n                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);\n                }\n\n                // End of loop.\n                lastOffset = size;\n            }\n        }\n    }\n\n    PrintDetailedMap_End(json);\n}\n#endif // VMA_STATS_STRING_ENABLED\n\nbool VmaBlockMetadata_Linear::CreateAllocationRequest(\n    VkDeviceSize allocSize,\n    VkDeviceSize allocAlignment,\n    bool upperAddress,\n    VmaSuballocationType allocType,\n    uint32_t strategy,\n    VmaAllocationRequest* pAllocationRequest)\n{\n    VMA_ASSERT(allocSize > 0);\n    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);\n    VMA_ASSERT(pAllocationRequest != VMA_NULL);\n    VMA_HEAVY_ASSERT(Validate());\n\n    if(allocSize > GetSize())\n        return false;\n\n    pAllocationRequest->size = allocSize;\n    return upperAddress ?\n        CreateAllocationRequest_UpperAddress(\n            allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :\n        CreateAllocationRequest_LowerAddress(\n            allocSize, allocAlignment, allocType, strategy, pAllocationRequest);\n}\n\nVkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)\n{\n    VMA_ASSERT(!IsVirtual());\n    SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n    for (size_t i = m_1stNullItemsBeginCount, count = 
suballocations1st.size(); i < count; ++i)\n    {\n        const VmaSuballocation& suballoc = suballocations1st[i];\n        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)\n        {\n            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))\n            {\n                VMA_ASSERT(0 && \"MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!\");\n                return VK_ERROR_UNKNOWN;\n            }\n        }\n    }\n\n    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n    for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)\n    {\n        const VmaSuballocation& suballoc = suballocations2nd[i];\n        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)\n        {\n            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))\n            {\n                VMA_ASSERT(0 && \"MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!\");\n                return VK_ERROR_UNKNOWN;\n            }\n        }\n    }\n\n    return VK_SUCCESS;\n}\n\nvoid VmaBlockMetadata_Linear::Alloc(\n    const VmaAllocationRequest& request,\n    VmaSuballocationType type,\n    void* userData)\n{\n    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;\n    const VmaSuballocation newSuballoc = { offset, request.size, userData, type };\n\n    switch (request.type)\n    {\n    case VmaAllocationRequestType::UpperAddress:\n    {\n        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&\n            \"CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.\");\n        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();\n        suballocations2nd.push_back(newSuballoc);\n        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;\n    }\n    break;\n    case VmaAllocationRequestType::EndOf1st:\n    {\n        SuballocationVectorType& suballocations1st = AccessSuballocations1st();\n\n        
VMA_ASSERT(suballocations1st.empty() ||
            offset >= suballocations1st.back().offset + suballocations1st.back().size);
        // Check if it fits before the end of the block.
        VMA_ASSERT(offset + request.size <= GetSize());

        suballocations1st.push_back(newSuballoc);
    }
    break;
    case VmaAllocationRequestType::EndOf2nd:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();
        // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
        VMA_ASSERT(!suballocations1st.empty() &&
            offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

        switch (m_2ndVectorMode)
        {
        case SECOND_VECTOR_EMPTY:
            // First allocation from second part ring buffer.
            VMA_ASSERT(suballocations2nd.empty());
            m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
            break;
        case SECOND_VECTOR_RING_BUFFER:
            // 2-part ring buffer is already started.
            VMA_ASSERT(!suballocations2nd.empty());
            break;
        case SECOND_VECTOR_DOUBLE_STACK:
            VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
            break;
        default:
            VMA_ASSERT(0);
        }

        suballocations2nd.push_back(newSuballoc);
    }
    break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}

// Frees the suballocation whose offset is encoded in allocHandle (offset = handle - 1).
// Fast paths are tried first: the first live item of the 1st vector, then the last item
// of the 2nd vector (ring buffer / upper stack) or of the 1st vector. Otherwise a binary
// search over the middle of each vector marks the item as a null (free) item; actual
// removal/compaction is deferred to CleanupAfterFree().
void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    // Handles for this algorithm store offset + 1 (so that 0 can mean "null handle").
    VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;

    if (!suballocations1st.empty())
    
{
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if (firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.userData = VMA_NULL;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the middle of 1st vector.
    {
        const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            // Mark as free in place; the slot is reclaimed later by CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            
++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // Ring-buffer mode keeps the 2nd vector sorted ascending by offset,
        // double-stack mode keeps it sorted descending, hence the two comparators.
        const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}

// Fills outInfo (offset, size, pUserData) for an existing allocation.
void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
{
    outInfo.offset = (VkDeviceSize)allocHandle - 1;
    VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
    outInfo.size = suballoc.size;
    outInfo.pUserData = suballoc.userData;
}

// Returns the userData stored with an existing allocation.
void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
{
    return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
}

VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return VK_NULL_HANDLE;
}

VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return VK_NULL_HANDLE;
}

VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle 
alloc) const
{
    // Function only used for defragmentation, which is disabled for this algorithm
    VMA_ASSERT(0);
    return 0;
}

// Resets the metadata to the initial empty state. The block size set by Init() is kept.
void VmaBlockMetadata_Linear::Clear()
{
    m_SumFreeSize = GetSize();
    m_Suballocations0.clear();
    m_Suballocations1.clear();
    // Leaving m_1stVectorIndex unchanged - it doesn't matter.
    m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    m_1stNullItemsBeginCount = 0;
    m_1stNullItemsMiddleCount = 0;
    m_2ndNullItemsCount = 0;
}

// Replaces the userData pointer stored with an existing allocation.
void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
{
    VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
    suballoc.userData = userData;
}

// Logs every live (non-free) suballocation from both vectors via DebugLogAllocation().
void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
        if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
            DebugLogAllocation(it->offset, it->size, it->userData);

    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
        if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
            DebugLogAllocation(it->offset, it->size, it->userData);
}

// Binary-searches both vectors for the suballocation at the given offset.
// Returns a mutable reference even though the method is const (const_cast),
// because callers like SetAllocationUserData() need to modify the found item.
// Asserts if the offset is not found.
VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the 1st vector.
    {
        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            return const_cast<VmaSuballocation&>(*it);
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the 2nd vector: sorted ascending in ring-buffer mode,
        // descending in double-stack mode, hence the comparator choice.
        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            return const_cast<VmaSuballocation&>(*it);
        }
    }

    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
}

// Heuristic: compact the 1st vector once it holds more than 32 items and the
// null (freed) items are at least 1.5x the live items (null*2 >= live*3).
bool VmaBlockMetadata_Linear::ShouldCompact1st() const
{
    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    const size_t suballocCount = AccessSuballocations1st().size();
    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}

// Housekeeping run after every Free(): trims null items from the edges of both
// vectors, optionally compacts the 1st vector, and collapses/swaps the vectors
// when one of them becomes empty.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + 
m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while (m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while (m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if (ShouldCompact1st())
        {
            // Compact 1st vector in place: slide every live item toward the front,
            // skipping free slots, then drop the tail. All null counters reset below.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++srcIndex;
                }
                if (dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if (suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip leading free items of the new 1st vector, same as at the top.
                while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which of m_Suballocations0/1 plays the role of the 1st vector.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}

// Tries to find space for a new allocation growing upward: first at the end of the
// 1st vector (or below the upper stack), then wrapped around at the end of the 2nd
// vector (ring-buffer mode). On success fills pAllocationRequest and returns true.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize debugMargin = GetDebugMargin();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        
        if (!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk backward from the last suballocation; stop at the first one
            // that no longer shares a granularity page with resultOffset.
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // In double-stack mode the upper stack bounds the usable space from above.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : blockSize;

        // There is enough free space at the end after alignment.
        if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if (!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // Index of the first live item in the 1st vector; its offset bounds the free space.
        size_t index1st = m_1stNullItemsBeginCount;

   
        // There is enough free space at the end after alignment.
        if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
            {
                for (size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}

// Tries to find space for a new allocation growing downward from the end of the
// block (double-stack upper side). Fails immediately if the block is already used
// as a ring buffer. On success fills pAllocationRequest and returns true.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if (allocSize > blockSize)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = blockSize - allocSize;
    if (!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Guard the subtraction above against unsigned underflow.
        if (allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    const VkDeviceSize debugMargin = GetDebugMargin();

    // Apply debugMargin at the end.
    if (debugMargin > 0)
    {
        if (resultOffset < debugMargin)
        {
            return false;
        }
        resultOffset -= debugMargin;
    }

    // Apply alignment.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if (VmaBlocksOnSamePage(resultOffset, 
allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if (bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if (endOf1st + debugMargin <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if (bufferImageGranularity > 1)
        {
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
        // pAllocationRequest->item unused.
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
#endif // _VMA_BLOCK_METADATA_LINEAR

#ifndef _VMA_BLOCK_METADATA_TLSF
// Two-Level Segregated Fit block metadata.
// To not search current larger region if first allocation won't succeed and skip to smaller range
// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest().
// When fragmentation and reusal of previous blocks doesn't matter then use with
// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible.
class VmaBlockMetadata_TLSF : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
public:
    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_TLSF();

    size_t GetAllocationCount() const override { return m_AllocCount; }
    // +1 accounts for the null block, which always represents one free region.
    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
    // For this algorithm a VmaAllocHandle is simply a Block* in disguise.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }

    void Init(VkDeviceSize size) override;
    bool Validate() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType 
type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    // According to original paper it should be preferable 4 or 5:
    // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
    // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
    static const uint8_t SECOND_LEVEL_INDEX = 5;
    static const uint16_t SMALL_BUFFER_SIZE = 256;
    static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
    static const uint8_t MEMORY_CLASS_SHIFT = 7;
    static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;

    // One physical region of the block. Blocks form a doubly-linked physical list;
    // free blocks are additionally linked into per-size free lists.
    class Block
    {
    public:
        VkDeviceSize offset;
        VkDeviceSize size;
        Block* prevPhysical;
        Block* nextPhysical;

        void MarkFree() { prevFree = VMA_NULL; }
        void MarkTaken() { prevFree = this; }
        bool IsFree() const { return prevFree != this; }
        void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
        Block*& PrevFree() { return prevFree; }
        Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }

    private:
        Block* prevFree; // Address of the same block here indicates that block is taken
        // nextFree is only meaningful while the block is free; userData only while taken.
        union
        {
            Block* nextFree;
            void* userData;
        };
    };

    size_t m_AllocCount;
    // Total number of free blocks besides null block
    size_t m_BlocksFreeCount;
    // Total size of free blocks excluding null block
    VkDeviceSize m_BlocksFreeSize;
    // NOTE(review): presumably a bitmap of memory classes that have free blocks — confirm in SizeToMemoryClass/FindFreeBlock.
    uint32_t m_IsFreeBitmap;
    uint8_t m_MemoryClasses;
    uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
    uint32_t m_ListsCount;
    /*
    * 0: 0-3 lists for small buffers
    * 1+: 0-(2^SLI-1) lists for normal buffers
    */
    Block** m_FreeList;
    VmaPoolAllocator<Block> m_BlockAllocator;
    // Virtual block representing the free space at the end; never in a free list.
    Block* m_NullBlock;
    VmaBlockBufferImageGranularity m_GranularityHandler;

    uint8_t SizeToMemoryClass(VkDeviceSize size) const;
    uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
    uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
    uint32_t GetListIndex(VkDeviceSize size) const;

    void RemoveFreeBlock(Block* block);
    void InsertFreeBlock(Block* block);
    void MergeBlock(Block* block, Block* prev);

    Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
    bool CheckBlock(
        Block& block,
        uint32_t listIndex,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);
};

#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_AllocCount(0),
    m_BlocksFreeCount(0),
    m_BlocksFreeSize(0),
    m_IsFreeBitmap(0),
    m_MemoryClasses(0),
    m_ListsCount(0),
    m_FreeList(VMA_NULL),
    m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
    m_NullBlock(VMA_NULL),
    m_GranularityHandler(bufferImageGranularity) {}

VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
{
    if (m_FreeList)
        vma_delete_array(GetAllocationCallbacks(), m_FreeList, 
m_ListsCount);
    m_GranularityHandler.Destroy(GetAllocationCallbacks());
}

// Initializes the metadata for a block of the given size: the whole block starts
// as one free "null block", and the free-list array is sized from the block's
// memory class / second-level index.
void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    if (!IsVirtual())
        m_GranularityHandler.Init(GetAllocationCallbacks(), size);

    m_NullBlock = m_BlockAllocator.Alloc();
    m_NullBlock->size = size;
    m_NullBlock->offset = 0;
    m_NullBlock->prevPhysical = VMA_NULL;
    m_NullBlock->nextPhysical = VMA_NULL;
    m_NullBlock->MarkFree();
    m_NullBlock->NextFree() = VMA_NULL;
    m_NullBlock->PrevFree() = VMA_NULL;
    uint8_t memoryClass = SizeToMemoryClass(size);
    uint16_t sli = SizeToSecondIndex(size, memoryClass);
    m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
    // Extra lists for the small-buffer range: full SLI fan-out when virtual, 4 otherwise.
    if (IsVirtual())
        m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
    else
        m_ListsCount += 4;

    m_MemoryClasses = memoryClass + uint8_t(2);
    memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));

    m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
}

// Consistency check: verifies free-list linkage, the physical block chain
// (contiguity, back-pointers), per-block free/taken state against the free lists,
// granularity tracking, and that the aggregate counters match reality.
bool VmaBlockMetadata_TLSF::Validate() const
{
    VMA_VALIDATE(GetSumFreeSize() <= GetSize());

    VkDeviceSize calculatedSize = m_NullBlock->size;
    VkDeviceSize calculatedFreeSize = m_NullBlock->size;
    size_t allocCount = 0;
    size_t freeCount = 0;

    // Check integrity of free lists
    for (uint32_t list = 0; list < m_ListsCount; ++list)
    {
        Block* block = m_FreeList[list];
        if (block != VMA_NULL)
        {
            VMA_VALIDATE(block->IsFree());
            VMA_VALIDATE(block->PrevFree() == VMA_NULL);
            while (block->NextFree())
            {
                VMA_VALIDATE(block->NextFree()->IsFree());
                VMA_VALIDATE(block->NextFree()->PrevFree() == block);
                block = block->NextFree();
            }
        }
    }

    VkDeviceSize nextOffset = m_NullBlock->offset;
    auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());

    VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
    if (m_NullBlock->prevPhysical)
    {
        VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
    }
    // Check all blocks, walking the physical chain backward from the null block.
    for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
    {
        // Physical blocks must be contiguous: each ends where its successor starts.
        VMA_VALIDATE(prev->offset + prev->size == nextOffset);
        nextOffset = prev->offset;
        calculatedSize += prev->size;

        uint32_t listIndex = GetListIndex(prev->size);
        if (prev->IsFree())
        {
            ++freeCount;
            // Check if free block belongs to free list
            Block* freeBlock = m_FreeList[listIndex];
            VMA_VALIDATE(freeBlock != VMA_NULL);

            bool found = false;
            do
            {
                if (freeBlock == prev)
                    found = true;

                freeBlock = freeBlock->NextFree();
            } while (!found && freeBlock != VMA_NULL);

            VMA_VALIDATE(found);
            calculatedFreeSize += prev->size;
        }
        else
        {
            ++allocCount;
            // Check if taken block is not on a free list
            Block* freeBlock = m_FreeList[listIndex];
            while (freeBlock)
            {
                VMA_VALIDATE(freeBlock != prev);
                freeBlock = freeBlock->NextFree();
            }

            if (!IsVirtual())
            {
                VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
            }
        }

        if (prev->prevPhysical)
        {
            VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
        }
    }

    if (!IsVirtual())
    {
        VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
    }

    
    VMA_VALIDATE(nextOffset == 0);
    VMA_VALIDATE(calculatedSize == GetSize());
    VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
    VMA_VALIDATE(allocCount == m_AllocCount);
    VMA_VALIDATE(freeCount == m_BlocksFreeCount);

    return true;
}

// Accumulates per-block detailed statistics: one range per physical block,
// plus the null block's trailing free space if non-empty.
void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
{
    inoutStats.statistics.blockCount++;
    inoutStats.statistics.blockBytes += GetSize();
    if (m_NullBlock->size > 0)
        VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);

    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
    {
        if (block->IsFree())
            VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
        else
            VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
    }
}

// Accumulates cheap aggregate statistics without walking the block list.
void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
{
    inoutStats.blockCount++;
    inoutStats.allocationCount += (uint32_t)m_AllocCount;
    inoutStats.blockBytes += GetSize();
    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
}

#if VMA_STATS_STRING_ENABLED
// Writes a JSON map of all blocks in ascending offset order (the physical chain
// is walked backward into blockList, so the list ends up front-to-back).
void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
{
    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);

    size_t i = blockCount;
    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
    {
        blockList[--i] = block;
    }
    // Every slot must have been filled; i is reused as the loop index below.
    VMA_ASSERT(i == 0);

    VmaDetailedStatistics stats;
    VmaClearDetailedStatistics(stats);
    AddDetailedStatistics(stats);

    PrintDetailedMap_Begin(json,
        stats.statistics.blockBytes - stats.statistics.allocationBytes,
        stats.statistics.allocationCount,
        stats.unusedRangeCount);

    for (; i < blockCount; ++i)
    {
        Block* block = blockList[i];
        if (block->IsFree())
            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
        else
            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
    }
    if (m_NullBlock->size > 0)
        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);

    PrintDetailedMap_End(json);
}
#endif

// Searches the segregated free lists for a region that fits the request,
// honoring the given allocation strategy bits. On success fills
// pAllocationRequest and returns true.
bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // For small granularity round up
    if (!IsVirtual())
        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);

    allocSize += GetDebugMargin();
    // Quick check for too small pool
    if (allocSize > GetSumFreeSize())
        return false;

    // If no free blocks in pool then check only null block
    if (m_BlocksFreeCount == 0)
        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);

    // Round up to the next block
    VkDeviceSize sizeForNextList = allocSize;
    VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 
1 << SECOND_LEVEL_INDEX : 4));
    if (allocSize > SMALL_BUFFER_SIZE)
    {
        sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
    }
    else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
        sizeForNextList = SMALL_BUFFER_SIZE + 1;
    else
        sizeForNextList += smallSizeStep;

    uint32_t nextListIndex = m_ListsCount;
    uint32_t prevListIndex = m_ListsCount;
    Block* nextListBlock = VMA_NULL;
    Block* prevListBlock = VMA_NULL;

    // Check blocks according to strategies
    if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
    {
        // Quick check for larger block first
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // If not fitted then null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Null block failed, search larger bucket
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }

        // Failed again, check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }
    }
    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
    {
        // Check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Check larger bucket
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }
    }
    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
    {
        // Perform search from the start
        VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
        VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);

        // Collect candidates back-to-front so they end up sorted by offset.
        size_t i = m_BlocksFreeCount;
        for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
        {
            if (block->IsFree() && block->size >= allocSize)
                blockList[--i] = block;
        }

        for (; i < m_BlocksFreeCount; ++i)
        {
            Block& block = *blockList[i];
            if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Whole range searched, no more memory
        return false;
    }
    else
    {
        // Check larger bucket
        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }

        // If failed check null block
        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
            return true;

        // Check best fit bucket
        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
        while (prevListBlock)
        {
            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            prevListBlock = prevListBlock->NextFree();
        }
    }

    // Worst case, full search has to be done
    while (++nextListIndex < m_ListsCount)
    {
        nextListBlock = m_FreeList[nextListIndex];
        while (nextListBlock)
        {
            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
                return true;
            nextListBlock = nextListBlock->NextFree();
        }
    }

    // No more memory sadly
    return false;
}

// Validates the magic value expected right past the end of every taken
// region; returns VK_ERROR_UNKNOWN_COPY on the first corrupted one.
VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
{
    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
    {
        if (!block->IsFree())
        {
            if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    return VK_SUCCESS;
}

// Commits a request produced by CreateAllocationRequest(): splits blocks as
// needed, updates the free lists, and marks the chosen region taken.
void VmaBlockMetadata_TLSF::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);

    // Get block and pop it from 
the free list
    Block* currentBlock = (Block*)request.allocHandle;
    // algorithmData carries the aligned offset computed by CheckBlock().
    VkDeviceSize offset = request.algorithmData;
    VMA_ASSERT(currentBlock != VMA_NULL);
    VMA_ASSERT(currentBlock->offset <= offset);

    if (currentBlock != m_NullBlock)
        RemoveFreeBlock(currentBlock);

    VkDeviceSize debugMargin = GetDebugMargin();
    VkDeviceSize missingAlignment = offset - currentBlock->offset;

    // Append missing alignment to prev block or create new one
    if (missingAlignment)
    {
        Block* prevBlock = currentBlock->prevPhysical;
        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");

        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
        {
            uint32_t oldList = GetListIndex(prevBlock->size);
            prevBlock->size += missingAlignment;
            // Check if new size crosses list bucket
            if (oldList != GetListIndex(prevBlock->size))
            {
                // Bucket changed: re-file the block under its new size.
                prevBlock->size -= missingAlignment;
                RemoveFreeBlock(prevBlock);
                prevBlock->size += missingAlignment;
                InsertFreeBlock(prevBlock);
            }
            else
                m_BlocksFreeSize += missingAlignment;
        }
        else
        {
            // Previous block cannot absorb the gap; link a new free block in.
            Block* newBlock = m_BlockAllocator.Alloc();
            currentBlock->prevPhysical = newBlock;
            prevBlock->nextPhysical = newBlock;
            newBlock->prevPhysical = prevBlock;
            newBlock->nextPhysical = currentBlock;
            newBlock->size = missingAlignment;
            newBlock->offset = currentBlock->offset;
            newBlock->MarkTaken();

            InsertFreeBlock(newBlock);
        }

        currentBlock->size -= missingAlignment;
        currentBlock->offset += missingAlignment;
    }

    VkDeviceSize size = request.size + debugMargin;
    if (currentBlock->size == size)
    {
        if (currentBlock == m_NullBlock)
        {
            // Setup new null block
            m_NullBlock = m_BlockAllocator.Alloc();
            m_NullBlock->size = 0;
            m_NullBlock->offset = currentBlock->offset + size;
            m_NullBlock->prevPhysical = currentBlock;
            m_NullBlock->nextPhysical = VMA_NULL;
            m_NullBlock->MarkFree();
            m_NullBlock->PrevFree() = VMA_NULL;
            m_NullBlock->NextFree() = VMA_NULL;
            currentBlock->nextPhysical = m_NullBlock;
            currentBlock->MarkTaken();
        }
    }
    else
    {
        VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");

        // Create new free block
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = currentBlock->size - size;
        newBlock->offset = currentBlock->offset + size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        currentBlock->nextPhysical = newBlock;
        currentBlock->size = size;

        if (currentBlock == m_NullBlock)
        {
            m_NullBlock = newBlock;
            m_NullBlock->MarkFree();
            m_NullBlock->NextFree() = VMA_NULL;
            m_NullBlock->PrevFree() = VMA_NULL;
            currentBlock->MarkTaken();
        }
        else
        {
            newBlock->nextPhysical->prevPhysical = newBlock;
            newBlock->MarkTaken();
            InsertFreeBlock(newBlock);
        }
    }
    currentBlock->UserData() = userData;

    // Carve the trailing debug margin out as its own free block.
    if (debugMargin > 0)
    {
        currentBlock->size -= debugMargin;
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = debugMargin;
        newBlock->offset = currentBlock->offset + currentBlock->size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        newBlock->MarkTaken();
        currentBlock->nextPhysical->prevPhysical = newBlock;
        currentBlock->nextPhysical = newBlock;
        InsertFreeBlock(newBlock);
    }

    if (!IsVirtual())
        m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
            currentBlock->offset, currentBlock->size);
    ++m_AllocCount;
}

// Frees the allocation identified by allocHandle and merges the resulting
// free region with any free physical neighbors.
void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
{
    Block* block = (Block*)allocHandle;
    Block* next = block->nextPhysical;
    VMA_ASSERT(!block->IsFree() && "Block is already free!");

    if (!IsVirtual())
        m_GranularityHandler.FreePages(block->offset, block->size);
    --m_AllocCount;

    VkDeviceSize debugMargin = GetDebugMargin();
    // With debug margins enabled the block right after is the margin; absorb it.
    if (debugMargin > 0)
    {
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        block = next;
        next = next->nextPhysical;
    }

    // Try merging
    Block* prev = block->prevPhysical;
    if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
    {
        RemoveFreeBlock(prev);
        MergeBlock(block, prev);
    }

    if (!next->IsFree())
        InsertFreeBlock(block);
    else if (next == m_NullBlock)
        MergeBlock(m_NullBlock, block);
    else
    {
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        InsertFreeBlock(next);
    }
}

// Copies offset/size/user data of a live allocation into outInfo.
void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
{
    Block* block = (Block*)allocHandle;
    VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
    outInfo.offset = block->offset;
    outInfo.size = block->size;
    outInfo.pUserData = block->UserData();
}

// Returns the user data pointer stored on a live allocation.
void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
{
    Block* block = (Block*)allocHandle;
    VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
    return block->UserData();
}

// Returns the first allocation in iteration order (walking physically back
// from the end of the block), or VK_NULL_HANDLE if there are none.
VmaAllocHandle 
VmaBlockMetadata_TLSF::GetAllocationListBegin() const
{
    if (m_AllocCount == 0)
        return VK_NULL_HANDLE;

    // Walk physically from the end; the first taken block starts iteration.
    for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
    {
        if (!block->IsFree())
            return (VmaAllocHandle)block;
    }
    VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
    return VK_NULL_HANDLE;
}

// Returns the allocation following prevAlloc in iteration order (its
// physical predecessor), or VK_NULL_HANDLE at the end of the list.
VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
{
    Block* startBlock = (Block*)prevAlloc;
    VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");

    for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
    {
        if (!block->IsFree())
            return (VmaAllocHandle)block;
    }
    return VK_NULL_HANDLE;
}

// Size of the free region physically preceding alloc, or 0 when that
// neighbor is taken or absent.
VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
{
    Block* block = (Block*)alloc;
    VMA_ASSERT(!block->IsFree() && "Incorrect block!");

    if (block->prevPhysical)
        return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
    return 0;
}

// Resets the metadata to a single empty range covering the whole block:
// releases every Block node and zeroes the free lists and bitmaps.
void VmaBlockMetadata_TLSF::Clear()
{
    m_AllocCount = 0;
    m_BlocksFreeCount = 0;
    m_BlocksFreeSize = 0;
    m_IsFreeBitmap = 0;
    m_NullBlock->offset = 0;
    m_NullBlock->size = GetSize();
    Block* block = m_NullBlock->prevPhysical;
    m_NullBlock->prevPhysical = VMA_NULL;
    while (block)
    {
        Block* prev = block->prevPhysical;
        m_BlockAllocator.Free(block);
        block = prev;
    }
    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
    memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
    m_GranularityHandler.Clear();
}

// Overwrites the user data pointer stored on a live allocation.
void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
{
    Block* block = (Block*)allocHandle;
    VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
    block->UserData() = userData;
}

// Logs every live allocation (used when reporting unfreed allocations).
void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
{
    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
        if (!block->IsFree())
            DebugLogAllocation(block->offset, block->size, block->UserData());
}

// First-level TLSF index: derived from the MSB of size; any size up to
// SMALL_BUFFER_SIZE maps to class 0.
uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
{
    if (size > SMALL_BUFFER_SIZE)
        return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
    return 0;
}

// Second-level TLSF index within the given memory class.
uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
{
    if (memoryClass == 0)
    {
        // Class 0 is bucketed linearly, with a finer step for virtual blocks.
        if (IsVirtual())
            return static_cast<uint16_t>((size - 1) / 8);
        else
            return static_cast<uint16_t>((size - 1) / 64);
    }
    return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
}

// Flattens (memoryClass, secondIndex) into a position within m_FreeList.
uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
{
    if (memoryClass == 0)
        return 
secondIndex;

    // Classes above 0 start after the class-0 buckets.
    const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
    if (IsVirtual())
        return index + (1 << SECOND_LEVEL_INDEX);
    else
        return index + 4;
}

uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
{
    uint8_t memoryClass = SizeToMemoryClass(size);
    return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
}

// Unlinks a block from its free list, clearing the two-level bitmap bits if
// the list becomes empty, and updates the free counters.
void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(block->IsFree());

    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block->PrevFree();
    if (block->PrevFree() != VMA_NULL)
        block->PrevFree()->NextFree() = block->NextFree();
    else
    {
        // Block was the list head: update m_FreeList and possibly the bitmaps.
        uint8_t memClass = SizeToMemoryClass(block->size);
        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
        uint32_t index = GetListIndex(memClass, secondIndex);
        VMA_ASSERT(m_FreeList[index] == block);
        m_FreeList[index] = block->NextFree();
        if (block->NextFree() == VMA_NULL)
        {
            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
            if (m_InnerIsFreeBitmap[memClass] == 0)
                m_IsFreeBitmap &= ~(1UL << memClass);
        }
    }
    block->MarkTaken();
    block->UserData() = VMA_NULL;
    --m_BlocksFreeCount;
    m_BlocksFreeSize -= block->size;
}

// Pushes a block onto the free list for its size and sets the bitmap bits.
void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");

    uint8_t memClass = SizeToMemoryClass(block->size);
    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
    uint32_t index = GetListIndex(memClass, secondIndex);
    VMA_ASSERT(index < m_ListsCount);
    block->PrevFree() = VMA_NULL;
    block->NextFree() = m_FreeList[index];
    m_FreeList[index] = block;
    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block;
    else
    {
        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
        m_IsFreeBitmap |= 1UL << memClass;
    }
    ++m_BlocksFreeCount;
    m_BlocksFreeSize += block->size;
}

// Absorbs prev into block (they must be physically adjacent; prev must not
// sit on a free list) and releases prev's node.
void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
{
    VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
    VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");

    block->offset = prev->offset;
    block->size += prev->size;
    block->prevPhysical = prev->prevPhysical;
    if (block->prevPhysical)
        block->prevPhysical->nextPhysical = block;
    m_BlockAllocator.Free(prev);
}

// Finds the head of the first non-empty free list able to fit `size`,
// using the two-level bitmaps to skip empty lists; writes the chosen list
// index to listIndex.
VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
{
    uint8_t memoryClass = SizeToMemoryClass(size);
    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
    if (!innerFreeMap)
    {
        // Check higher levels for available blocks
        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
        if (!freeMap)
            return VMA_NULL; // No more memory available

        // Find lowest free region
        memoryClass = VMA_BITSCAN_LSB(freeMap);
        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
        VMA_ASSERT(innerFreeMap != 0);
    }
    // Find lowest free subregion
    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
    VMA_ASSERT(m_FreeList[listIndex]);
    return m_FreeList[listIndex];
}

// Tests whether `block` can satisfy the request and, on success, fills
// *pAllocationRequest. May also rotate the block to the front of its free
// list so later searches hit it first.
bool VmaBlockMetadata_TLSF::CheckBlock(
    Block& block,
    uint32_t listIndex,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(block.IsFree() && "Block is already taken!");

    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
    if (block.size < allocSize + alignedOffset - block.offset)
        return false;

    // Check for granularity conflicts
    if (!IsVirtual() &&
        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
        return false;

    // Alloc successful
    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
    pAllocationRequest->size = allocSize - GetDebugMargin();
    pAllocationRequest->customData = (void*)allocType;
    pAllocationRequest->algorithmData = alignedOffset;

    // Place block at the start of list if it's normal block
    if (listIndex != m_ListsCount && block.PrevFree())
    {
        block.PrevFree()->NextFree() = block.NextFree();
        if (block.NextFree())
            block.NextFree()->PrevFree() = block.PrevFree();
        block.PrevFree() = VMA_NULL;
        block.NextFree() = m_FreeList[listIndex];
        m_FreeList[listIndex] = &block;
        if (block.NextFree())
            block.NextFree()->PrevFree() = &block;
    }

    return true;
}
#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
#endif // _VMA_BLOCK_METADATA_TLSF

#ifndef _VMA_BLOCK_VECTOR
/*
Sequence of VmaDeviceMemoryBlock. 
Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
class VmaBlockVector
{
    friend struct VmaDefragmentationContext_T;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        bool explicitBlockSize,
        uint32_t algorithm,
        float priority,
        VkDeviceSize minAllocationAlignment,
        void* pMemoryAllocateNext);
    ~VmaBlockVector();

    VmaAllocator GetAllocator() const { return m_hAllocator; }
    VmaPool GetParentPool() const { return m_hParentPool; }
    // A null parent pool means this vector belongs to the default pools.
    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }
    bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
    float GetPriority() const { return m_Priority; }
    const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    size_t GetBlockCount() const { return m_Blocks.size(); }
    // To be used only while the m_Mutex is locked. Used during defragmentation.
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    VMA_RW_MUTEX &GetMutex() { return m_Mutex; }

    VkResult CreateMinBlocks();
    void AddStatistics(VmaStatistics& inoutStats);
    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    bool IsEmpty();
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(const VmaAllocation hAllocation);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult CheckCorruption();

private:
    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    const float m_Priority;
    const VkDeviceSize m_MinAllocationAlignment;

    // Extension chain passed through to VkMemoryAllocateInfo::pNext.
    void* const m_pMemoryAllocateNext;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
    uint32_t m_NextBlockId;
    bool m_IncrementalSort = true;

    void SetIncrementalSort(bool val) { m_IncrementalSort = val; }

    VkDeviceSize CalcMaxBlockSize() const;
    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);
    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();
    void SortByFreeSize();

    VkResult AllocatePage(
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CommitAllocationRequest(
        VmaAllocationRequest& allocRequest,
        VmaDeviceMemoryBlock* pBlock,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    bool HasEmptyBlock();
};
#endif // _VMA_BLOCK_VECTOR

#ifndef _VMA_DEFRAGMENTATION_CONTEXT
struct VmaDefragmentationContext_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        const VmaDefragmentationInfo& info);
    ~VmaDefragmentationContext_T();

    void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }

    VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
    VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);

private:
    // Max number of allocations to ignore due to size constraints before ending single pass
    static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
    enum class CounterStatus { Pass, Ignore, End };

    struct FragmentedBlock
    {
        uint32_t data;
        VmaDeviceMemoryBlock* block;
    };
    struct StateBalanced
    
{
        VkDeviceSize avgFreeSize = 0;
        VkDeviceSize avgAllocSize = UINT64_MAX;
    };
    struct StateExtensive
    {
        enum class Operation : uint8_t
        {
            FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
            MoveBuffers, MoveTextures, MoveAll,
            Cleanup, Done
        };

        Operation operation = Operation::FindFreeBlockTexture;
        size_t firstFreeBlock = SIZE_MAX;
    };
    struct MoveAllocationData
    {
        VkDeviceSize size;
        VkDeviceSize alignment;
        VmaSuballocationType type;
        VmaAllocationCreateFlags flags;
        VmaDefragmentationMove move = {};
    };

    const VkDeviceSize m_MaxPassBytes;
    const uint32_t m_MaxPassAllocations;
    const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
    void* m_BreakCallbackUserData;

    VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
    VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;

    uint8_t m_IgnoredAllocs = 0;
    uint32_t m_Algorithm;
    uint32_t m_BlockVectorCount;
    // Either a single custom-pool vector (m_PoolBlockVector) or an array of
    // default-pool vectors (m_pBlockVectors) is used, not both.
    VmaBlockVector* m_PoolBlockVector;
    VmaBlockVector** m_pBlockVectors;
    size_t m_ImmovableBlockCount = 0;
    VmaDefragmentationStats m_GlobalStats = { 0 };
    VmaDefragmentationStats m_PassStats = { 0 };
    void* m_AlgorithmState = VMA_NULL;

    static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
    CounterStatus CheckCounters(VkDeviceSize bytes);
    bool IncrementCounters(VkDeviceSize bytes);
    bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
    bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);

    bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
    bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
    bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
    bool ComputeDefragmentation_Full(VmaBlockVector& vector);
    bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);

    void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
    bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
        VmaBlockVector& vector, size_t firstFreeBlock,
        bool& texturePresent, bool& bufferPresent, bool& otherPresent);
};
#endif // _VMA_DEFRAGMENTATION_CONTEXT

#ifndef _VMA_POOL_T
struct VmaPool_T
{
    friend struct VmaPoolListItemTraits;
    VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;
    VmaDedicatedAllocationList m_DedicatedAllocations;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once (asserts it is still 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    char* m_Name;
    VmaPool_T* m_PrevPool = VMA_NULL;
    VmaPool_T* m_NextPool = VMA_NULL;
};

// Intrusive-list traits so pools can be linked without separate nodes.
struct VmaPoolListItemTraits
{
    typedef VmaPool_T ItemType;

    static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
    static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
    static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
    static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
};
#endif // _VMA_POOL_T

#ifndef _VMA_CURRENT_BUDGET_DATA
// Per-heap atomic counters tracking block/allocation counts and byte totals.
struct VmaCurrentBudgetData
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
public:

    VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];

#if VMA_MEMORY_BUDGET
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex;
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
#endif // VMA_MEMORY_BUDGET

    VmaCurrentBudgetData();

    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
};

#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
// Zero-initializes all per-heap counters.
VmaCurrentBudgetData::VmaCurrentBudgetData()
{
    for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
    {
        m_BlockCount[heapIndex] = 0;
        m_AllocationCount[heapIndex] = 0;
        m_BlockBytes[heapIndex] = 0;
        m_AllocationBytes[heapIndex] = 0;
#if VMA_MEMORY_BUDGET
        m_VulkanUsage[heapIndex] = 0;
        m_VulkanBudget[heapIndex] = 0;
        m_BlockBytesAtBudgetFetch[heapIndex] = 0;
#endif
    }

#if VMA_MEMORY_BUDGET
    m_OperationsSinceBudgetFetch = 0;
#endif
}

// Records a new allocation of allocationSize bytes on the given heap.
void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    m_AllocationBytes[heapIndex] += allocationSize;
    ++m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}

// Records the release of an allocation of allocationSize bytes.
void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
    m_AllocationBytes[heapIndex] -= allocationSize;
    VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
    --m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
#endif // _VMA_CURRENT_BUDGET_DATA

#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
/*
Thread-safe wrapper over VmaPoolAllocator free list, for 
allocation of VmaAllocation_T objects.
*/
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
        : m_Allocator(pAllocationCallbacks, 1024) {}

    template<typename... Types> VmaAllocation Allocate(Types&&... args);
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};

// Constructs a VmaAllocation_T from the pool, forwarding args to its
// constructor. Thread-safe: guarded by m_Mutex.
template<typename... Types>
VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
{
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
}

// Returns hAlloc to the pool. Thread-safe: guarded by m_Mutex.
void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
{
    VmaMutexLock mutexLock(m_Mutex);
    m_Allocator.Free(hAlloc);
}
#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR

#ifndef _VMA_VIRTUAL_BLOCK_T
// Implementation of the VmaVirtualBlock handle: CPU-side sub-allocation over
// an abstract address space, delegating all bookkeeping to m_Metadata.
struct VmaVirtualBlock_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
public:
    const bool m_AllocationCallbacksSpecified;
    const VkAllocationCallbacks m_AllocationCallbacks;

    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
    ~VmaVirtualBlock_T();

    VkResult Init() { return VK_SUCCESS; }
    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
    void Clear() { m_Metadata->Clear(); }

    const VkAllocationCallbacks* GetAllocationCallbacks() const;
    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
        VkDeviceSize* outOffset);
    void GetStatistics(VmaStatistics& outStats) const;
    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
#if VMA_STATS_STRING_ENABLED
    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
#endif

private:
    VmaBlockMetadata* m_Metadata;
};

#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
// Selects the metadata algorithm (TLSF by default, Linear when requested via
// flags) and initializes it with the requested virtual size.
VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
    : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
{
    const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
    switch (algorithm)
    {
    case 0:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
        break;
    case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
        break;
    default:
        VMA_ASSERT(0);
        // Unknown algorithm bits: fall back to TLSF after asserting.
        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
    }

    m_Metadata->Init(createInfo.size);
}

VmaVirtualBlock_T::~VmaVirtualBlock_T()
{
    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
    // to receive the list of the unfreed allocations.
    if (!m_Metadata->IsEmpty())
        m_Metadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased virtual allocations.
    VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");

    vma_delete(GetAllocationCallbacks(), m_Metadata);
}

// Returns the user-provided callbacks, or null to select the default allocator.
const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
{
    return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
}

void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
{
    m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
}

// Attempts a virtual sub-allocation. On success returns VK_SUCCESS and fills
// outAllocation (and *outOffset if non-null); on failure returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY with outAllocation null and offset UINT64_MAX.
VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
    VkDeviceSize* outOffset)
{
    VmaAllocationRequest request = {};
    if (m_Metadata->CreateAllocationRequest(
        createInfo.size, // allocSize
        VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
        (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
        VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
        createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
        &request))
    {
        m_Metadata->Alloc(request,
            VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
            createInfo.pUserData);
        outAllocation = (VmaVirtualAllocation)request.allocHandle;
        if(outOffset)
            *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
        return VK_SUCCESS;
    }
    outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
    if (outOffset)
        *outOffset = UINT64_MAX;
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
{
    VmaClearStatistics(outStats);
    m_Metadata->AddStatistics(outStats);
}

void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
{
    VmaClearDetailedStatistics(outStats);
    m_Metadata->AddDetailedStatistics(outStats);
}

#if VMA_STATS_STRING_ENABLED
// Serializes statistics (and, when detailedMap, the full allocation map) as a
// JSON object into sb.
void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
{
    VmaJsonWriter json(GetAllocationCallbacks(), sb);
    json.BeginObject();

    VmaDetailedStatistics stats;
    CalculateDetailedStatistics(stats);

    json.WriteString("Stats");
    VmaPrintDetailedStatistics(json, stats);

    if (detailedMap)
    {
        json.WriteString("Details");
        json.BeginObject();
        m_Metadata->PrintDetailedMap(json);
        json.EndObject();
    }

    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
#endif // _VMA_VIRTUAL_BLOCK_T


// Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
public:
    const bool m_UseMutex;
    const uint32_t m_VulkanApiVersion;
    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseExtMemoryBudget;
    bool m_UseAmdDeviceCoherentMemory;
    bool m_UseKhrBufferDeviceAddress;
    bool m_UseExtMemoryPriority;
    bool m_UseKhrMaintenance4;
    bool m_UseKhrMaintenance5;
    bool m_UseKhrExternalMemoryWin32;
    const VkDevice m_hDevice;
    const VkInstance m_hInstance;
    const bool m_AllocationCallbacksSpecified;
    const VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
    uint32_t m_HeapSizeLimitMask;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];

    VmaCurrentBudgetData m_Budget;
    VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult
Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }

    // Device bufferImageGranularity, clamped up to the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also honor nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_MIN_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

    uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    VkResult FindMemoryTypeIndex(
        uint32_t memoryTypeBits,
        const VmaAllocationCreateInfo* pAllocationCreateInfo,
        VmaBufferImageUsage bufImgUsage,
        uint32_t* pMemoryTypeIndex) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaBufferImageUsage dedicatedBufferImageUsage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    void CalculateStatistics(VmaTotalStatistics* pStats);

    void GetHeapBudgets(
        VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
    void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
    VkResult BindVulkanBuffer(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkBuffer buffer,
        const void* pNext);
    // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
    VkResult BindVulkanImage(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkImage image,
        const void* pNext);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

    VkResult FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);
    VkResult FlushOrInvalidateAllocations(
        uint32_t allocationCount,
        const VmaAllocation* allocations,
        const VkDeviceSize* offsets, const VkDeviceSize* sizes,
        VMA_CACHE_OPERATION op);

    VkResult CopyMemoryToAllocation(
        const void* pSrcHostPointer,
        VmaAllocation dstAllocation,
        VkDeviceSize dstAllocationLocalOffset,
        VkDeviceSize size);
    VkResult CopyAllocationToMemory(
        VmaAllocation srcAllocation,
        VkDeviceSize srcAllocationLocalOffset,
        void* pDstHostPointer,
        VkDeviceSize size);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns bit mask of memory types that can support defragmentation on GPU as
    they support creation of required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

#if VMA_EXTERNAL_MEMORY
    VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
    {
        return m_TypeExternalMemoryHandleTypes[memTypeIndex];
    }
#endif // #if VMA_EXTERNAL_MEMORY

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
#if VMA_EXTERNAL_MEMORY
    VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
#endif // #if VMA_EXTERNAL_MEMORY

    VMA_RW_MUTEX m_PoolsMutex;
    typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
    // Protected by m_PoolsMutex.
    PoolList m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

    // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
    uint32_t m_GlobalMemoryTypeBits;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    void ImportVulkanFunctions_Static();
#endif

    void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
    void ImportVulkanFunctions_Dynamic();
#endif

    void ValidateVulkanFunctions();

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VmaPool pool,
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedPreferred,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaBufferImageUsage dedicatedBufferImageUsage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaDedicatedAllocationList& dedicatedAllocations,
        VmaBlockVector& blockVector,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VmaPool pool,
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        bool isMappingAllowed,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VmaPool pool,
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        VmaDedicatedAllocationList& dedicatedAllocations,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        bool isMappingAllowed,
        bool canAliasMemory,
        void* pUserData,
        float priority,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaBufferImageUsage dedicatedBufferImageUsage,
        size_t allocationCount,
        VmaAllocation* pAllocations,
        const void* pNextChain = VMA_NULL);

    void FreeDedicatedMemory(const VmaAllocation allocation);

    VkResult CalcMemTypeParams(
        VmaAllocationCreateInfo& outCreateInfo,
        uint32_t memTypeIndex,
        VkDeviceSize size,
        size_t allocationCount);
    VkResult CalcAllocationParams(
        VmaAllocationCreateInfo& outCreateInfo,
        bool dedicatedRequired,
        bool dedicatedPreferred);

    /*
    Calculates and returns bit mask of memory types that can support defragmentation
    on GPU as they support creation of required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    uint32_t CalculateGlobalMemoryTypeBits() const;

    bool GetFlushOrInvalidateRange(
        VmaAllocation allocation,
        VkDeviceSize offset, VkDeviceSize size,
        VkMappedMemoryRange& outRange) const;

#if VMA_MEMORY_BUDGET
    void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
};


#ifndef _VMA_MEMORY_FUNCTIONS
// Convenience overloads that route CPU-side (de)allocation through the
// allocator's VkAllocationCallbacks.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}

static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}

// Allocates raw, uninitialized storage for a single T (no constructor call).
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates raw, uninitialized storage for count objects of type T.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}

// Destroys *ptr and frees its storage; safe to call with null.
template<typename T>
static void vma_delete(VmaAllocator hAllocator, T* ptr)
{
    if(ptr != VMA_NULL)
    {
        ptr->~T();
        VmaFree(hAllocator, ptr);
    }
}

// Destroys count elements (in reverse order) and frees the array; null-safe.
template<typename T>
static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
            ptr[i].~T();
        VmaFree(hAllocator, ptr);
    }
}
#endif // _VMA_MEMORY_FUNCTIONS

#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
    : m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL){}

VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
{
    VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE);
}

// Binds this block to an already-allocated VkDeviceMemory and creates the
// metadata object for the chosen sub-allocation algorithm.
void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm,
    VkDeviceSize bufferImageGranularity)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hParentPool = hParentPool;
    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch (algorithm)
    {
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    default:
        VMA_ASSERT(0);
        // Unknown algorithm: fall back to TLSF after asserting.
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
    }
    m_pMetadata->Init(newSize);
}

// Releases the underlying VkDeviceMemory and the metadata object.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
    // to receive the list of the unfreed allocations.
    if (!m_pMetadata->IsEmpty())
        m_pMetadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}

void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    m_MappingHysteresis.PostAlloc();
}

// Notifies the mapping hysteresis of a free; if the hysteresis drops its extra
// mapping and no explicit mappings remain, the memory is actually unmapped.
void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if(m_MappingHysteresis.PostFree())
    {
        VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
        if (m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
}

bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}

// Temporarily maps the block and asks the metadata to verify magic values.
VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = VMA_NULL;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}

// Reference-counted map of the whole block. Calls vkMapMemory only on the
// transition from zero total mappings (explicit + hysteresis) to nonzero.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if (count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
    if (oldTotalMapCount != 0)
    {
        // Already mapped (possibly only by the hysteresis): just bump the count.
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        m_MappingHysteresis.PostMap();
        m_MapCount += count;
        if (ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if (result == VK_SUCCESS)
        {
            VMA_ASSERT(m_pMappedData != VMA_NULL);
            m_MappingHysteresis.PostMap();
            m_MapCount = count;
            if (ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
        }
        return result;
    }
}

// Reference-counted unmap; calls vkUnmapMemory only when the total mapping
// count (explicit + hysteresis) reaches zero.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if (count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if (m_MapCount >= count)
    {
        m_MapCount -= count;
        const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
        if (totalMapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
        m_MappingHysteresis.PostUnmap();
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}

// Debug helper: writes the corruption-detection magic value just past the
// allocation's end. Only meaningful with VMA_DEBUG_DETECT_CORRUPTION.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}

// Debug helper: asserts that the magic value written after the allocation is
// intact, detecting out-of-bounds writes by the freed allocation.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}

// Binds hBuffer at the allocation's offset within this block's VkDeviceMemory.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}

// Binds hImage at the allocation's offset within this block's VkDeviceMemory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}

#if VMA_EXTERNAL_MEMORY_WIN32
// Retrieves (creating and caching if needed) a Win32 handle for this block's
// memory, for the given target process.
VkResult VmaDeviceMemoryBlock::CreateWin32Handle(const VmaAllocator hAllocator, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, HANDLE* pHandle) noexcept
{
    VMA_ASSERT(pHandle);
    return m_Handle.GetHandle(hAllocator->m_hDevice, m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle);
}
#endif // VMA_EXTERNAL_MEMORY_WIN32
#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS

#ifndef _VMA_ALLOCATION_T_FUNCTIONS
VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
    : m_Alignment{ 1 },
    m_Size{ 0 },
    m_pUserData{ VMA_NULL },
    m_pName{ VMA_NULL },
    m_MemoryTypeIndex{ 0 },
    m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
    m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
    m_MapCount{ 0 },
    m_Flags{ 0 }
{
    if(mappingAllowed)
        m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
}

VmaAllocation_T::~VmaAllocation_T()
{
    VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction.");

    // Check if owned string was freed.
    VMA_ASSERT(m_pName == VMA_NULL);
}

// Initializes this allocation as a sub-allocation of a memory block.
void VmaAllocation_T::InitBlockAllocation(
    VmaDeviceMemoryBlock* block,
    VmaAllocHandle allocHandle,
    VkDeviceSize alignment,
    VkDeviceSize size,
    uint32_t memoryTypeIndex,
    VmaSuballocationType suballocationType,
    bool mapped)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    VMA_ASSERT(block !=
VMA_NULL);\n    m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;\n    m_Alignment = alignment;\n    m_Size = size;\n    m_MemoryTypeIndex = memoryTypeIndex;\n    if(mapped)\n    {\n        VMA_ASSERT(IsMappingAllowed() && \"Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.\");\n        m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;\n    }\n    m_SuballocationType = (uint8_t)suballocationType;\n    m_BlockAllocation.m_Block = block;\n    m_BlockAllocation.m_AllocHandle = allocHandle;\n}\n\nvoid VmaAllocation_T::InitDedicatedAllocation(\n    VmaAllocator allocator,\n    VmaPool hParentPool,\n    uint32_t memoryTypeIndex,\n    VkDeviceMemory hMemory,\n    VmaSuballocationType suballocationType,\n    void* pMappedData,\n    VkDeviceSize size)\n{\n    VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);\n    VMA_ASSERT(hMemory != VK_NULL_HANDLE);\n    m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;\n    m_Alignment = 0;\n    m_Size = size;\n    m_MemoryTypeIndex = memoryTypeIndex;\n    m_SuballocationType = (uint8_t)suballocationType;\n    m_DedicatedAllocation.m_ExtraData = VMA_NULL;\n    m_DedicatedAllocation.m_hParentPool = hParentPool;\n    m_DedicatedAllocation.m_hMemory = hMemory;\n    m_DedicatedAllocation.m_Prev = VMA_NULL;\n    m_DedicatedAllocation.m_Next = VMA_NULL;\n\n    if (pMappedData != VMA_NULL)\n    {\n        VMA_ASSERT(IsMappingAllowed() && \"Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.\");\n        m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;\n        EnsureExtraData(allocator);\n        m_DedicatedAllocation.m_ExtraData->m_pMappedData = pMappedData;\n    }\n}\n\nvoid VmaAllocation_T::Destroy(VmaAllocator allocator)\n{\n    FreeName(allocator);\n\n    if (GetType() == ALLOCATION_TYPE_DEDICATED)\n    {\n        vma_delete(allocator, m_DedicatedAllocation.m_ExtraData);\n    }\n}\n\nvoid VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)\n{\n    VMA_ASSERT(pName == VMA_NULL || pName != m_pName);\n\n    FreeName(hAllocator);\n\n    if (pName != VMA_NULL)\n        m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);\n}\n\nuint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)\n{\n    VMA_ASSERT(allocation != VMA_NULL);\n    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);\n    VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);\n\n    if (m_MapCount != 0)\n        m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);\n\n    m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);\n    std::swap(m_BlockAllocation, allocation->m_BlockAllocation);\n    m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);\n\n#if VMA_STATS_STRING_ENABLED\n    std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage);\n#endif\n    return m_MapCount;\n}\n\nVmaAllocHandle VmaAllocation_T::GetAllocHandle() const\n{\n    switch (m_Type)\n    {\n    case ALLOCATION_TYPE_BLOCK:\n        return m_BlockAllocation.m_AllocHandle;\n    case ALLOCATION_TYPE_DEDICATED:\n        return VK_NULL_HANDLE;\n    default:\n        VMA_ASSERT(0);\n        return VK_NULL_HANDLE;\n    }\n}\n\nVkDeviceSize VmaAllocation_T::GetOffset() const\n{\n    switch (m_Type)\n    {\n    case ALLOCATION_TYPE_BLOCK:\n        return 
m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);\n    case ALLOCATION_TYPE_DEDICATED:\n        return 0;\n    default:\n        VMA_ASSERT(0);\n        return 0;\n    }\n}\n\nVmaPool VmaAllocation_T::GetParentPool() const\n{\n    switch (m_Type)\n    {\n    case ALLOCATION_TYPE_BLOCK:\n        return m_BlockAllocation.m_Block->GetParentPool();\n    case ALLOCATION_TYPE_DEDICATED:\n        return m_DedicatedAllocation.m_hParentPool;\n    default:\n        VMA_ASSERT(0);\n        return VK_NULL_HANDLE;\n    }\n}\n\nVkDeviceMemory VmaAllocation_T::GetMemory() const\n{\n    switch (m_Type)\n    {\n    case ALLOCATION_TYPE_BLOCK:\n        return m_BlockAllocation.m_Block->GetDeviceMemory();\n    case ALLOCATION_TYPE_DEDICATED:\n        return m_DedicatedAllocation.m_hMemory;\n    default:\n        VMA_ASSERT(0);\n        return VK_NULL_HANDLE;\n    }\n}\n\nvoid* VmaAllocation_T::GetMappedData() const\n{\n    switch (m_Type)\n    {\n    case ALLOCATION_TYPE_BLOCK:\n        if (m_MapCount != 0 || IsPersistentMap())\n        {\n            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();\n            VMA_ASSERT(pBlockData != VMA_NULL);\n            return (char*)pBlockData + GetOffset();\n        }\n        else\n        {\n            return VMA_NULL;\n        }\n        break;\n    case ALLOCATION_TYPE_DEDICATED:\n        VMA_ASSERT((m_DedicatedAllocation.m_ExtraData != VMA_NULL && m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL) ==\n            (m_MapCount != 0 || IsPersistentMap()));\n        return m_DedicatedAllocation.m_ExtraData != VMA_NULL ? m_DedicatedAllocation.m_ExtraData->m_pMappedData : VMA_NULL;\n    default:\n        VMA_ASSERT(0);\n        return VMA_NULL;\n    }\n}\n\nvoid VmaAllocation_T::BlockAllocMap()\n{\n    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);\n    VMA_ASSERT(IsMappingAllowed() && \"Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");

    // Map count is stored in a uint8_t; saturate with an assert instead of wrapping.
    if (m_MapCount < 0xFF)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}

// Decrements the map reference count of a block allocation; asserts on underflow.
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if (m_MapCount > 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}

// Maps a dedicated allocation's memory and returns the host pointer in *ppData.
// The first map (when not persistently mapped) calls vkMapMemory over the whole
// range; subsequent calls only bump the reference count and reuse the pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");

    EnsureExtraData(hAllocator);

    if (m_MapCount != 0 || IsPersistentMap())
    {
        // Already mapped: hand out the cached pointer.
        if (m_MapCount < 0xFF)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_ExtraData->m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if (result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_ExtraData->m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

// Unmaps a dedicated allocation. vkUnmapMemory is called only when the last
// reference is released and the allocation is not persistently mapped.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if (m_MapCount > 0)
    {
        --m_MapCount;
        if (m_MapCount == 0 && !IsPersistentMap())
        {
            VMA_ASSERT(m_DedicatedAllocation.m_ExtraData != VMA_NULL);
            m_DedicatedAllocation.m_ExtraData->m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}

#if VMA_STATS_STRING_ENABLED
// Writes this allocation's fields as key/value pairs into the JSON writer.
// NOTE(review): emits bare key/value pairs - presumably the caller has already
// begun the enclosing JSON object; confirm at call sites.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);
    json.WriteString("Usage");
    json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t.

    if (m_pUserData != VMA_NULL)
    {
        json.WriteString("CustomData");
        json.BeginString();
        json.ContinueString_Pointer(m_pUserData);
        json.EndString();
    }
    if (m_pName != VMA_NULL)
    {
        json.WriteString("Name");
        json.WriteString(m_pName);
    }
}
#if VMA_EXTERNAL_MEMORY_WIN32
// Exports the backing memory as a Win32 HANDLE for hTargetProcess.
// Block allocations delegate to the block (shared handle); dedicated
// allocations create/cache the handle in their extra data.
VkResult VmaAllocation_T::GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* pHandle) noexcept
{
    auto pvkGetMemoryWin32HandleKHR = hAllocator->GetVulkanFunctions().vkGetMemoryWin32HandleKHR;
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->CreateWin32Handle(hAllocator, pvkGetMemoryWin32HandleKHR, hTargetProcess, pHandle);
    case ALLOCATION_TYPE_DEDICATED:
        EnsureExtraData(hAllocator);
        return m_DedicatedAllocation.m_ExtraData->m_Handle.GetHandle(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }
}
#endif // VMA_EXTERNAL_MEMORY_WIN32
#endif // VMA_STATS_STRING_ENABLED

// Lazily allocates the dedicated allocation's extra-data struct on first use.
void VmaAllocation_T::EnsureExtraData(VmaAllocator hAllocator)
{
    if (m_DedicatedAllocation.m_ExtraData == VMA_NULL)
    {
        m_DedicatedAllocation.m_ExtraData = vma_new(hAllocator, VmaAllocationExtraData)();
    }
}

// Frees the debug name string (if set) and resets the pointer.
void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
{
    if(m_pName)
    {
        VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
        m_pName = VMA_NULL;
    }
}
#endif // _VMA_ALLOCATION_T_FUNCTIONS

#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
// Stores configuration for this vector of memory blocks; performs no Vulkan
// work in the constructor (blocks are created later, e.g. by CreateMinBlocks).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    bool explicitBlockSize,
    uint32_t algorithm,
    float priority,
    VkDeviceSize minAllocationAlignment,
    void* pMemoryAllocateNext)
    : m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Priority(priority),
    m_MinAllocationAlignment(minAllocationAlignment),
    m_pMemoryAllocateNext(pMemoryAllocateNext),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0) {}

// Destroys and deletes all remaining memory blocks, in reverse order.
VmaBlockVector::~VmaBlockVector()
{
    for (size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

// Pre-creates m_MinBlockCount blocks of the preferred size; stops and returns
// the error on the first failure.
VkResult VmaBlockVector::CreateMinBlocks()
{
    for (size_t i = 0; i < 
m_MinBlockCount; ++i)
    {
        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

// Accumulates basic statistics from every block's metadata into inoutStats.
// Takes the vector's mutex for reading.
void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddStatistics(inoutStats);
    }
}

// Accumulates detailed statistics from every block's metadata into inoutStats.
// Takes the vector's mutex for reading.
void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
    }
}

// Returns true when this vector currently holds no memory blocks.
bool VmaBlockVector::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    return m_Blocks.empty();
}

// Corruption detection requires: the feature compiled in, a nonzero debug
// margin, a default or linear algorithm, and HOST_VISIBLE|HOST_COHERENT memory
// (so the margins can be read back without explicit cache management).
bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
}

// Allocates allocationCount pages of the given size/alignment under a single
// write lock. On any failure, all pages allocated so far are freed and the
// output array is zeroed, so the operation is all-or-nothing.
VkResult VmaBlockVector::Allocate(
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t 
Try to allocate.
    if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Use only last block.
        if (!m_Blocks.empty())
        {
            VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
            VMA_ASSERT(pCurrBlock);
            VkResult res = AllocateFromBlock(
                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG_FORMAT("    Returned from last block #%" PRIu32, pCurrBlock->GetId());
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
        }
    }
    else
    {
        if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
        {
            const bool isHostVisible =
                (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
            if(isHostVisible)
            {
                const bool isMappingAllowed = (createInfo.flags &
                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
                /*
                For non-mappable allocations, check blocks that are not mapped first.
                For mappable allocations, check blocks that are already mapped first.
                This way, having many blocks, we will separate mappable and non-mappable allocations,
                hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
                */
                for(size_t mappingI = 0; mappingI < 2; ++mappingI)
                {
                    // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                    for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                    {
                        VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                        VMA_ASSERT(pCurrBlock);
                        const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
                        // Pass 0 visits the preferred mapping state, pass 1 the other.
                        if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
                        {
                            VkResult res = AllocateFromBlock(
                                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                            if (res == VK_SUCCESS)
                            {
                                VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                                IncrementallySortBlocks();
                                return VK_SUCCESS;
                            }
                        }
                    }
                }
            }
            else
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                    if (res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                        IncrementallySortBlocks();
                        return VK_SUCCESS;
                    }
                }
            }
        }
        else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
        {
            // Backward order in m_Blocks - prefer blocks with largest amount of free space.
            for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
                if (res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG_FORMAT("    Returned from existing block #%" PRIu32, pCurrBlock->GetId());
                    IncrementallySortBlocks();
                    return VK_SUCCESS;
                }
            }
        }
    }

    // 2. Try to create new block.
    if (canCreateNewBlock)
    {
        // Calculate optimal size for new block.
        VkDeviceSize newBlockSize = m_PreferredBlockSize;
        uint32_t newBlockSizeShift = 0;
        const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

        if (!m_ExplicitBlockSize)
        {
            // Allocate 1/8, 1/4, 1/2 as first blocks.
            const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
            for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                // Only shrink while still larger than any existing block and
                // at least twice the requested size.
                if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                }
                else
                {
                    break;
                }
            }
        }

        size_t newBlockIndex = 0;
        VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
            CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
        // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
        if (!m_ExplicitBlockSize)
        {
            while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
            {
                const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                if (smallerNewBlockSize >= size)
                {
                    newBlockSize = smallerNewBlockSize;
                    ++newBlockSizeShift;
                    res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
                        CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
                else
                {
                    break;
                }
            }
        }

        if (res == VK_SUCCESS)
        {
            VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
            VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

            res = AllocateFromBlock(
                pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
            if (res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG_FORMAT("    Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize);
                IncrementallySortBlocks();
                return VK_SUCCESS;
            }
            else
            {
                // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

// Frees hAllocation back into its block and destroys the allocation object.
// May also delete a memory block that became (or already was) empty, doing the
// actual Vulkan destruction outside the mutex.
void VmaBlockVector::Free(const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Query the heap budget up front (outside the lock) - used below to decide
    // whether an empty block should be returned to the system immediately.
    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        
budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if (IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Persistent mappings hold one block map reference; release it here.
        if (hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
        pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
        pBlock->PostFree(m_hAllocator);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG_FORMAT("  Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex);

        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if (pBlock->m_pMetadata->IsEmpty())
        {
            // Already had empty block. We don't want to have two, so delete this one.
            if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if (hadEmptyBlockBeforeFree && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if (pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        IncrementallySortBlocks();

        m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
        hAllocation->Destroy(m_hAllocator);
        m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reasons.
    if (pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG_FORMAT("    Deleted empty block #%" PRIu32, pBlockToDelete->GetId());
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}

// Returns the size of the largest existing block, early-exiting once a block
// reaches the preferred size (no larger value is needed by the caller).
VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
{
    VkDeviceSize result = 0;
    for (size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
        if (result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}

// Removes pBlock from m_Blocks by linear search; asserts if it is not present.
void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
{
    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        if (m_Blocks[blockIndex] == pBlock)
        {
            VmaVectorRemove(m_Blocks, blockIndex);
            return;
        }
    }
    VMA_ASSERT(0);
}

// Performs at most one adjacent swap toward ascending free-size order.
// Called after each alloc/free so the vector stays approximately sorted
// without a full sort on every operation. No-op for the linear algorithm
// or when incremental sorting is disabled (e.g. during defragmentation).
void VmaBlockVector::IncrementallySortBlocks()
{
    if (!m_IncrementalSort)
        return;
    if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Bubble sort only until first swap.
        for (size_t i = 1; i < m_Blocks.size(); ++i)
        {
            if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
            {
                std::swap(m_Blocks[i - 1], m_Blocks[i]);
                return;
            }
        }
    }
}

// Fully sorts the blocks by ascending total free size.
void VmaBlockVector::SortByFreeSize()
{
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
        [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
        {
            return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
        });
}

// Asks pBlock's metadata for a suitable region; on success commits it,
// otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY so the caller can try
// another block.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if (pBlock->m_pMetadata->CreateAllocationRequest(
        size,
        alignment,
        isUpperAddress,
        suballocType,
        strategy,
        &currRequest))
    {
        return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

// Turns a successful allocation request into a live VmaAllocation: maps the
// block if requested, records the suballocation in the metadata, initializes
// the allocation object, updates the budget, and applies the debug fill /
// corruption-detection margins.
VkResult VmaBlockVector::CommitAllocationRequest(
    VmaAllocationRequest& allocRequest,
    VmaDeviceMemoryBlock* pBlock,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool isMappingAllowed 
can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if (m_hAllocator->m_UseKhrBufferDeviceAddress)
    {
        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
    }
#endif // VMA_BUFFER_DEVICE_ADDRESS

#if VMA_MEMORY_PRIORITY
    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
    if (m_hAllocator->m_UseExtMemoryPriority)
    {
        VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
        priorityInfo.priority = m_Priority;
        VmaPnextChainPushFront(&allocInfo, &priorityInfo);
    }
#endif // VMA_MEMORY_PRIORITY

#if VMA_EXTERNAL_MEMORY
    // Attach VkExportMemoryAllocateInfoKHR if necessary.
    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
    exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
    if (exportMemoryAllocInfo.handleTypes != 0)
    {
        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
    }
#endif // VMA_EXTERNAL_MEMORY

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if (res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm,
        m_BufferImageGranularity);

    m_Blocks.push_back(pBlock);
    if (pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}

// Returns true if any block in the vector has no live suballocations.
bool VmaBlockVector::HasEmptyBlock()
{
    for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
        if (pBlock->m_pMetadata->IsEmpty())
        {
            return true;
        }
    }
    return false;
}

#if VMA_STATS_STRING_ENABLED
// Writes a JSON object keyed by block id; each entry contains the block's map
// reference count and the metadata's own detailed map.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);


    json.BeginObject();
    for (size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        json.BeginObject();
        json.WriteString("MapRefCount");
        json.WriteNumber(m_Blocks[i]->GetMapRefCount());

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
        json.EndObject();
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED

// Checks the corruption-detection margins of every block; returns the first
// error, VK_ERROR_FEATURE_NOT_PRESENT if detection is disabled for this vector.
VkResult VmaBlockVector::CheckCorruption()
{
    if (!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

#endif // _VMA_BLOCK_VECTOR_FUNCTIONS

#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
// Sets up a defragmentation session: selects the block vectors to work on
// (a single pool's vector, or all of the allocator's), disables their
// incremental sorting for the session, pre-sorts them by free size, and
// allocates per-vector algorithm state where the chosen algorithm needs it.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    const VmaDefragmentationInfo& info)
    : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
    m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
    m_BreakCallback(info.pfnBreakCallback),
    m_BreakCallbackUserData(info.pBreakCallbackUserData),
    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
    m_Moves(m_MoveAllocator)
{
    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;

    if (info.pool != VMA_NULL)
    {
        // Single custom pool: treat its block vector as the only one.
        m_BlockVectorCount = 1;
        m_PoolBlockVector = &info.pool->m_BlockVector;
        m_pBlockVectors = &m_PoolBlockVector;
        m_PoolBlockVector->SetIncrementalSort(false);
        m_PoolBlockVector->SortByFreeSize();
    }
    else
    {
        // Default pools: one block vector per memory type (entries may be null).
        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
        m_PoolBlockVector = VMA_NULL;
        m_pBlockVectors = hAllocator->m_pBlockVectors;
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
            {
                vector->SetIncrementalSort(false);
                vector->SortByFreeSize();
            }
        }
    }

    switch (m_Algorithm)
    {
    case 0: // Default algorithm
        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
        break;
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
        break;
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        // Extensive state is only needed when buffer/image granularity matters.
        if (hAllocator->GetBufferImageGranularity() > 1)
        {
            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
        }
        break;
    }
}

// Ends the session: re-enables incremental sorting on the affected block
// vectors and frees the per-vector algorithm state (typed by m_Algorithm).
VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        m_PoolBlockVector->SetIncrementalSort(true);
    }
    else
    {
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            VmaBlockVector* vector = m_pBlockVectors[i];
            if (vector != VMA_NULL)
                vector->SetIncrementalSort(true);
        }
    }

    if (m_AlgorithmState)
    {
        switch (m_Algorithm)
        {
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
}

VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
{
    if (m_PoolBlockVector != VMA_NULL)
    {
        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);

        if (m_PoolBlockVector->GetBlockCount() > 1)
            ComputeDefragmentation(*m_PoolBlockVector, 0);
        else if (m_PoolBlockVector->GetBlockCount() == 1)
            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
    }
    else
    {
        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
        {
            if (m_pBlockVectors[i] != VMA_NULL)
            {
                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);

                if (m_pBlockVectors[i]->GetBlockCount() > 1)
                {
                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
                        break;
                }
                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
                {
                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
                        break;
              
  }\n            }\n        }\n    }\n\n    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());\n    if (moveInfo.moveCount > 0)\n    {\n        moveInfo.pMoves = m_Moves.data();\n        return VK_INCOMPLETE;\n    }\n\n    moveInfo.pMoves = VMA_NULL;\n    return VK_SUCCESS;\n}\n\nVkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)\n{\n    VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);\n\n    VkResult result = VK_SUCCESS;\n    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);\n    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);\n    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);\n\n    VmaAllocator allocator = VMA_NULL;\n    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)\n    {\n        VmaDefragmentationMove& move = moveInfo.pMoves[i];\n        size_t prevCount = 0, currentCount = 0;\n        VkDeviceSize freedBlockSize = 0;\n\n        uint32_t vectorIndex;\n        VmaBlockVector* vector;\n        if (m_PoolBlockVector != VMA_NULL)\n        {\n            vectorIndex = 0;\n            vector = m_PoolBlockVector;\n        }\n        else\n        {\n            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();\n            vector = m_pBlockVectors[vectorIndex];\n            VMA_ASSERT(vector != VMA_NULL);\n        }\n\n        switch (move.operation)\n        {\n        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:\n        {\n            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);\n            if (mapCount > 0)\n            {\n                allocator = vector->m_hAllocator;\n                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();\n                bool notPresent = true;\n                for (FragmentedBlock& block : mappedBlocks)\n                {\n                    
                    if (block.block == newMapBlock)
                    {
                        notPresent = false;
                        block.data += mapCount;
                        break;
                    }
                }
                if (notPresent)
                    mappedBlocks.push_back({ mapCount, newMapBlock });
            }

            // Scope for locks, Free has its own lock
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                prevCount = vector->GetBlockCount();
                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.dstTmpAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                currentCount = vector->GetBlockCount();
            }

            result = VK_INCOMPLETE;
            break;
        }
        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
        {
            // Caller refused this move: undo its contribution to the pass statistics,
            // release the temporary dst allocation, and remember the block as immovable.
            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
            --m_PassStats.allocationsMoved;
            vector->Free(move.dstTmpAllocation);

            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
            bool notPresent = true;
            for (const FragmentedBlock& block : immovableBlocks)
            {
                if (block.block == newBlock)
                {
                    notPresent = false;
                    break;
                }
            }
            if (notPresent)
                immovableBlocks.push_back({ vectorIndex, newBlock });
            break;
        }
        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
        {
            // Caller destroyed the allocation instead of moving it:
            // free both the source allocation and the temporary dst allocation.
            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
            --m_PassStats.allocationsMoved;
            // Scope for locks, Free has its own lock
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                prevCount = vector->GetBlockCount();
                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.srcAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                currentCount = vector->GetBlockCount();
            }
            freedBlockSize *= prevCount - currentCount;

            VkDeviceSize dstBlockSize;
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
            }
            vector->Free(move.dstTmpAllocation);
            {
                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
                currentCount = vector->GetBlockCount();
            }

            result = VK_INCOMPLETE;
            break;
        }
        default:
            VMA_ASSERT(0);
        }

        if (prevCount > currentCount)
        {
            // One or more device memory blocks became empty and were destroyed by Free().
            size_t freedBlocks = prevCount - currentCount;
            m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
            m_PassStats.bytesFreed += freedBlockSize;
        }

        if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
            m_AlgorithmState != VMA_NULL)
        {
            // Avoid unnecessary tries to allocate when new free block is available
            StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
            if (state.firstFreeBlock != SIZE_MAX)
            {
                // Shift firstFreeBlock down by the number of blocks destroyed above.
                const size_t diff = prevCount - currentCount;
                if (state.firstFreeBlock >= diff)
                {
                    state.firstFreeBlock -= diff;
                    if (state.firstFreeBlock != 0)
                        state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
                }
                else
                    state.firstFreeBlock = 0;
            }
        }
    }
    moveInfo.moveCount = 0;
    moveInfo.pMoves = VMA_NULL;
    m_Moves.clear();

    // Update stats
    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
    m_PassStats = { 0 };

    // Move blocks with immovable allocations according to algorithm
    if (immovableBlocks.size() > 0)
    {
        do
        {
            if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
            {
                if (m_AlgorithmState != VMA_NULL)
                {
                    bool swapped = false;
                    // Move to the start of free blocks range
                    for (const FragmentedBlock& block : immovableBlocks)
                    {
                        // block.data holds the block vector index for this entry.
                        StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
                        if (state.operation != StateExtensive::Operation::Cleanup)
                        {
                            VmaBlockVector* vector = m_pBlockVectors[block.data];
                            VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);

                            for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
                            {
                                if (vector->GetBlock(i) == block.block)
                                {
                                    // Park the immovable block at the tail of the vector,
                                    // keeping firstFreeBlock consistent with the swap.
                                    std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
                                    if (state.firstFreeBlock != SIZE_MAX)
                                    {
                                        if (i + 1 < state.firstFreeBlock)
                                        {
                                            if (state.firstFreeBlock > 1)
                                                std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
                                            else
                                                --state.firstFreeBlock;
                                        }
                                    }
                                    swapped = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (swapped)
                        result = VK_INCOMPLETE;
                    break;
                }
            }

            // Move to the beginning
            for (const FragmentedBlock& block : immovableBlocks)
            {
                VmaBlockVector* vector = m_pBlockVectors[block.data];
                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);

                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
                {
                    if (vector->GetBlock(i) == block.block)
                    {
                        std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
                        break;
                    }
                }
            }
        } while (false);
    }

    // Bulk-map destination blocks
    for (const FragmentedBlock& block : mappedBlocks)
    {
        // block.data here is the accumulated map reference count to restore.
        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
        VMA_ASSERT(res == VK_SUCCESS);
    }
    return result;
}

// Dispatches to the implementation of the selected defragmentation algorithm.
// Returns true when the per-pass limits were reached and gathering should stop.
bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
{
    switch (m_Algorithm)
    {
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
        return ComputeDefragmentation_Fast(vector);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
        return ComputeDefragmentation_Balanced(vector, index, true);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
        return ComputeDefragmentation_Full(vector);
    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
        return ComputeDefragmentation_Extensive(vector, index);
    default:
        VMA_ASSERT(0);
        return ComputeDefragmentation_Balanced(vector, index, true);
    }
}

// Collects everything needed to move the allocation behind the given handle:
// the source allocation, its size, alignment, suballocation type, and creation
// flags replicating its mapping capabilities.
VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
    VmaAllocHandle handle, VmaBlockMetadata* metadata)
{
    MoveAllocationData moveData;
    moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
    moveData.size = moveData.move.srcAllocation->GetSize();
    moveData.alignment = moveData.move.srcAllocation->GetAlignment();
    moveData.type = moveData.move.srcAllocation->GetSuballocationType();
    moveData.flags = 0;

    if (moveData.move.srcAllocation->IsPersistentMap())
        moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    if (moveData.move.srcAllocation->IsMappingAllowed())
        moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

    return moveData;
}

// Decides whether an allocation of the given size may still be moved in this pass:
// Pass = proceed, Ignore = skip just this allocation, End = stop the whole pass.
VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
{
    // Check custom criteria if exists
    if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
        return CounterStatus::End;

    // Ignore allocation if will exceed max size for copy
    if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
    {
        // Limit consecutive skips so the pass terminates even when every
        // remaining candidate is too large for the byte budget.
        if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
            return CounterStatus::Ignore;
        else
            return CounterStatus::End;
    }
    else
        m_IgnoredAllocs = 0;
    return CounterStatus::Pass;
}

// Accounts a committed move against the per-pass limits.
// Returns true when the allocation count or byte budget for this pass is exhausted.
bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
{
    m_PassStats.bytesMoved += bytes;
    // Early return when max found
    if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
    {
        // CheckCounters() already rejected anything that would overshoot the byte
        // budget, so equality is the only way either limit can be reached here.
        VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
            m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
        return true;
    }
    return false;
}

// Compacts a single block: tries to re-place each of its allocations at a lower
// offset within the same block. Returns true when a per-pass limit was reached.
bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
{
    VmaBlockMetadata* metadata = block->m_pMetadata;

    for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
        handle != VK_NULL_HANDLE;
        handle = metadata->GetNextAllocation(handle))
    {
        MoveAllocationData moveData = GetMoveData(handle, metadata);
        // Ignore newly created allocations by defragmentation algorithm
        if (moveData.move.srcAllocation->GetUserData() == this)
            continue;
        switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
        {
        case CounterStatus::Ignore:
            continue;
        case CounterStatus::End:
            return true;
        case CounterStatus::Pass:
            break;
        default:
            VMA_ASSERT(0);
        }

        // Offset 0 is already optimal; otherwise only bother when there is
        // at least enough total free space in the block.
        VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
        if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
        {
            VmaAllocationRequest request = {};
            if (metadata->CreateAllocationRequest(
                moveData.size,
                moveData.alignment,
                false,
                moveData.type,
                VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                &request))
            {
                // Only commit when the move actually lowers the allocation's offset.
                if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                {
                    if (vector.CommitAllocationRequest(
                        request,
                        block,
                        moveData.alignment,
                        moveData.flags,
                        this,
                        moveData.type,
                        &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                    {
                        m_Moves.push_back(moveData.move);
                        if (IncrementCounters(moveData.size))
                            return true;
                    }
                }
            }
        }
    }
    return false;
}

// Tries to allocate `data` in one of the blocks [start, end) and, on success,
// records the move. Returns true when a per-pass limit was reached.
bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
{
    for (; start < end; ++start)
    {
        VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
        if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
        {
            if (vector.AllocateFromBlock(dstBlock,
                data.size,
                data.alignment,
                data.flags,
                this,
                data.type,
                0,
                &data.move.dstTmpAllocation) == VK_SUCCESS)
            {
                m_Moves.push_back(data.move);
                if (IncrementCounters(data.size))
                    return true;
                break;
            }
        }
    }
    return false;
}

// Fast algorithm: only moves allocations between blocks, never within one.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
{
    // Move only between blocks

    // Go through allocations in last blocks and try to fit them inside first ones
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaBlockMetadata* metadata =
            vector.GetBlock(i)->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;
        }
    }
    return false;
}

// Balanced algorithm: like Fast, but additionally compacts within a block when a
// heuristic based on average allocation/free-region sizes suggests it is worthwhile.
bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
{
    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
    // if not possible: realloc within single block to minimize offset (exclude offset == 0),
    // but only if there are noticeable gaps between them
    // (some heuristic, ex. average size of allocation in block)
    VMA_ASSERT(m_AlgorithmState != VMA_NULL);

    // avgAllocSize == UINT64_MAX marks the statistics as stale; refresh them once.
    StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
    if (update && vectorState.avgAllocSize == UINT64_MAX)
        UpdateVectorStatistics(vector, vectorState);

    const size_t startMoveCount = m_Moves.size();
    VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
        VmaBlockMetadata* metadata = block->m_pMetadata;
        VkDeviceSize prevFreeRegionSize = 0;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Check all previous blocks for free space
            const size_t prevMoveCount = m_Moves.size();
            if (AllocInOtherBlock(0, i, moveData, vector))
                return true;

            VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
            // If no room found then realloc within block for lower offset
            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
            {
                // Check if realloc will make sense
                if (prevFreeRegionSize >= minimalFreeRegion ||
                    nextFreeRegionSize >= minimalFreeRegion ||
                    moveData.size <= vectorState.avgFreeSize ||
                    moveData.size <= vectorState.avgAllocSize)
                {
                    VmaAllocationRequest request = {};
                    if (metadata->CreateAllocationRequest(
                        moveData.size,
                        moveData.alignment,
                        false,
                        moveData.type,
                        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
                        &request))
                    {
                        // Only commit when the move actually lowers the offset.
                        if (metadata->GetAllocationOffset(request.allocHandle) < offset)
                        {
                            if (vector.CommitAllocationRequest(
                                request,
                                block,
                                moveData.alignment,
                                moveData.flags,
                                this,
                                moveData.type,
                                &moveData.move.dstTmpAllocation) == VK_SUCCESS)
                            {
                                m_Moves.push_back(moveData.move);
                                if (IncrementCounters(moveData.size))
                                    return true;
                            }
                        }
                    }
                }
            }
            prevFreeRegionSize = nextFreeRegionSize;
        }
    }

    // No moves performed, update statistics to current vector state
    if (startMoveCount == m_Moves.size() && !update)
    {
        // Mark statistics stale and retry once with a fresh snapshot.
        vectorState.avgAllocSize = UINT64_MAX;
        return ComputeDefragmentation_Balanced(vector, index, false);
    }
    return false;
}

bool
VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)\n{\n    // Go over every allocation and try to fit it in previous blocks at lowest offsets,\n    // if not possible: realloc within single block to minimize offset (exclude offset == 0)\n\n    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)\n    {\n        VmaDeviceMemoryBlock* block = vector.GetBlock(i);\n        VmaBlockMetadata* metadata = block->m_pMetadata;\n\n        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();\n            handle != VK_NULL_HANDLE;\n            handle = metadata->GetNextAllocation(handle))\n        {\n            MoveAllocationData moveData = GetMoveData(handle, metadata);\n            // Ignore newly created allocations by defragmentation algorithm\n            if (moveData.move.srcAllocation->GetUserData() == this)\n                continue;\n            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))\n            {\n            case CounterStatus::Ignore:\n                continue;\n            case CounterStatus::End:\n                return true;\n            case CounterStatus::Pass:\n                break;\n            default:\n                VMA_ASSERT(0);\n            }\n\n            // Check all previous blocks for free space\n            const size_t prevMoveCount = m_Moves.size();\n            if (AllocInOtherBlock(0, i, moveData, vector))\n                return true;\n\n            // If no room found then realloc within block for lower offset\n            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();\n            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)\n            {\n                VmaAllocationRequest request = {};\n                if (metadata->CreateAllocationRequest(\n                    moveData.size,\n                    moveData.alignment,\n                    false,\n                    
moveData.type,\n                    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,\n                    &request))\n                {\n                    if (metadata->GetAllocationOffset(request.allocHandle) < offset)\n                    {\n                        if (vector.CommitAllocationRequest(\n                            request,\n                            block,\n                            moveData.alignment,\n                            moveData.flags,\n                            this,\n                            moveData.type,\n                            &moveData.move.dstTmpAllocation) == VK_SUCCESS)\n                        {\n                            m_Moves.push_back(moveData.move);\n                            if (IncrementCounters(moveData.size))\n                                return true;\n                        }\n                    }\n                }\n            }\n        }\n    }\n    return false;\n}\n\nbool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)\n{\n    // First free single block, then populate it to the brim, then free another block, and so on\n\n    // Fallback to previous algorithm since without granularity conflicts it can achieve max packing\n    if (vector.m_BufferImageGranularity == 1)\n        return ComputeDefragmentation_Full(vector);\n\n    VMA_ASSERT(m_AlgorithmState != VMA_NULL);\n\n    StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];\n\n    bool texturePresent = false, bufferPresent = false, otherPresent = false;\n    switch (vectorState.operation)\n    {\n    case StateExtensive::Operation::Done: // Vector defragmented\n        return false;\n    case StateExtensive::Operation::FindFreeBlockBuffer:\n    case StateExtensive::Operation::FindFreeBlockTexture:\n    case StateExtensive::Operation::FindFreeBlockAll:\n    {\n        // No more blocks to free, just perform fast realloc and move to cleanup\n        if 
(vectorState.firstFreeBlock == 0)\n        {\n            vectorState.operation = StateExtensive::Operation::Cleanup;\n            return ComputeDefragmentation_Fast(vector);\n        }\n\n        // No free blocks, have to clear last one\n        size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;\n        VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;\n\n        const size_t prevMoveCount = m_Moves.size();\n        for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();\n            handle != VK_NULL_HANDLE;\n            handle = freeMetadata->GetNextAllocation(handle))\n        {\n            MoveAllocationData moveData = GetMoveData(handle, freeMetadata);\n            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))\n            {\n            case CounterStatus::Ignore:\n                continue;\n            case CounterStatus::End:\n                return true;\n            case CounterStatus::Pass:\n                break;\n            default:\n                VMA_ASSERT(0);\n            }\n\n            // Check all previous blocks for free space\n            if (AllocInOtherBlock(0, last, moveData, vector))\n            {\n                // Full clear performed already\n                if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)\n                    vectorState.firstFreeBlock = last;\n                return true;\n            }\n        }\n\n        if (prevMoveCount == m_Moves.size())\n        {\n            // Cannot perform full clear, have to move data in other blocks around\n            if (last != 0)\n            {\n                for (size_t i = last - 1; i; --i)\n                {\n                    if (ReallocWithinBlock(vector, vector.GetBlock(i)))\n                        return true;\n                }\n            }\n\n            if (prevMoveCount == m_Moves.size())\n         
   {\n                // No possible reallocs within blocks, try to move them around fast\n                return ComputeDefragmentation_Fast(vector);\n            }\n        }\n        else\n        {\n            switch (vectorState.operation)\n            {\n            case StateExtensive::Operation::FindFreeBlockBuffer:\n                vectorState.operation = StateExtensive::Operation::MoveBuffers;\n                break;\n            case StateExtensive::Operation::FindFreeBlockTexture:\n                vectorState.operation = StateExtensive::Operation::MoveTextures;\n                break;\n            case StateExtensive::Operation::FindFreeBlockAll:\n                vectorState.operation = StateExtensive::Operation::MoveAll;\n                break;\n            default:\n                VMA_ASSERT(0);\n                vectorState.operation = StateExtensive::Operation::MoveTextures;\n            }\n            vectorState.firstFreeBlock = last;\n            // Nothing done, block found without reallocations, can perform another reallocs in same pass\n            return ComputeDefragmentation_Extensive(vector, index);\n        }\n        break;\n    }\n    case StateExtensive::Operation::MoveTextures:\n    {\n        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,\n            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))\n        {\n            if (texturePresent)\n            {\n                vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;\n                return ComputeDefragmentation_Extensive(vector, index);\n            }\n\n            if (!bufferPresent && !otherPresent)\n            {\n                vectorState.operation = StateExtensive::Operation::Cleanup;\n                break;\n            }\n\n            // No more textures to move, check buffers\n            vectorState.operation = StateExtensive::Operation::MoveBuffers;\n            bufferPresent = false;\n     
       otherPresent = false;\n        }\n        else\n            break;\n        VMA_FALLTHROUGH; // Fallthrough\n    }\n    case StateExtensive::Operation::MoveBuffers:\n    {\n        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,\n            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))\n        {\n            if (bufferPresent)\n            {\n                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;\n                return ComputeDefragmentation_Extensive(vector, index);\n            }\n\n            if (!otherPresent)\n            {\n                vectorState.operation = StateExtensive::Operation::Cleanup;\n                break;\n            }\n\n            // No more buffers to move, check all others\n            vectorState.operation = StateExtensive::Operation::MoveAll;\n            otherPresent = false;\n        }\n        else\n            break;\n        VMA_FALLTHROUGH; // Fallthrough\n    }\n    case StateExtensive::Operation::MoveAll:\n    {\n        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,\n            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))\n        {\n            if (otherPresent)\n            {\n                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;\n                return ComputeDefragmentation_Extensive(vector, index);\n            }\n            // Everything moved\n            vectorState.operation = StateExtensive::Operation::Cleanup;\n        }\n        break;\n    }\n    case StateExtensive::Operation::Cleanup:\n        // Cleanup is handled below so that other operations may reuse the cleanup code. 
        // This case is here to prevent the unhandled enum value warning (C4062).
        break;
    }

    // Cleanup phase: all typed moves are finished; try to compact allocations
    // inside each individual block.
    if (vectorState.operation == StateExtensive::Operation::Cleanup)
    {
        // All other work done, pack data in blocks even tighter if possible
        const size_t prevMoveCount = m_Moves.size();
        for (size_t i = 0; i < vector.GetBlockCount(); ++i)
        {
            if (ReallocWithinBlock(vector, vector.GetBlock(i)))
                return true;
        }

        // No new moves were queued during cleanup -> this vector is fully defragmented.
        if (prevMoveCount == m_Moves.size())
            vectorState.operation = StateExtensive::Operation::Done;
    }
    return false;
}

// Recomputes the average free-region size and average allocation size across all
// blocks of `vector`, storing the results into `state` (used by the Balanced algorithm).
// NOTE(review): divides by allocCount and freeCount without a zero guard — assumes the
// vector holds at least one allocation and one free region; confirm at the call site.
void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
{
    size_t allocCount = 0;
    size_t freeCount = 0;
    state.avgFreeSize = 0;
    state.avgAllocSize = 0;

    for (size_t i = 0; i < vector.GetBlockCount(); ++i)
    {
        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;

        allocCount += metadata->GetAllocationCount();
        freeCount += metadata->GetFreeRegionsCount();
        state.avgFreeSize += metadata->GetSumFreeSize();
        state.avgAllocSize += metadata->GetSize();
    }

    // avgAllocSize accumulated total block sizes above; subtract the free space
    // to average only the used bytes per allocation.
    state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
    state.avgFreeSize /= freeCount;
}

// Walks blocks [0, firstFreeBlock) back-to-front and tries to relocate allocations of
// `currentType` into the free blocks at the end of the vector. The three out-flags
// report which resource categories still remain in the lower blocks.
// Returns true when this call recorded no new moves (nothing more could be moved).
bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
    VmaBlockVector& vector, size_t firstFreeBlock,
    bool& texturePresent, bool& bufferPresent, bool& otherPresent)
{
    const size_t prevMoveCount = m_Moves.size();
    // Reverse iteration: i is pre-decremented below, so blocks are visited
    // from firstFreeBlock-1 down to 0.
    for (size_t i = firstFreeBlock ; i;)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData
                = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            // Enforce the per-pass move budget (bytes / allocation count).
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            case CounterStatus::Pass:
                break;
            default:
                VMA_ASSERT(0);
            }

            // Move only single type of resources at once
            if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
            {
                // Try to fit allocation into free blocks
                if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
                    return false;
            }

            // Record which resource categories are still left behind in lower blocks,
            // so the caller knows which pass to run next.
            if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
                texturePresent = true;
            else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
                bufferPresent = true;
            else
                otherPresent = true;
        }
    }
    // True <=> no moves were added by this call.
    return prevMoveCount == m_Moves.size();
}
#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS

#ifndef _VMA_POOL_T_FUNCTIONS
// Pool constructor: forwards the creation parameters into the embedded block vector.
// `preferredBlockSize` is used only when createInfo.blockSize == 0.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize)
    : m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ?
        1 : hAllocator->GetBufferImageGranularity(),
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
        createInfo.priority,
        VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
        createInfo.pMemoryAllocateNext),
    m_Id(0),
    m_Name(VMA_NULL) {}

// Pool destructor: the pool must already be unlinked from the allocator's
// intrusive pool list before destruction.
VmaPool_T::~VmaPool_T()
{
    VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);

    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
    VmaFreeString(allocs, m_Name);
}

// Replaces the pool's debug name, freeing the previous copy.
// `pName` may be null to clear the name.
void VmaPool_T::SetName(const char* pName)
{
    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
    VmaFreeString(allocs, m_Name);

    if (pName != VMA_NULL)
    {
        m_Name = VmaCreateStringCopy(allocs, pName);
    }
    else
    {
        m_Name = VMA_NULL;
    }
}
#endif // _VMA_POOL_T_FUNCTIONS

#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
// Allocator constructor: decodes create flags into per-feature booleans and caches
// the Vulkan handles. Heavy initialization continues in the constructor body below.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ?
        pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
    m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
    m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0),
    m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0),
    m_UseKhrExternalMemoryWin32((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_hInstance(pCreateInfo->instance),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_AllocationObjectAllocator(&m_AllocationCallbacks),
    m_HeapSizeLimitMask(0),
    m_DeviceMemoryCount(0),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    m_NextPoolId(0),
    m_GlobalMemoryTypeBits(UINT32_MAX)
{
    // These KHR extensions were promoted to core in Vulkan 1.1, so the
    // extension code paths are unnecessary there.
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        m_UseKhrDedicatedAllocation = false;
        m_UseKhrBindMemory2 = false;
    }

    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);

    // Sanity checks: each requested feature must not have been compiled out
    // by the corresponding preprocessor macro.
    if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
    {
#if !(VMA_DEDICATED_ALLOCATION)
        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
        }
#endif
#if !(VMA_BIND_MEMORY2)
        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
        }
#endif
    }
#if !(VMA_MEMORY_BUDGET)
    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
    }
#endif
#if !(VMA_BUFFER_DEVICE_ADDRESS)
    if(m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif
#if VMA_VULKAN_VERSION < 1004000
    VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 4, 0) && "vulkanApiVersion >= VK_API_VERSION_1_4 but required Vulkan version is disabled by preprocessor macros.");
#endif
#if VMA_VULKAN_VERSION < 1003000
    VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 3, 0) && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
#endif
#if VMA_VULKAN_VERSION < 1002000
    VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 2, 0) && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
#endif
#if VMA_VULKAN_VERSION < 1001000
    VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0) && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
#endif
#if !(VMA_MEMORY_PRIORITY)
    if(m_UseExtMemoryPriority)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif
#if !(VMA_KHR_MAINTENANCE4)
    if(m_UseKhrMaintenance4)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif
#if !(VMA_KHR_MAINTENANCE5)
    if(m_UseKhrMaintenance5)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif
    // NOTE(review): the following #if block is an exact duplicate of the
    // VMA_KHR_MAINTENANCE5 check directly above. Harmless at runtime, but
    // redundant — consider removing one copy.
#if !(VMA_KHR_MAINTENANCE5)
    if(m_UseKhrMaintenance5)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif

#if !(VMA_EXTERNAL_MEMORY_WIN32)
    if(m_UseKhrExternalMemoryWin32)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
    }
#endif

    // Zero-initialize POD members before filling them in below.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));

#if VMA_EXTERNAL_MEMORY
    memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
#endif // #if VMA_EXTERNAL_MEMORY

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    // Query device properties and memory properties once and cache them.
    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();

#if VMA_EXTERNAL_MEMORY
    if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
    {
        memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
            sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
    }
#endif // #if VMA_EXTERNAL_MEMORY

    // Apply optional user-provided per-heap size limits by shrinking the
    // cached heap sizes; VK_WHOLE_SIZE means "no limit" for a heap.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimitMask |= 1u << heapIndex;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Create only supported types
        if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
        {
            const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
            m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
                this,
                VK_NULL_HANDLE, // hParentPool
                memTypeIndex,
                preferredBlockSize,
                0,
                SIZE_MAX,
                GetBufferImageGranularity(),
                false, // explicitBlockSize
                0, // algorithm
                0.5f, // priority (0.5 is the default per Vulkan spec)
                GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
                VMA_NULL); // pMemoryAllocateNext
            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
            // because minBlockCount is 0.
        }
    }
}

// Second-stage initialization; currently only refreshes the memory budget
// when VK_EXT_memory_budget is in use. Always returns VK_SUCCESS today.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET

    return res;
}

// Destructor: all user pools must already be destroyed; frees the default
// block vectors in reverse memory-type order.
VmaAllocator_T::~VmaAllocator_T()
{
    VMA_ASSERT(m_Pools.IsEmpty());

    for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        vma_delete(this, m_pBlockVectors[memTypeIndex]);
    }
}

// Populates m_VulkanFunctions: static linking first (if enabled), then
// user-supplied overrides, then dynamic fetching, then validation.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Static();
#endif
    // User-provided pointers take precedence over statically-linked ones.
    if(pVulkanFunctions != VMA_NULL)
    {
        ImportVulkanFunctions_Custom(pVulkanFunctions);
    }

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Dynamic();
#endif

    ValidateVulkanFunctions();
}

#if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Fills m_VulkanFunctions from the statically-linked Vulkan entry points.
void VmaAllocator_T::ImportVulkanFunctions_Static()
{
    // Vulkan 1.0
    m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
    m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;

    // Vulkan 1.1
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        // Core 1.1 entry points stored in the KHR-suffixed members.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
        m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
        m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
    }
#endif

#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
    }
#endif

#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
        m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
    }
#endif
}

#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies any non-null function pointers supplied by the user into m_VulkanFunctions.
void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
    VMA_ASSERT(pVulkanFunctions != VMA_NULL);

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    VMA_COPY_IF_NOT_NULL(vkMapMemory);
    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    VMA_COPY_IF_NOT_NULL(vkCreateImage);
    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif

#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif

#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
#endif
#if VMA_EXTERNAL_MEMORY_WIN32
    VMA_COPY_IF_NOT_NULL(vkGetMemoryWin32HandleKHR);
#endif
#undef VMA_COPY_IF_NOT_NULL
}

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1

// Fetches any still-null function pointers at runtime via
// vkGetInstanceProcAddr / vkGetDeviceProcAddr. Existing (user-supplied or
// statically-linked) pointers are never overwritten.
void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
{
    VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
        "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
        "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
        "Other members can be null.");

// Both macros only assign when the member is still VMA_NULL.
#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);

    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
    VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
    VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
    VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
    VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
    VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
    VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
    VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
    VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
    VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
    VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
    VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");

#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
    }
#endif

#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
        // Try to fetch the pointer from the other name, based on suspected driver bug - see issue #410.
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
    else if(m_UseExtMemoryBudget)
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
        // Try to fetch the pointer from the other name, based on suspected driver bug - see issue #410.
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
    }
#endif

#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
    }
#endif

#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2

#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
    }
    else if(m_UseExtMemoryBudget)
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET

#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
    }
#endif
#if VMA_KHR_MAINTENANCE4
    if(m_UseKhrMaintenance4)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR");
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR");
    }
#endif
#if VMA_EXTERNAL_MEMORY_WIN32
    if (m_UseKhrExternalMemoryWin32)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetMemoryWin32HandleKHR, PFN_vkGetMemoryWin32HandleKHR, "vkGetMemoryWin32HandleKHR");
    }
#endif
#undef VMA_FETCH_DEVICE_FUNC
#undef VMA_FETCH_INSTANCE_FUNC
}

#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1

// Asserts that every function pointer required for the configured feature set
// is non-null after import. Debug-only diagnostics (VMA_ASSERT).
void VmaAllocator_T::ValidateVulkanFunctions()
{
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= \
1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif

#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
    }
#endif
#if VMA_EXTERNAL_MEMORY_WIN32
    if (m_UseKhrExternalMemoryWin32)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetMemoryWin32HandleKHR != VMA_NULL);
    }
#endif

    // Not validating these due to suspected driver bugs with these function
    // pointers being null despite correct extension or Vulkan version is enabled.
    // See issue #397. Their usage in VMA is optional anyway.
    //
    // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
    // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
}

// Returns the preferred size for a new memory block of the given type:
// heapSize/8 for "small" heaps (<= VMA_SMALL_HEAP_MAX_SIZE), otherwise the
// configured large-heap block size; rounded up to a multiple of 32 bytes.
VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
}

// Allocates `allocationCount` allocations from the given memory type, choosing
// between dedicated VkDeviceMemory and block suballocation based on create flags
// and heuristics. (Definition continues past this chunk.)
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VmaPool pool,
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedPreferred,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaBufferImageUsage dedicatedBufferImageUsage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaDedicatedAllocationList& dedicatedAllocations,
    VmaBlockVector& blockVector,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG_FORMAT("  AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;
    // Clamp/adjust parameters for this memory type; bail out early on failure.
    VkResult res = CalcMemTypeParams(
        finalCreateInfo,
        memTypeIndex,
        size,
        allocationCount);
    if(res != VK_SUCCESS)
        return res;

    // Explicit request for dedicated memory takes precedence over any heuristic.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        return AllocateDedicatedMemory(
            pool,
            size,
            suballocType,
            dedicatedAllocations,
            memTypeIndex,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
            (finalCreateInfo.flags &
                (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
            finalCreateInfo.pUserData,
            finalCreateInfo.priority,
            dedicatedBuffer,
            dedicatedImage,
            dedicatedBufferImageUsage,
            allocationCount,
            pAllocations,
blockVector.GetAllocationNextPtr());\n    }\n    else\n    {\n        const bool canAllocateDedicated =\n            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&\n            (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());\n\n        if(canAllocateDedicated)\n        {\n            // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.\n            if(size > blockVector.GetPreferredBlockSize() / 2)\n            {\n                dedicatedPreferred = true;\n            }\n            // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,\n            // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above\n            // 3/4 of the maximum allocation count.\n            if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&\n                m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)\n            {\n                dedicatedPreferred = false;\n            }\n\n            if(dedicatedPreferred)\n            {\n                res = AllocateDedicatedMemory(\n                    pool,\n                    size,\n                    suballocType,\n                    dedicatedAllocations,\n                    memTypeIndex,\n                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,\n                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,\n                    (finalCreateInfo.flags &\n                        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,\n                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,\n                    finalCreateInfo.pUserData,\n                    finalCreateInfo.priority,\n                    dedicatedBuffer,\n 
                   dedicatedImage,\n                    dedicatedBufferImageUsage,\n                    allocationCount,\n                    pAllocations,\n                    blockVector.GetAllocationNextPtr());\n                if(res == VK_SUCCESS)\n                {\n                    // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.\n                    VMA_DEBUG_LOG(\"    Allocated as DedicatedMemory\");\n                    return VK_SUCCESS;\n                }\n            }\n        }\n\n        res = blockVector.Allocate(\n            size,\n            alignment,\n            finalCreateInfo,\n            suballocType,\n            allocationCount,\n            pAllocations);\n        if(res == VK_SUCCESS)\n            return VK_SUCCESS;\n\n        // Try dedicated memory.\n        if(canAllocateDedicated && !dedicatedPreferred)\n        {\n            res = AllocateDedicatedMemory(\n                pool,\n                size,\n                suballocType,\n                dedicatedAllocations,\n                memTypeIndex,\n                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,\n                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,\n                (finalCreateInfo.flags &\n                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,\n                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,\n                finalCreateInfo.pUserData,\n                finalCreateInfo.priority,\n                dedicatedBuffer,\n                dedicatedImage,\n                dedicatedBufferImageUsage,\n                allocationCount,\n                pAllocations,\n                blockVector.GetAllocationNextPtr());\n            if(res == VK_SUCCESS)\n            {\n                // Succeeded: AllocateDedicatedMemory function already filled pMemory, 
nothing more to do here.\n                VMA_DEBUG_LOG(\"    Allocated as DedicatedMemory\");\n                return VK_SUCCESS;\n            }\n        }\n        // Everything failed: Return error code.\n        VMA_DEBUG_LOG(\"    vkAllocateMemory FAILED\");\n        return res;\n    }\n}\n\nVkResult VmaAllocator_T::AllocateDedicatedMemory(\n    VmaPool pool,\n    VkDeviceSize size,\n    VmaSuballocationType suballocType,\n    VmaDedicatedAllocationList& dedicatedAllocations,\n    uint32_t memTypeIndex,\n    bool map,\n    bool isUserDataString,\n    bool isMappingAllowed,\n    bool canAliasMemory,\n    void* pUserData,\n    float priority,\n    VkBuffer dedicatedBuffer,\n    VkImage dedicatedImage,\n    VmaBufferImageUsage dedicatedBufferImageUsage,\n    size_t allocationCount,\n    VmaAllocation* pAllocations,\n    const void* pNextChain)\n{\n    VMA_ASSERT(allocationCount > 0 && pAllocations);\n\n    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };\n    allocInfo.memoryTypeIndex = memTypeIndex;\n    allocInfo.allocationSize = size;\n    allocInfo.pNext = pNextChain;\n\n#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };\n    if(!canAliasMemory)\n    {\n        if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))\n        {\n            if(dedicatedBuffer != VK_NULL_HANDLE)\n            {\n                VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);\n                dedicatedAllocInfo.buffer = dedicatedBuffer;\n                VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);\n            }\n            else if(dedicatedImage != VK_NULL_HANDLE)\n            {\n                dedicatedAllocInfo.image = dedicatedImage;\n                VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);\n            }\n        }\n    }\n#endif // #if VMA_DEDICATED_ALLOCATION || 
VMA_VULKAN_VERSION >= 1001000\n\n#if VMA_BUFFER_DEVICE_ADDRESS\n    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };\n    if(m_UseKhrBufferDeviceAddress)\n    {\n        bool canContainBufferWithDeviceAddress = true;\n        if(dedicatedBuffer != VK_NULL_HANDLE)\n        {\n            canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN ||\n                dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT);\n        }\n        else if(dedicatedImage != VK_NULL_HANDLE)\n        {\n            canContainBufferWithDeviceAddress = false;\n        }\n        if(canContainBufferWithDeviceAddress)\n        {\n            allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;\n            VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);\n        }\n    }\n#endif // #if VMA_BUFFER_DEVICE_ADDRESS\n\n#if VMA_MEMORY_PRIORITY\n    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };\n    if(m_UseExtMemoryPriority)\n    {\n        VMA_ASSERT(priority >= 0.f && priority <= 1.f);\n        priorityInfo.priority = priority;\n        VmaPnextChainPushFront(&allocInfo, &priorityInfo);\n    }\n#endif // #if VMA_MEMORY_PRIORITY\n\n#if VMA_EXTERNAL_MEMORY\n    // Attach VkExportMemoryAllocateInfoKHR if necessary.\n    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };\n    exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);\n    if(exportMemoryAllocInfo.handleTypes != 0)\n    {\n        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);\n    }\n#endif // #if VMA_EXTERNAL_MEMORY\n\n    size_t allocIndex;\n    VkResult res = VK_SUCCESS;\n    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)\n    {\n        res = AllocateDedicatedMemoryPage(\n            pool,\n            size,\n            
suballocType,\n            memTypeIndex,\n            allocInfo,\n            map,\n            isUserDataString,\n            isMappingAllowed,\n            pUserData,\n            pAllocations + allocIndex);\n        if(res != VK_SUCCESS)\n        {\n            break;\n        }\n    }\n\n    if(res == VK_SUCCESS)\n    {\n        for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)\n        {\n            dedicatedAllocations.Register(pAllocations[allocIndex]);\n        }\n        VMA_DEBUG_LOG_FORMAT(\"    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%\" PRIu32, allocationCount, memTypeIndex);\n    }\n    else\n    {\n        // Free all already created allocations.\n        while(allocIndex--)\n        {\n            VmaAllocation currAlloc = pAllocations[allocIndex];\n            VkDeviceMemory hMemory = currAlloc->GetMemory();\n\n            /*\n            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory\n            before vkFreeMemory.\n\n            if(currAlloc->GetMappedData() != VMA_NULL)\n            {\n                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);\n            }\n            */\n\n            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);\n            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());\n            m_AllocationObjectAllocator.Free(currAlloc);\n        }\n\n        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);\n    }\n\n    return res;\n}\n\nVkResult VmaAllocator_T::AllocateDedicatedMemoryPage(\n    VmaPool pool,\n    VkDeviceSize size,\n    VmaSuballocationType suballocType,\n    uint32_t memTypeIndex,\n    const VkMemoryAllocateInfo& allocInfo,\n    bool map,\n    bool isUserDataString,\n    bool isMappingAllowed,\n    void* pUserData,\n    VmaAllocation* pAllocation)\n{\n    VkDeviceMemory hMemory = VK_NULL_HANDLE;\n    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);\n  
  if(res < 0)\n    {\n        VMA_DEBUG_LOG(\"    vkAllocateMemory FAILED\");\n        return res;\n    }\n\n    void* pMappedData = VMA_NULL;\n    if(map)\n    {\n        res = (*m_VulkanFunctions.vkMapMemory)(\n            m_hDevice,\n            hMemory,\n            0,\n            VK_WHOLE_SIZE,\n            0,\n            &pMappedData);\n        if(res < 0)\n        {\n            VMA_DEBUG_LOG(\"    vkMapMemory FAILED\");\n            FreeVulkanMemory(memTypeIndex, size, hMemory);\n            return res;\n        }\n    }\n\n    *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);\n    (*pAllocation)->InitDedicatedAllocation(this, pool, memTypeIndex, hMemory, suballocType, pMappedData, size);\n    if (isUserDataString)\n        (*pAllocation)->SetName(this, (const char*)pUserData);\n    else\n        (*pAllocation)->SetUserData(this, pUserData);\n    m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);\n    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)\n    {\n        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);\n    }\n\n    return VK_SUCCESS;\n}\n\nvoid VmaAllocator_T::GetBufferMemoryRequirements(\n    VkBuffer hBuffer,\n    VkMemoryRequirements& memReq,\n    bool& requiresDedicatedAllocation,\n    bool& prefersDedicatedAllocation) const\n{\n#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))\n    {\n        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };\n        memReqInfo.buffer = hBuffer;\n\n        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };\n\n        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };\n        VmaPnextChainPushFront(&memReq2, &memDedicatedReq);\n\n        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, 
&memReq2);\n\n        memReq = memReq2.memoryRequirements;\n        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);\n        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);\n    }\n    else\n#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    {\n        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);\n        requiresDedicatedAllocation = false;\n        prefersDedicatedAllocation  = false;\n    }\n}\n\nvoid VmaAllocator_T::GetImageMemoryRequirements(\n    VkImage hImage,\n    VkMemoryRequirements& memReq,\n    bool& requiresDedicatedAllocation,\n    bool& prefersDedicatedAllocation) const\n{\n#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))\n    {\n        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };\n        memReqInfo.image = hImage;\n\n        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };\n\n        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };\n        VmaPnextChainPushFront(&memReq2, &memDedicatedReq);\n\n        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);\n\n        memReq = memReq2.memoryRequirements;\n        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);\n        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);\n    }\n    else\n#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000\n    {\n        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);\n        requiresDedicatedAllocation = false;\n        prefersDedicatedAllocation  = false;\n    }\n}\n\nVkResult 
VmaAllocator_T::FindMemoryTypeIndex(\n    uint32_t memoryTypeBits,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    VmaBufferImageUsage bufImgUsage,\n    uint32_t* pMemoryTypeIndex) const\n{\n    memoryTypeBits &= GetGlobalMemoryTypeBits();\n\n    if(pAllocationCreateInfo->memoryTypeBits != 0)\n    {\n        memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;\n    }\n\n    VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;\n    if(!FindMemoryPreferences(\n        IsIntegratedGpu(),\n        *pAllocationCreateInfo,\n        bufImgUsage,\n        requiredFlags, preferredFlags, notPreferredFlags))\n    {\n        return VK_ERROR_FEATURE_NOT_PRESENT;\n    }\n\n    *pMemoryTypeIndex = UINT32_MAX;\n    uint32_t minCost = UINT32_MAX;\n    for(uint32_t memTypeIndex = 0, memTypeBit = 1;\n        memTypeIndex < GetMemoryTypeCount();\n        ++memTypeIndex, memTypeBit <<= 1)\n    {\n        // This memory type is acceptable according to memoryTypeBits bitmask.\n        if((memTypeBit & memoryTypeBits) != 0)\n        {\n            const VkMemoryPropertyFlags currFlags =\n                m_MemProps.memoryTypes[memTypeIndex].propertyFlags;\n            // This memory type contains requiredFlags.\n            if((requiredFlags & ~currFlags) == 0)\n            {\n                // Calculate cost as number of bits from preferredFlags not present in this memory type.\n                uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +\n                    VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);\n                // Remember memory type with lowest cost.\n                if(currCost < minCost)\n                {\n                    *pMemoryTypeIndex = memTypeIndex;\n                    if(currCost == 0)\n                    {\n                        return VK_SUCCESS;\n                    }\n                    minCost = currCost;\n                }\n            }\n        }\n    }\n    return 
(*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;\n}\n\nVkResult VmaAllocator_T::CalcMemTypeParams(\n    VmaAllocationCreateInfo& inoutCreateInfo,\n    uint32_t memTypeIndex,\n    VkDeviceSize size,\n    size_t allocationCount)\n{\n    // If memory type is not HOST_VISIBLE, disable MAPPED.\n    if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&\n        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)\n    {\n        inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;\n    }\n\n    if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&\n        (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)\n    {\n        const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);\n        VmaBudget heapBudget = {};\n        GetHeapBudgets(&heapBudget, heapIndex, 1);\n        if(heapBudget.usage + size * allocationCount > heapBudget.budget)\n        {\n            return VK_ERROR_OUT_OF_DEVICE_MEMORY;\n        }\n    }\n    return VK_SUCCESS;\n}\n\nVkResult VmaAllocator_T::CalcAllocationParams(\n    VmaAllocationCreateInfo& inoutCreateInfo,\n    bool dedicatedRequired,\n    bool dedicatedPreferred)\n{\n    VMA_ASSERT((inoutCreateInfo.flags &\n        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=\n        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&\n        \"Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.\");\n    VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||\n        (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&\n        \"Specifying 
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\");\n    if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)\n    {\n        if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)\n        {\n            VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&\n                \"When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\");\n        }\n    }\n\n    // If memory is lazily allocated, it should be always dedicated.\n    if(dedicatedRequired ||\n        inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)\n    {\n        inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;\n    }\n\n    if(inoutCreateInfo.pool != VK_NULL_HANDLE)\n    {\n        if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&\n            (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)\n        {\n            VMA_ASSERT(0 && \"Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.\");\n            return VK_ERROR_FEATURE_NOT_PRESENT;\n        }\n        inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();\n    }\n\n    if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&\n        (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)\n    {\n        VMA_ASSERT(0 && \"Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no 
sense.\");\n        return VK_ERROR_FEATURE_NOT_PRESENT;\n    }\n\n    if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&\n        (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)\n    {\n        inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;\n    }\n\n    // Non-auto USAGE values imply HOST_ACCESS flags.\n    // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.\n    // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.\n    // Otherwise they just protect from assert on mapping.\n    if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&\n        inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&\n        inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)\n    {\n        if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)\n        {\n            inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;\n        }\n    }\n\n    return VK_SUCCESS;\n}\n\nVkResult VmaAllocator_T::AllocateMemory(\n    const VkMemoryRequirements& vkMemReq,\n    bool requiresDedicatedAllocation,\n    bool prefersDedicatedAllocation,\n    VkBuffer dedicatedBuffer,\n    VkImage dedicatedImage,\n    VmaBufferImageUsage dedicatedBufferImageUsage,\n    const VmaAllocationCreateInfo& createInfo,\n    VmaSuballocationType suballocType,\n    size_t allocationCount,\n    VmaAllocation* pAllocations)\n{\n    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);\n\n    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));\n\n    if(vkMemReq.size == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VmaAllocationCreateInfo createInfoFinal = createInfo;\n    VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);\n    if(res != VK_SUCCESS)\n        return res;\n\n    if(createInfoFinal.pool != 
VK_NULL_HANDLE)\n    {\n        VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;\n        return AllocateMemoryOfType(\n            createInfoFinal.pool,\n            vkMemReq.size,\n            vkMemReq.alignment,\n            prefersDedicatedAllocation,\n            dedicatedBuffer,\n            dedicatedImage,\n            dedicatedBufferImageUsage,\n            createInfoFinal,\n            blockVector.GetMemoryTypeIndex(),\n            suballocType,\n            createInfoFinal.pool->m_DedicatedAllocations,\n            blockVector,\n            allocationCount,\n            pAllocations);\n    }\n    else\n    {\n        // Bit mask of memory Vulkan types acceptable for this allocation.\n        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;\n        uint32_t memTypeIndex = UINT32_MAX;\n        res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);\n        // Can't find any single memory type matching requirements. 
res is VK_ERROR_FEATURE_NOT_PRESENT.\n        if(res != VK_SUCCESS)\n            return res;\n        do\n        {\n            VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];\n            VMA_ASSERT(blockVector && \"Trying to use unsupported memory type!\");\n            res = AllocateMemoryOfType(\n                VK_NULL_HANDLE,\n                vkMemReq.size,\n                vkMemReq.alignment,\n                requiresDedicatedAllocation || prefersDedicatedAllocation,\n                dedicatedBuffer,\n                dedicatedImage,\n                dedicatedBufferImageUsage,\n                createInfoFinal,\n                memTypeIndex,\n                suballocType,\n                m_DedicatedAllocations[memTypeIndex],\n                *blockVector,\n                allocationCount,\n                pAllocations);\n            // Allocation succeeded\n            if(res == VK_SUCCESS)\n                return VK_SUCCESS;\n\n            // Remove old memTypeIndex from list of possibilities.\n            memoryTypeBits &= ~(1u << memTypeIndex);\n            // Find alternative memTypeIndex.\n            res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);\n        } while(res == VK_SUCCESS);\n\n        // No other matching memory type index could be found.\n        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.\n        return VK_ERROR_OUT_OF_DEVICE_MEMORY;\n    }\n}\n\nvoid VmaAllocator_T::FreeMemory(\n    size_t allocationCount,\n    const VmaAllocation* pAllocations)\n{\n    VMA_ASSERT(pAllocations);\n\n    for(size_t allocIndex = allocationCount; allocIndex--; )\n    {\n        VmaAllocation allocation = pAllocations[allocIndex];\n\n        if(allocation != VK_NULL_HANDLE)\n        {\n            if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)\n            {\n                FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);\n  
          }\n\n            switch(allocation->GetType())\n            {\n            case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n                {\n                    VmaBlockVector* pBlockVector = VMA_NULL;\n                    VmaPool hPool = allocation->GetParentPool();\n                    if(hPool != VK_NULL_HANDLE)\n                    {\n                        pBlockVector = &hPool->m_BlockVector;\n                    }\n                    else\n                    {\n                        const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();\n                        pBlockVector = m_pBlockVectors[memTypeIndex];\n                        VMA_ASSERT(pBlockVector && \"Trying to free memory of unsupported type!\");\n                    }\n                    pBlockVector->Free(allocation);\n                }\n                break;\n            case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n                FreeDedicatedMemory(allocation);\n                break;\n            default:\n                VMA_ASSERT(0);\n            }\n        }\n    }\n}\n\nvoid VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)\n{\n    // Initialize.\n    VmaClearDetailedStatistics(pStats->total);\n    for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)\n        VmaClearDetailedStatistics(pStats->memoryType[i]);\n    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)\n        VmaClearDetailedStatistics(pStats->memoryHeap[i]);\n\n    // Process default pools.\n    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n    {\n        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];\n        if (pBlockVector != VMA_NULL)\n            pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);\n    }\n\n    // Process custom pools.\n    {\n        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);\n        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))\n        
{\n            VmaBlockVector& blockVector = pool->m_BlockVector;\n            const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();\n            blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);\n            pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);\n        }\n    }\n\n    // Process dedicated allocations.\n    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n    {\n        m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);\n    }\n\n    // Sum from memory types to memory heaps.\n    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n    {\n        const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;\n        VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);\n    }\n\n    // Sum from memory heaps to total.\n    for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)\n        VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);\n\n    VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||\n        pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);\n    VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||\n        pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);\n}\n\nvoid VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)\n{\n#if VMA_MEMORY_BUDGET\n    if(m_UseExtMemoryBudget)\n    {\n        if(m_Budget.m_OperationsSinceBudgetFetch < 30)\n        {\n            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);\n            for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)\n            {\n                const uint32_t heapIndex = firstHeap + i;\n\n                outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];\n                
outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];\n                outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];\n                outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];\n\n                if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])\n                {\n                    outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +\n                        outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];\n                }\n                else\n                {\n                    outBudgets->usage = 0;\n                }\n\n                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.\n                outBudgets->budget = VMA_MIN(\n                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);\n            }\n        }\n        else\n        {\n            UpdateVulkanBudget(); // Outside of mutex lock\n            GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion\n        }\n    }\n    else\n#endif\n    {\n        for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)\n        {\n            const uint32_t heapIndex = firstHeap + i;\n\n            outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];\n            outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];\n            outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];\n            outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];\n\n            outBudgets->usage = outBudgets->statistics.blockBytes;\n            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.\n        }\n    }\n}\n\nvoid VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)\n{\n    
pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();\n    pAllocationInfo->deviceMemory = hAllocation->GetMemory();\n    pAllocationInfo->offset = hAllocation->GetOffset();\n    pAllocationInfo->size = hAllocation->GetSize();\n    pAllocationInfo->pMappedData = hAllocation->GetMappedData();\n    pAllocationInfo->pUserData = hAllocation->GetUserData();\n    pAllocationInfo->pName = hAllocation->GetName();\n}\n\nvoid VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo)\n{\n    GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo);\n\n    switch (hAllocation->GetType())\n    {\n    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n        pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();\n        pAllocationInfo->dedicatedMemory = VK_FALSE;\n        break;\n    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n        pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size;\n        pAllocationInfo->dedicatedMemory = VK_TRUE;\n        break;\n    default:\n        VMA_ASSERT(0);\n    }\n}\n\nVkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)\n{\n    VMA_DEBUG_LOG_FORMAT(\"  CreatePool: MemoryTypeIndex=%\" PRIu32 \", flags=%\" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags);\n\n    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;\n\n    // Protection against uninitialized new structure member. 
If garbage data are left there, this pointer dereference would crash.\n    if(pCreateInfo->pMemoryAllocateNext)\n    {\n        VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);\n    }\n\n    if(newCreateInfo.maxBlockCount == 0)\n    {\n        newCreateInfo.maxBlockCount = SIZE_MAX;\n    }\n    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n    // Memory type index out of range or forbidden.\n    if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||\n        ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)\n    {\n        return VK_ERROR_FEATURE_NOT_PRESENT;\n    }\n    if(newCreateInfo.minAllocationAlignment > 0)\n    {\n        VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));\n    }\n\n    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);\n\n    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);\n\n    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();\n    if(res != VK_SUCCESS)\n    {\n        vma_delete(this, *pPool);\n        *pPool = VMA_NULL;\n        return res;\n    }\n\n    // Add to m_Pools.\n    {\n        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);\n        (*pPool)->SetId(m_NextPoolId++);\n        m_Pools.PushBack(*pPool);\n    }\n\n    return VK_SUCCESS;\n}\n\nvoid VmaAllocator_T::DestroyPool(VmaPool pool)\n{\n    // Remove from m_Pools.\n    {\n        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);\n        m_Pools.Remove(pool);\n    }\n\n    vma_delete(this, pool);\n}\n\nvoid VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)\n{\n    VmaClearStatistics(*pPoolStats);\n    pool->m_BlockVector.AddStatistics(*pPoolStats);\n    pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);\n}\n\nvoid VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)\n{\n    
VmaClearDetailedStatistics(*pPoolStats);\n    pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);\n    pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);\n}\n\nvoid VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)\n{\n    m_CurrentFrameIndex.store(frameIndex);\n\n#if VMA_MEMORY_BUDGET\n    if(m_UseExtMemoryBudget)\n    {\n        UpdateVulkanBudget();\n    }\n#endif // #if VMA_MEMORY_BUDGET\n}\n\nVkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)\n{\n    return hPool->m_BlockVector.CheckCorruption();\n}\n\nVkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)\n{\n    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;\n\n    // Process default pools.\n    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n    {\n        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];\n        if(pBlockVector != VMA_NULL)\n        {\n            VkResult localRes = pBlockVector->CheckCorruption();\n            switch(localRes)\n            {\n            case VK_ERROR_FEATURE_NOT_PRESENT:\n                break;\n            case VK_SUCCESS:\n                finalRes = VK_SUCCESS;\n                break;\n            default:\n                return localRes;\n            }\n        }\n    }\n\n    // Process custom pools.\n    {\n        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);\n        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))\n        {\n            if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)\n            {\n                VkResult localRes = pool->m_BlockVector.CheckCorruption();\n                switch(localRes)\n                {\n                case VK_ERROR_FEATURE_NOT_PRESENT:\n                    break;\n                case VK_SUCCESS:\n                    finalRes = VK_SUCCESS;\n                    break;\n                default:\n                    return localRes;\n                }\n  
          }\n        }\n    }\n\n    return finalRes;\n}\n\nVkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)\n{\n    AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;\n    const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);\n#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT\n    if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)\n    {\n        return VK_ERROR_TOO_MANY_OBJECTS;\n    }\n#endif\n\n    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);\n\n    // HeapSizeLimit is in effect for this heap.\n    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)\n    {\n        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;\n        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];\n        for(;;)\n        {\n            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;\n            if(blockBytesAfterAllocation > heapSize)\n            {\n                return VK_ERROR_OUT_OF_DEVICE_MEMORY;\n            }\n            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))\n            {\n                break;\n            }\n        }\n    }\n    else\n    {\n        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;\n    }\n    ++m_Budget.m_BlockCount[heapIndex];\n\n    // VULKAN CALL vkAllocateMemory.\n    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);\n\n    if(res == VK_SUCCESS)\n    {\n#if VMA_MEMORY_BUDGET\n        ++m_Budget.m_OperationsSinceBudgetFetch;\n#endif\n\n        // Informative callback.\n        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)\n        {\n            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, 
pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);\n        }\n\n        deviceMemoryCountIncrement.Commit();\n    }\n    else\n    {\n        --m_Budget.m_BlockCount[heapIndex];\n        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;\n    }\n\n    return res;\n}\n\nvoid VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)\n{\n    // Informative callback.\n    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)\n    {\n        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);\n    }\n\n    // VULKAN CALL vkFreeMemory.\n    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());\n\n    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);\n    --m_Budget.m_BlockCount[heapIndex];\n    m_Budget.m_BlockBytes[heapIndex] -= size;\n\n    --m_DeviceMemoryCount;\n}\n\nVkResult VmaAllocator_T::BindVulkanBuffer(\n    VkDeviceMemory memory,\n    VkDeviceSize memoryOffset,\n    VkBuffer buffer,\n    const void* pNext)\n{\n    if(pNext != VMA_NULL)\n    {\n#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2\n        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&\n            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)\n        {\n            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };\n            bindBufferMemoryInfo.pNext = pNext;\n            bindBufferMemoryInfo.buffer = buffer;\n            bindBufferMemoryInfo.memory = memory;\n            bindBufferMemoryInfo.memoryOffset = memoryOffset;\n            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);\n        }\n        else\n#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2\n        {\n            return VK_ERROR_EXTENSION_NOT_PRESENT;\n        }\n    }\n    else\n   
 {\n        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);\n    }\n}\n\nVkResult VmaAllocator_T::BindVulkanImage(\n    VkDeviceMemory memory,\n    VkDeviceSize memoryOffset,\n    VkImage image,\n    const void* pNext)\n{\n    if(pNext != VMA_NULL)\n    {\n#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2\n        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&\n            m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)\n        {\n            VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };\n            bindBufferMemoryInfo.pNext = pNext;\n            bindBufferMemoryInfo.image = image;\n            bindBufferMemoryInfo.memory = memory;\n            bindBufferMemoryInfo.memoryOffset = memoryOffset;\n            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);\n        }\n        else\n#endif // #if VMA_BIND_MEMORY2\n        {\n            return VK_ERROR_EXTENSION_NOT_PRESENT;\n        }\n    }\n    else\n    {\n        return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);\n    }\n}\n\nVkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)\n{\n    switch(hAllocation->GetType())\n    {\n    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n        {\n            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();\n            char *pBytes = VMA_NULL;\n            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);\n            if(res == VK_SUCCESS)\n            {\n                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();\n                hAllocation->BlockAllocMap();\n            }\n            return res;\n        }\n    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n        return hAllocation->DedicatedAllocMap(this, ppData);\n    default:\n        VMA_ASSERT(0);\n        return VK_ERROR_MEMORY_MAP_FAILED;\n    }\n}\n\nvoid 
VmaAllocator_T::Unmap(VmaAllocation hAllocation)\n{\n    switch(hAllocation->GetType())\n    {\n    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n        {\n            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();\n            hAllocation->BlockAllocUnmap();\n            pBlock->Unmap(this, 1);\n        }\n        break;\n    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n        hAllocation->DedicatedAllocUnmap(this);\n        break;\n    default:\n        VMA_ASSERT(0);\n    }\n}\n\nVkResult VmaAllocator_T::BindBufferMemory(\n    VmaAllocation hAllocation,\n    VkDeviceSize allocationLocalOffset,\n    VkBuffer hBuffer,\n    const void* pNext)\n{\n    VkResult res = VK_ERROR_UNKNOWN_COPY;\n    switch(hAllocation->GetType())\n    {\n    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n        res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);\n        break;\n    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n    {\n        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();\n        VMA_ASSERT(pBlock && \"Binding buffer to allocation that doesn't belong to any block.\");\n        res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);\n        break;\n    }\n    default:\n        VMA_ASSERT(0);\n    }\n    return res;\n}\n\nVkResult VmaAllocator_T::BindImageMemory(\n    VmaAllocation hAllocation,\n    VkDeviceSize allocationLocalOffset,\n    VkImage hImage,\n    const void* pNext)\n{\n    VkResult res = VK_ERROR_UNKNOWN_COPY;\n    switch(hAllocation->GetType())\n    {\n    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n        res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);\n        break;\n    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n    {\n        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();\n        VMA_ASSERT(pBlock && \"Binding image to allocation that doesn't belong to any block.\");\n        res = 
pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);\n        break;\n    }\n    default:\n        VMA_ASSERT(0);\n    }\n    return res;\n}\n\nVkResult VmaAllocator_T::FlushOrInvalidateAllocation(\n    VmaAllocation hAllocation,\n    VkDeviceSize offset, VkDeviceSize size,\n    VMA_CACHE_OPERATION op)\n{\n    VkResult res = VK_SUCCESS;\n\n    VkMappedMemoryRange memRange = {};\n    if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))\n    {\n        switch(op)\n        {\n        case VMA_CACHE_FLUSH:\n            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);\n            break;\n        case VMA_CACHE_INVALIDATE:\n            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);\n            break;\n        default:\n            VMA_ASSERT(0);\n        }\n    }\n    // else: Just ignore this call.\n    return res;\n}\n\nVkResult VmaAllocator_T::FlushOrInvalidateAllocations(\n    uint32_t allocationCount,\n    const VmaAllocation* allocations,\n    const VkDeviceSize* offsets, const VkDeviceSize* sizes,\n    VMA_CACHE_OPERATION op)\n{\n    typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;\n    typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;\n    RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));\n\n    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)\n    {\n        const VmaAllocation alloc = allocations[allocIndex];\n        const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;\n        const VkDeviceSize size = sizes != VMA_NULL ? 
sizes[allocIndex] : VK_WHOLE_SIZE;\n        VkMappedMemoryRange newRange;\n        if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))\n        {\n            ranges.push_back(newRange);\n        }\n    }\n\n    VkResult res = VK_SUCCESS;\n    if(!ranges.empty())\n    {\n        switch(op)\n        {\n        case VMA_CACHE_FLUSH:\n            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());\n            break;\n        case VMA_CACHE_INVALIDATE:\n            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());\n            break;\n        default:\n            VMA_ASSERT(0);\n        }\n    }\n    // else: Just ignore this call.\n    return res;\n}\n\nVkResult VmaAllocator_T::CopyMemoryToAllocation(\n    const void* pSrcHostPointer,\n    VmaAllocation dstAllocation,\n    VkDeviceSize dstAllocationLocalOffset,\n    VkDeviceSize size)\n{\n    void* dstMappedData = VMA_NULL;\n    VkResult res = Map(dstAllocation, &dstMappedData);\n    if(res == VK_SUCCESS)\n    {\n        memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size);\n        Unmap(dstAllocation);\n        res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH);\n    }\n    return res;\n}\n\nVkResult VmaAllocator_T::CopyAllocationToMemory(\n    VmaAllocation srcAllocation,\n    VkDeviceSize srcAllocationLocalOffset,\n    void* pDstHostPointer,\n    VkDeviceSize size)\n{\n    void* srcMappedData = VMA_NULL;\n    VkResult res = Map(srcAllocation, &srcMappedData);\n    if(res == VK_SUCCESS)\n    {\n        res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE);\n        if(res == VK_SUCCESS)\n        {\n            memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size);\n            Unmap(srcAllocation);\n        }\n    }\n    
return res;\n}\n\nvoid VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)\n{\n    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);\n\n    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();\n    VmaPool parentPool = allocation->GetParentPool();\n    if(parentPool == VK_NULL_HANDLE)\n    {\n        // Default pool\n        m_DedicatedAllocations[memTypeIndex].Unregister(allocation);\n    }\n    else\n    {\n        // Custom pool\n        parentPool->m_DedicatedAllocations.Unregister(allocation);\n    }\n\n    VkDeviceMemory hMemory = allocation->GetMemory();\n\n    /*\n    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory\n    before vkFreeMemory.\n\n    if(allocation->GetMappedData() != VMA_NULL)\n    {\n        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);\n    }\n    */\n\n    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);\n\n    m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());\n    allocation->Destroy(this);\n    m_AllocationObjectAllocator.Free(allocation);\n\n    VMA_DEBUG_LOG_FORMAT(\"    Freed DedicatedMemory MemoryTypeIndex=%\" PRIu32, memTypeIndex);\n}\n\nuint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const\n{\n    VkBufferCreateInfo dummyBufCreateInfo;\n    VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);\n\n    uint32_t memoryTypeBits = 0;\n\n    // Create buffer.\n    VkBuffer buf = VK_NULL_HANDLE;\n    VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(\n        m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);\n    if(res == VK_SUCCESS)\n    {\n        // Query for supported memory types.\n        VkMemoryRequirements memReq;\n        (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);\n        memoryTypeBits = memReq.memoryTypeBits;\n\n        // Destroy buffer.\n        
(*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());\n    }\n\n    return memoryTypeBits;\n}\n\nuint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const\n{\n    // Make sure memory information is already fetched.\n    VMA_ASSERT(GetMemoryTypeCount() > 0);\n\n    uint32_t memoryTypeBits = UINT32_MAX;\n\n    if(!m_UseAmdDeviceCoherentMemory)\n    {\n        // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.\n        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n        {\n            if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)\n            {\n                memoryTypeBits &= ~(1u << memTypeIndex);\n            }\n        }\n    }\n\n    return memoryTypeBits;\n}\n\nbool VmaAllocator_T::GetFlushOrInvalidateRange(\n    VmaAllocation allocation,\n    VkDeviceSize offset, VkDeviceSize size,\n    VkMappedMemoryRange& outRange) const\n{\n    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();\n    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))\n    {\n        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;\n        const VkDeviceSize allocationSize = allocation->GetSize();\n        VMA_ASSERT(offset <= allocationSize);\n\n        outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;\n        outRange.pNext = VMA_NULL;\n        outRange.memory = allocation->GetMemory();\n\n        switch(allocation->GetType())\n        {\n        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:\n            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);\n            if(size == VK_WHOLE_SIZE)\n            {\n                outRange.size = allocationSize - outRange.offset;\n            }\n            else\n            {\n                VMA_ASSERT(offset + size <= allocationSize);\n                outRange.size = VMA_MIN(\n                    
VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),\n                    allocationSize - outRange.offset);\n            }\n            break;\n        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:\n        {\n            // 1. Still within this allocation.\n            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);\n            if(size == VK_WHOLE_SIZE)\n            {\n                size = allocationSize - offset;\n            }\n            else\n            {\n                VMA_ASSERT(offset + size <= allocationSize);\n            }\n            outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);\n\n            // 2. Adjust to whole block.\n            const VkDeviceSize allocationOffset = allocation->GetOffset();\n            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);\n            const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();\n            outRange.offset += allocationOffset;\n            outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);\n\n            break;\n        }\n        default:\n            VMA_ASSERT(0);\n        }\n        return true;\n    }\n    return false;\n}\n\n#if VMA_MEMORY_BUDGET\nvoid VmaAllocator_T::UpdateVulkanBudget()\n{\n    VMA_ASSERT(m_UseExtMemoryBudget);\n\n    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };\n\n    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };\n    VmaPnextChainPushFront(&memProps, &budgetProps);\n\n    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);\n\n    {\n        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);\n\n        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)\n        {\n            m_Budget.m_VulkanUsage[heapIndex] = 
budgetProps.heapUsage[heapIndex];\n            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];\n            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();\n\n            // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.\n            if(m_Budget.m_VulkanBudget[heapIndex] == 0)\n            {\n                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.\n            }\n            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)\n            {\n                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;\n            }\n            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)\n            {\n                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];\n            }\n        }\n        m_Budget.m_OperationsSinceBudgetFetch = 0;\n    }\n}\n#endif // VMA_MEMORY_BUDGET\n\nvoid VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)\n{\n    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&\n        hAllocation->IsMappingAllowed() &&\n        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)\n    {\n        void* pData = VMA_NULL;\n        VkResult res = Map(hAllocation, &pData);\n        if(res == VK_SUCCESS)\n        {\n            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());\n            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);\n            Unmap(hAllocation);\n        }\n        else\n        {\n            VMA_ASSERT(0 && \"VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.\");\n        }\n    }\n}\n\nuint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()\n{\n    uint32_t 
memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();\n    if(memoryTypeBits == UINT32_MAX)\n    {\n        memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();\n        m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);\n    }\n    return memoryTypeBits;\n}\n\n#if VMA_STATS_STRING_ENABLED\nvoid VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)\n{\n    json.WriteString(\"DefaultPools\");\n    json.BeginObject();\n    {\n        for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n        {\n            VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];\n            VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];\n            if (pBlockVector != VMA_NULL)\n            {\n                json.BeginString(\"Type \");\n                json.ContinueString(memTypeIndex);\n                json.EndString();\n                json.BeginObject();\n                {\n                    json.WriteString(\"PreferredBlockSize\");\n                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());\n\n                    json.WriteString(\"Blocks\");\n                    pBlockVector->PrintDetailedMap(json);\n\n                    json.WriteString(\"DedicatedAllocations\");\n                    dedicatedAllocList.BuildStatsString(json);\n                }\n                json.EndObject();\n            }\n        }\n    }\n    json.EndObject();\n\n    json.WriteString(\"CustomPools\");\n    json.BeginObject();\n    {\n        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);\n        if (!m_Pools.IsEmpty())\n        {\n            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)\n            {\n                bool displayType = true;\n                size_t index = 0;\n                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))\n                {\n                    VmaBlockVector& 
blockVector = pool->m_BlockVector;\n                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)\n                    {\n                        if (displayType)\n                        {\n                            json.BeginString(\"Type \");\n                            json.ContinueString(memTypeIndex);\n                            json.EndString();\n                            json.BeginArray();\n                            displayType = false;\n                        }\n\n                        json.BeginObject();\n                        {\n                            json.WriteString(\"Name\");\n                            json.BeginString();\n                            json.ContinueString((uint64_t)index++);\n                            if (pool->GetName())\n                            {\n                                json.ContinueString(\" - \");\n                                json.ContinueString(pool->GetName());\n                            }\n                            json.EndString();\n\n                            json.WriteString(\"PreferredBlockSize\");\n                            json.WriteNumber(blockVector.GetPreferredBlockSize());\n\n                            json.WriteString(\"Blocks\");\n                            blockVector.PrintDetailedMap(json);\n\n                            json.WriteString(\"DedicatedAllocations\");\n                            pool->m_DedicatedAllocations.BuildStatsString(json);\n                        }\n                        json.EndObject();\n                    }\n                }\n\n                if (!displayType)\n                    json.EndArray();\n            }\n        }\n    }\n    json.EndObject();\n}\n#endif // VMA_STATS_STRING_ENABLED\n#endif // _VMA_ALLOCATOR_T_FUNCTIONS\n\n\n#ifndef _VMA_PUBLIC_INTERFACE\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(\n    const VmaAllocatorCreateInfo* pCreateInfo,\n    VmaAllocator* pAllocator)\n{\n    VMA_ASSERT(pCreateInfo 
&& pAllocator);\n    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||\n        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 4));\n    VMA_DEBUG_LOG(\"vmaCreateAllocator\");\n    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);\n    VkResult result = (*pAllocator)->Init(pCreateInfo);\n    if(result < 0)\n    {\n        vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);\n        *pAllocator = VK_NULL_HANDLE;\n    }\n    return result;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(\n    VmaAllocator allocator)\n{\n    if(allocator != VK_NULL_HANDLE)\n    {\n        VMA_DEBUG_LOG(\"vmaDestroyAllocator\");\n        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.\n        vma_delete(&allocationCallbacks, allocator);\n    }\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)\n{\n    VMA_ASSERT(allocator && pAllocatorInfo);\n    pAllocatorInfo->instance = allocator->m_hInstance;\n    pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();\n    pAllocatorInfo->device = allocator->m_hDevice;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(\n    VmaAllocator allocator,\n    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)\n{\n    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);\n    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(\n    VmaAllocator allocator,\n    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)\n{\n    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);\n    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(\n    VmaAllocator allocator,\n    uint32_t 
memoryTypeIndex,\n    VkMemoryPropertyFlags* pFlags)\n{\n    VMA_ASSERT(allocator && pFlags);\n    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());\n    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(\n    VmaAllocator allocator,\n    uint32_t frameIndex)\n{\n    VMA_ASSERT(allocator);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->SetCurrentFrameIndex(frameIndex);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(\n    VmaAllocator allocator,\n    VmaTotalStatistics* pStats)\n{\n    VMA_ASSERT(allocator && pStats);\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n    allocator->CalculateStatistics(pStats);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(\n    VmaAllocator allocator,\n    VmaBudget* pBudgets)\n{\n    VMA_ASSERT(allocator && pBudgets);\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n    allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());\n}\n\n#if VMA_STATS_STRING_ENABLED\n\nVMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(\n    VmaAllocator allocator,\n    char** ppStatsString,\n    VkBool32 detailedMap)\n{\n    VMA_ASSERT(allocator && ppStatsString);\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VmaStringBuilder sb(allocator->GetAllocationCallbacks());\n    {\n        VmaBudget budgets[VK_MAX_MEMORY_HEAPS];\n        allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());\n\n        VmaTotalStatistics stats;\n        allocator->CalculateStatistics(&stats);\n\n        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);\n        json.BeginObject();\n        {\n            json.WriteString(\"General\");\n            json.BeginObject();\n            {\n                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;\n                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;\n\n                json.WriteString(\"API\");\n          
      json.WriteString(\"Vulkan\");\n\n                json.WriteString(\"apiVersion\");\n                json.BeginString();\n                json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));\n                json.ContinueString(\".\");\n                json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));\n                json.ContinueString(\".\");\n                json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));\n                json.EndString();\n\n                json.WriteString(\"GPU\");\n                json.WriteString(deviceProperties.deviceName);\n                json.WriteString(\"deviceType\");\n                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));\n\n                json.WriteString(\"maxMemoryAllocationCount\");\n                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);\n                json.WriteString(\"bufferImageGranularity\");\n                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);\n                json.WriteString(\"nonCoherentAtomSize\");\n                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);\n\n                json.WriteString(\"memoryHeapCount\");\n                json.WriteNumber(memoryProperties.memoryHeapCount);\n                json.WriteString(\"memoryTypeCount\");\n                json.WriteNumber(memoryProperties.memoryTypeCount);\n            }\n            json.EndObject();\n        }\n        {\n            json.WriteString(\"Total\");\n            VmaPrintDetailedStatistics(json, stats.total);\n        }\n        {\n            json.WriteString(\"MemoryInfo\");\n            json.BeginObject();\n            {\n                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)\n                {\n                    json.BeginString(\"Heap \");\n                    json.ContinueString(heapIndex);\n                    json.EndString();\n   
                 json.BeginObject();\n                    {\n                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];\n                        json.WriteString(\"Flags\");\n                        json.BeginArray(true);\n                        {\n                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)\n                                json.WriteString(\"DEVICE_LOCAL\");\n                        #if VMA_VULKAN_VERSION >= 1001000\n                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)\n                                json.WriteString(\"MULTI_INSTANCE\");\n                        #endif\n\n                            VkMemoryHeapFlags flags = heapInfo.flags &\n                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT\n                        #if VMA_VULKAN_VERSION >= 1001000\n                                    | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT\n                        #endif\n                                    );\n                            if (flags != 0)\n                                json.WriteNumber(flags);\n                        }\n                        json.EndArray();\n\n                        json.WriteString(\"Size\");\n                        json.WriteNumber(heapInfo.size);\n\n                        json.WriteString(\"Budget\");\n                        json.BeginObject();\n                        {\n                            json.WriteString(\"BudgetBytes\");\n                            json.WriteNumber(budgets[heapIndex].budget);\n                            json.WriteString(\"UsageBytes\");\n                            json.WriteNumber(budgets[heapIndex].usage);\n                        }\n                        json.EndObject();\n\n                        json.WriteString(\"Stats\");\n                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);\n\n                        
json.WriteString(\"MemoryPools\");\n                        json.BeginObject();\n                        {\n                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)\n                            {\n                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)\n                                {\n                                    json.BeginString(\"Type \");\n                                    json.ContinueString(typeIndex);\n                                    json.EndString();\n                                    json.BeginObject();\n                                    {\n                                        json.WriteString(\"Flags\");\n                                        json.BeginArray(true);\n                                        {\n                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;\n                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)\n                                                json.WriteString(\"DEVICE_LOCAL\");\n                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)\n                                                json.WriteString(\"HOST_VISIBLE\");\n                                            if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n                                                json.WriteString(\"HOST_COHERENT\");\n                                            if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)\n                                                json.WriteString(\"HOST_CACHED\");\n                                            if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)\n                                                json.WriteString(\"LAZILY_ALLOCATED\");\n                                        #if VMA_VULKAN_VERSION >= 1001000\n                                  
          if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)\n                                                json.WriteString(\"PROTECTED\");\n                                        #endif\n                                        #if VK_AMD_device_coherent_memory\n                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)\n                                                json.WriteString(\"DEVICE_COHERENT_AMD\");\n                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)\n                                                json.WriteString(\"DEVICE_UNCACHED_AMD\");\n                                        #endif\n\n                                            // Mask out every bit already reported by name above so only\n                                            // genuinely unknown bits fall through to the numeric fallback.\n                                            // PROTECTED is the Vulkan 1.1 bit, so it belongs under the\n                                            // version guard; LAZILY_ALLOCATED is core 1.0 and is always\n                                            // written by name, so it is masked unconditionally.\n                                            flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT\n                                        #if VMA_VULKAN_VERSION >= 1001000\n                                                | VK_MEMORY_PROPERTY_PROTECTED_BIT\n                                        #endif\n                                        #if VK_AMD_device_coherent_memory\n                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY\n                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY\n                                        #endif\n                                                | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT\n                                                | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT\n                                                | VK_MEMORY_PROPERTY_HOST_CACHED_BIT\n                                                | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT);\n                                            if (flags != 0)\n                                                json.WriteNumber(flags);\n                                        }\n                                        json.EndArray();\n\n                                        json.WriteString(\"Stats\");\n                                        VmaPrintDetailedStatistics(json, 
stats.memoryType[typeIndex]);\n                                    }\n                                    json.EndObject();\n                                }\n                            }\n\n                        }\n                        json.EndObject();\n                    }\n                    json.EndObject();\n                }\n            }\n            json.EndObject();\n        }\n\n        if (detailedMap == VK_TRUE)\n            allocator->PrintDetailedMap(json);\n\n        json.EndObject();\n    }\n\n    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(\n    VmaAllocator allocator,\n    char* pStatsString)\n{\n    if(pStatsString != VMA_NULL)\n    {\n        VMA_ASSERT(allocator);\n        VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);\n    }\n}\n\n#endif // VMA_STATS_STRING_ENABLED\n\n/*\nThis function is not protected by any mutex because it just reads immutable data.\n*/\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(\n    VmaAllocator allocator,\n    uint32_t memoryTypeBits,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    uint32_t* pMemoryTypeIndex)\n{\n    VMA_ASSERT(allocator != VK_NULL_HANDLE);\n    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);\n    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);\n\n    return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(\n    VmaAllocator allocator,\n    const VkBufferCreateInfo* pBufferCreateInfo,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    uint32_t* pMemoryTypeIndex)\n{\n    VMA_ASSERT(allocator != VK_NULL_HANDLE);\n    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);\n    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);\n    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);\n\n    const VkDevice 
hDev = allocator->m_hDevice;\n    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();\n    VkResult res;\n\n#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000\n    if(funcs->vkGetDeviceBufferMemoryRequirements)\n    {\n        // Can query straight from VkBufferCreateInfo :)\n        VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR};\n        devBufMemReq.pCreateInfo = pBufferCreateInfo;\n\n        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};\n        (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);\n\n        res = allocator->FindMemoryTypeIndex(\n            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo,\n            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex);\n    }\n    else\n#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000\n    {\n        // Must create a dummy buffer to query :(\n        VkBuffer hBuffer = VK_NULL_HANDLE;\n        res = funcs->vkCreateBuffer(\n            hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);\n        if(res == VK_SUCCESS)\n        {\n            VkMemoryRequirements memReq = {};\n            funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);\n\n            res = allocator->FindMemoryTypeIndex(\n                memReq.memoryTypeBits, pAllocationCreateInfo,\n                VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex);\n\n            funcs->vkDestroyBuffer(\n                hDev, hBuffer, allocator->GetAllocationCallbacks());\n        }\n    }\n    return res;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(\n    VmaAllocator allocator,\n    const VkImageCreateInfo* pImageCreateInfo,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    uint32_t* pMemoryTypeIndex)\n{\n    VMA_ASSERT(allocator != 
VK_NULL_HANDLE);\n    VMA_ASSERT(pImageCreateInfo != VMA_NULL);\n    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);\n    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);\n\n    const VkDevice hDev = allocator->m_hDevice;\n    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();\n    VkResult res;\n\n#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000\n    if(funcs->vkGetDeviceImageMemoryRequirements)\n    {\n        // Can query straight from VkImageCreateInfo :)\n        VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR};\n        devImgMemReq.pCreateInfo = pImageCreateInfo;\n        VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&\n            \"Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.\");\n\n        VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};\n        (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);\n\n        res = allocator->FindMemoryTypeIndex(\n            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo,\n            VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex);\n    }\n    else\n#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000\n    {\n        // Must create a dummy image to query :(\n        VkImage hImage = VK_NULL_HANDLE;\n        res = funcs->vkCreateImage(\n            hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);\n        if(res == VK_SUCCESS)\n        {\n            VkMemoryRequirements memReq = {};\n            funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);\n\n            res = allocator->FindMemoryTypeIndex(\n                memReq.memoryTypeBits, pAllocationCreateInfo,\n                VmaBufferImageUsage(*pImageCreateInfo), 
pMemoryTypeIndex);\n\n            funcs->vkDestroyImage(\n                hDev, hImage, allocator->GetAllocationCallbacks());\n        }\n    }\n    return res;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(\n    VmaAllocator allocator,\n    const VmaPoolCreateInfo* pCreateInfo,\n    VmaPool* pPool)\n{\n    VMA_ASSERT(allocator && pCreateInfo && pPool);\n\n    VMA_DEBUG_LOG(\"vmaCreatePool\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->CreatePool(pCreateInfo, pPool);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(\n    VmaAllocator allocator,\n    VmaPool pool)\n{\n    VMA_ASSERT(allocator);\n\n    if(pool == VK_NULL_HANDLE)\n    {\n        return;\n    }\n\n    VMA_DEBUG_LOG(\"vmaDestroyPool\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->DestroyPool(pool);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(\n    VmaAllocator allocator,\n    VmaPool pool,\n    VmaStatistics* pPoolStats)\n{\n    VMA_ASSERT(allocator && pool && pPoolStats);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->GetPoolStatistics(pool, pPoolStats);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(\n    VmaAllocator allocator,\n    VmaPool pool,\n    VmaDetailedStatistics* pPoolStats)\n{\n    VMA_ASSERT(allocator && pool && pPoolStats);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->CalculatePoolStatistics(pool, pPoolStats);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)\n{\n    VMA_ASSERT(allocator && pool);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VMA_DEBUG_LOG(\"vmaCheckPoolCorruption\");\n\n    return allocator->CheckPoolCorruption(pool);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(\n    VmaAllocator allocator,\n    VmaPool pool,\n    const char** ppName)\n{\n    VMA_ASSERT(allocator && pool && ppName);\n\n    VMA_DEBUG_LOG(\"vmaGetPoolName\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    *ppName = pool->GetName();\n}\n\nVMA_CALL_PRE void 
VMA_CALL_POST vmaSetPoolName(\n    VmaAllocator allocator,\n    VmaPool pool,\n    const char* pName)\n{\n    VMA_ASSERT(allocator && pool);\n\n    VMA_DEBUG_LOG(\"vmaSetPoolName\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    pool->SetName(pName);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(\n    VmaAllocator allocator,\n    const VkMemoryRequirements* pVkMemoryRequirements,\n    const VmaAllocationCreateInfo* pCreateInfo,\n    VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);\n\n    VMA_DEBUG_LOG(\"vmaAllocateMemory\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VkResult result = allocator->AllocateMemory(\n        *pVkMemoryRequirements,\n        false, // requiresDedicatedAllocation\n        false, // prefersDedicatedAllocation\n        VK_NULL_HANDLE, // dedicatedBuffer\n        VK_NULL_HANDLE, // dedicatedImage\n        VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage\n        *pCreateInfo,\n        VMA_SUBALLOCATION_TYPE_UNKNOWN,\n        1, // allocationCount\n        pAllocation);\n\n    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)\n    {\n        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n    }\n\n    return result;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(\n    VmaAllocator allocator,\n    const VkMemoryRequirements* pVkMemoryRequirements,\n    const VmaAllocationCreateInfo* pCreateInfo,\n    size_t allocationCount,\n    VmaAllocation* pAllocations,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    if(allocationCount == 0)\n    {\n        return VK_SUCCESS;\n    }\n\n    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);\n\n    VMA_DEBUG_LOG(\"vmaAllocateMemoryPages\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VkResult result = allocator->AllocateMemory(\n        *pVkMemoryRequirements,\n        false, // requiresDedicatedAllocation\n        false, 
// prefersDedicatedAllocation\n        VK_NULL_HANDLE, // dedicatedBuffer\n        VK_NULL_HANDLE, // dedicatedImage\n        VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage\n        *pCreateInfo,\n        VMA_SUBALLOCATION_TYPE_UNKNOWN,\n        allocationCount,\n        pAllocations);\n\n    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)\n    {\n        for(size_t i = 0; i < allocationCount; ++i)\n        {\n            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);\n        }\n    }\n\n    return result;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(\n    VmaAllocator allocator,\n    VkBuffer buffer,\n    const VmaAllocationCreateInfo* pCreateInfo,\n    VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);\n\n    VMA_DEBUG_LOG(\"vmaAllocateMemoryForBuffer\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VkMemoryRequirements vkMemReq = {};\n    bool requiresDedicatedAllocation = false;\n    bool prefersDedicatedAllocation = false;\n    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,\n        requiresDedicatedAllocation,\n        prefersDedicatedAllocation);\n\n    VkResult result = allocator->AllocateMemory(\n        vkMemReq,\n        requiresDedicatedAllocation,\n        prefersDedicatedAllocation,\n        buffer, // dedicatedBuffer\n        VK_NULL_HANDLE, // dedicatedImage\n        VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage\n        *pCreateInfo,\n        VMA_SUBALLOCATION_TYPE_BUFFER,\n        1, // allocationCount\n        pAllocation);\n\n    if(pAllocationInfo && result == VK_SUCCESS)\n    {\n        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n    }\n\n    return result;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(\n    VmaAllocator allocator,\n    VkImage image,\n    const VmaAllocationCreateInfo* pCreateInfo,\n    
VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);\n\n    VMA_DEBUG_LOG(\"vmaAllocateMemoryForImage\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    VkMemoryRequirements vkMemReq = {};\n    bool requiresDedicatedAllocation = false;\n    bool prefersDedicatedAllocation  = false;\n    allocator->GetImageMemoryRequirements(image, vkMemReq,\n        requiresDedicatedAllocation, prefersDedicatedAllocation);\n\n    VkResult result = allocator->AllocateMemory(\n        vkMemReq,\n        requiresDedicatedAllocation,\n        prefersDedicatedAllocation,\n        VK_NULL_HANDLE, // dedicatedBuffer\n        image, // dedicatedImage\n        VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage\n        *pCreateInfo,\n        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,\n        1, // allocationCount\n        pAllocation);\n\n    if(pAllocationInfo && result == VK_SUCCESS)\n    {\n        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n    }\n\n    return result;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(\n    VmaAllocator allocator,\n    VmaAllocation allocation)\n{\n    VMA_ASSERT(allocator);\n\n    if(allocation == VK_NULL_HANDLE)\n    {\n        return;\n    }\n\n    VMA_DEBUG_LOG(\"vmaFreeMemory\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->FreeMemory(\n        1, // allocationCount\n        &allocation);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(\n    VmaAllocator allocator,\n    size_t allocationCount,\n    const VmaAllocation* pAllocations)\n{\n    if(allocationCount == 0)\n    {\n        return;\n    }\n\n    VMA_ASSERT(allocator);\n\n    VMA_DEBUG_LOG(\"vmaFreeMemoryPages\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->FreeMemory(allocationCount, pAllocations);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VmaAllocationInfo* 
pAllocationInfo)\n{\n    VMA_ASSERT(allocator && allocation && pAllocationInfo);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->GetAllocationInfo(allocation, pAllocationInfo);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VmaAllocationInfo2* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && allocation && pAllocationInfo);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->GetAllocationInfo2(allocation, pAllocationInfo);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    void* pUserData)\n{\n    VMA_ASSERT(allocator && allocation);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocation->SetUserData(allocator, pUserData);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const char* VMA_NULLABLE pName)\n{\n    allocation->SetName(allocator, pName);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)\n{\n    VMA_ASSERT(allocator && allocation && pFlags);\n    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();\n    *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    void** ppData)\n{\n    VMA_ASSERT(allocator && allocation && ppData);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->Map(allocation, ppData);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(\n    VmaAllocator allocator,\n    VmaAllocation allocation)\n{\n    VMA_ASSERT(allocator && allocation);\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    allocator->Unmap(allocation);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST 
vmaFlushAllocation(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkDeviceSize offset,\n    VkDeviceSize size)\n{\n    VMA_ASSERT(allocator && allocation);\n\n    VMA_DEBUG_LOG(\"vmaFlushAllocation\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkDeviceSize offset,\n    VkDeviceSize size)\n{\n    VMA_ASSERT(allocator && allocation);\n\n    VMA_DEBUG_LOG(\"vmaInvalidateAllocation\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(\n    VmaAllocator allocator,\n    uint32_t allocationCount,\n    const VmaAllocation* allocations,\n    const VkDeviceSize* offsets,\n    const VkDeviceSize* sizes)\n{\n    VMA_ASSERT(allocator);\n\n    if(allocationCount == 0)\n    {\n        return VK_SUCCESS;\n    }\n\n    VMA_ASSERT(allocations);\n\n    VMA_DEBUG_LOG(\"vmaFlushAllocations\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(\n    VmaAllocator allocator,\n    uint32_t allocationCount,\n    const VmaAllocation* allocations,\n    const VkDeviceSize* offsets,\n    const VkDeviceSize* sizes)\n{\n    VMA_ASSERT(allocator);\n\n    if(allocationCount == 0)\n    {\n        return VK_SUCCESS;\n    }\n\n    VMA_ASSERT(allocations);\n\n    VMA_DEBUG_LOG(\"vmaInvalidateAllocations\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(\n    VmaAllocator 
allocator,\n    const void* pSrcHostPointer,\n    VmaAllocation dstAllocation,\n    VkDeviceSize dstAllocationLocalOffset,\n    VkDeviceSize size)\n{\n    VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation);\n\n    if(size == 0)\n    {\n        return VK_SUCCESS;\n    }\n\n    VMA_DEBUG_LOG(\"vmaCopyMemoryToAllocation\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory(\n    VmaAllocator allocator,\n    VmaAllocation srcAllocation,\n    VkDeviceSize srcAllocationLocalOffset,\n    void* pDstHostPointer,\n    VkDeviceSize size)\n{\n    VMA_ASSERT(allocator && srcAllocation && pDstHostPointer);\n\n    if(size == 0)\n    {\n        return VK_SUCCESS;\n    }\n\n    VMA_DEBUG_LOG(\"vmaCopyAllocationToMemory\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(\n    VmaAllocator allocator,\n    uint32_t memoryTypeBits)\n{\n    VMA_ASSERT(allocator);\n\n    VMA_DEBUG_LOG(\"vmaCheckCorruption\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->CheckCorruption(memoryTypeBits);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(\n    VmaAllocator allocator,\n    const VmaDefragmentationInfo* pInfo,\n    VmaDefragmentationContext* pContext)\n{\n    VMA_ASSERT(allocator && pInfo && pContext);\n\n    VMA_DEBUG_LOG(\"vmaBeginDefragmentation\");\n\n    if (pInfo->pool != VMA_NULL)\n    {\n        // Check if run on supported algorithms\n        if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)\n            return VK_ERROR_FEATURE_NOT_PRESENT;\n    }\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);\n    return 
VK_SUCCESS;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(\n    VmaAllocator allocator,\n    VmaDefragmentationContext context,\n    VmaDefragmentationStats* pStats)\n{\n    VMA_ASSERT(allocator && context);\n\n    VMA_DEBUG_LOG(\"vmaEndDefragmentation\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    if (pStats)\n        context->GetStats(*pStats);\n    vma_delete(allocator, context);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaDefragmentationContext VMA_NOT_NULL context,\n    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)\n{\n    VMA_ASSERT(context && pPassInfo);\n\n    VMA_DEBUG_LOG(\"vmaBeginDefragmentationPass\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return context->DefragmentPassBegin(*pPassInfo);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaDefragmentationContext VMA_NOT_NULL context,\n    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)\n{\n    VMA_ASSERT(context && pPassInfo);\n\n    VMA_DEBUG_LOG(\"vmaEndDefragmentationPass\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return context->DefragmentPassEnd(*pPassInfo);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkBuffer buffer)\n{\n    VMA_ASSERT(allocator && allocation && buffer);\n\n    VMA_DEBUG_LOG(\"vmaBindBufferMemory\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkDeviceSize allocationLocalOffset,\n    VkBuffer buffer,\n    const void* pNext)\n{\n    VMA_ASSERT(allocator && allocation && buffer);\n\n    VMA_DEBUG_LOG(\"vmaBindBufferMemory2\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->BindBufferMemory(allocation, 
allocationLocalOffset, buffer, pNext);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkImage image)\n{\n    VMA_ASSERT(allocator && allocation && image);\n\n    VMA_DEBUG_LOG(\"vmaBindImageMemory\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(\n    VmaAllocator allocator,\n    VmaAllocation allocation,\n    VkDeviceSize allocationLocalOffset,\n    VkImage image,\n    const void* pNext)\n{\n    VMA_ASSERT(allocator && allocation && image);\n\n    VMA_DEBUG_LOG(\"vmaBindImageMemory2\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n        return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(\n    VmaAllocator allocator,\n    const VkBufferCreateInfo* pBufferCreateInfo,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    VkBuffer* pBuffer,\n    VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);\n\n    if(pBufferCreateInfo->size == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&\n        !allocator->m_UseKhrBufferDeviceAddress)\n    {\n        VMA_ASSERT(0 && \"Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.\");\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VMA_DEBUG_LOG(\"vmaCreateBuffer\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    *pBuffer = VK_NULL_HANDLE;\n    *pAllocation = VK_NULL_HANDLE;\n\n    // 1. 
Create VkBuffer.\n    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(\n        allocator->m_hDevice,\n        pBufferCreateInfo,\n        allocator->GetAllocationCallbacks(),\n        pBuffer);\n    if(res >= 0)\n    {\n        // 2. vkGetBufferMemoryRequirements.\n        VkMemoryRequirements vkMemReq = {};\n        bool requiresDedicatedAllocation = false;\n        bool prefersDedicatedAllocation  = false;\n        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,\n            requiresDedicatedAllocation, prefersDedicatedAllocation);\n\n        // 3. Allocate memory using allocator.\n        res = allocator->AllocateMemory(\n            vkMemReq,\n            requiresDedicatedAllocation,\n            prefersDedicatedAllocation,\n            *pBuffer, // dedicatedBuffer\n            VK_NULL_HANDLE, // dedicatedImage\n            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage\n            *pAllocationCreateInfo,\n            VMA_SUBALLOCATION_TYPE_BUFFER,\n            1, // allocationCount\n            pAllocation);\n\n        if(res >= 0)\n        {\n            // 3. 
Bind buffer with memory.\n            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)\n            {\n                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);\n            }\n            if(res >= 0)\n            {\n                // All steps succeeded.\n                #if VMA_STATS_STRING_ENABLED\n                    (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5);\n                #endif\n                if(pAllocationInfo != VMA_NULL)\n                {\n                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n                }\n\n                return VK_SUCCESS;\n            }\n            allocator->FreeMemory(\n                1, // allocationCount\n                pAllocation);\n            *pAllocation = VK_NULL_HANDLE;\n            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());\n            *pBuffer = VK_NULL_HANDLE;\n            return res;\n        }\n        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());\n        *pBuffer = VK_NULL_HANDLE;\n        return res;\n    }\n    return res;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(\n    VmaAllocator allocator,\n    const VkBufferCreateInfo* pBufferCreateInfo,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    VkDeviceSize minAlignment,\n    VkBuffer* pBuffer,\n    VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);\n\n    if(pBufferCreateInfo->size == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&\n        
!allocator->m_UseKhrBufferDeviceAddress)\n    {\n        VMA_ASSERT(0 && \"Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.\");\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VMA_DEBUG_LOG(\"vmaCreateBufferWithAlignment\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    *pBuffer = VK_NULL_HANDLE;\n    *pAllocation = VK_NULL_HANDLE;\n\n    // 1. Create VkBuffer.\n    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(\n        allocator->m_hDevice,\n        pBufferCreateInfo,\n        allocator->GetAllocationCallbacks(),\n        pBuffer);\n    if(res >= 0)\n    {\n        // 2. vkGetBufferMemoryRequirements.\n        VkMemoryRequirements vkMemReq = {};\n        bool requiresDedicatedAllocation = false;\n        bool prefersDedicatedAllocation  = false;\n        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,\n            requiresDedicatedAllocation, prefersDedicatedAllocation);\n\n        // 2a. Include minAlignment\n        vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);\n\n        // 3. Allocate memory using allocator.\n        res = allocator->AllocateMemory(\n            vkMemReq,\n            requiresDedicatedAllocation,\n            prefersDedicatedAllocation,\n            *pBuffer, // dedicatedBuffer\n            VK_NULL_HANDLE, // dedicatedImage\n            VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage\n            *pAllocationCreateInfo,\n            VMA_SUBALLOCATION_TYPE_BUFFER,\n            1, // allocationCount\n            pAllocation);\n\n        if(res >= 0)\n        {\n            // 3. 
Bind buffer with memory.\n            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)\n            {\n                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);\n            }\n            if(res >= 0)\n            {\n                // All steps succeeded.\n                #if VMA_STATS_STRING_ENABLED\n                    (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5);\n                #endif\n                if(pAllocationInfo != VMA_NULL)\n                {\n                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n                }\n\n                return VK_SUCCESS;\n            }\n            allocator->FreeMemory(\n                1, // allocationCount\n                pAllocation);\n            *pAllocation = VK_NULL_HANDLE;\n            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());\n            *pBuffer = VK_NULL_HANDLE;\n            return res;\n        }\n        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());\n        *pBuffer = VK_NULL_HANDLE;\n        return res;\n    }\n    return res;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)\n{\n    return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,\n    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)\n{\n    
VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);\n    VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());\n\n    VMA_DEBUG_LOG(\"vmaCreateAliasingBuffer2\");\n\n    *pBuffer = VK_NULL_HANDLE;\n\n    if (pBufferCreateInfo->size == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n    if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&\n        !allocator->m_UseKhrBufferDeviceAddress)\n    {\n        VMA_ASSERT(0 && \"Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.\");\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    // 1. Create VkBuffer.\n    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(\n        allocator->m_hDevice,\n        pBufferCreateInfo,\n        allocator->GetAllocationCallbacks(),\n        pBuffer);\n    if (res >= 0)\n    {\n        // 2. 
Bind buffer with memory.\n        res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);\n        if (res >= 0)\n        {\n            return VK_SUCCESS;\n        }\n        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());\n    }\n    return res;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(\n    VmaAllocator allocator,\n    VkBuffer buffer,\n    VmaAllocation allocation)\n{\n    VMA_ASSERT(allocator);\n\n    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)\n    {\n        return;\n    }\n\n    VMA_DEBUG_LOG(\"vmaDestroyBuffer\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    if(buffer != VK_NULL_HANDLE)\n    {\n        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());\n    }\n\n    if(allocation != VK_NULL_HANDLE)\n    {\n        allocator->FreeMemory(\n            1, // allocationCount\n            &allocation);\n    }\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(\n    VmaAllocator allocator,\n    const VkImageCreateInfo* pImageCreateInfo,\n    const VmaAllocationCreateInfo* pAllocationCreateInfo,\n    VkImage* pImage,\n    VmaAllocation* pAllocation,\n    VmaAllocationInfo* pAllocationInfo)\n{\n    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);\n\n    if(pImageCreateInfo->extent.width == 0 ||\n        pImageCreateInfo->extent.height == 0 ||\n        pImageCreateInfo->extent.depth == 0 ||\n        pImageCreateInfo->mipLevels == 0 ||\n        pImageCreateInfo->arrayLayers == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VMA_DEBUG_LOG(\"vmaCreateImage\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    *pImage = VK_NULL_HANDLE;\n    *pAllocation = VK_NULL_HANDLE;\n\n    // 1. 
Create VkImage.\n    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(\n        allocator->m_hDevice,\n        pImageCreateInfo,\n        allocator->GetAllocationCallbacks(),\n        pImage);\n    if(res == VK_SUCCESS)\n    {\n        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?\n            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :\n            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;\n\n        // 2. Allocate memory using allocator.\n        VkMemoryRequirements vkMemReq = {};\n        bool requiresDedicatedAllocation = false;\n        bool prefersDedicatedAllocation  = false;\n        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,\n            requiresDedicatedAllocation, prefersDedicatedAllocation);\n\n        res = allocator->AllocateMemory(\n            vkMemReq,\n            requiresDedicatedAllocation,\n            prefersDedicatedAllocation,\n            VK_NULL_HANDLE, // dedicatedBuffer\n            *pImage, // dedicatedImage\n            VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage\n            *pAllocationCreateInfo,\n            suballocType,\n            1, // allocationCount\n            pAllocation);\n\n        if(res == VK_SUCCESS)\n        {\n            // 3. 
Bind image with memory.\n            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)\n            {\n                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);\n            }\n            if(res == VK_SUCCESS)\n            {\n                // All steps succeeded.\n                #if VMA_STATS_STRING_ENABLED\n                    (*pAllocation)->InitImageUsage(*pImageCreateInfo);\n                #endif\n                if(pAllocationInfo != VMA_NULL)\n                {\n                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);\n                }\n\n                return VK_SUCCESS;\n            }\n            allocator->FreeMemory(\n                1, // allocationCount\n                pAllocation);\n            *pAllocation = VK_NULL_HANDLE;\n            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());\n            *pImage = VK_NULL_HANDLE;\n            return res;\n        }\n        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());\n        *pImage = VK_NULL_HANDLE;\n        return res;\n    }\n    return res;\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)\n{\n    return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation,\n    VkDeviceSize allocationLocalOffset,\n    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)\n{\n    VMA_ASSERT(allocator && pImageCreateInfo && 
pImage && allocation);\n\n    *pImage = VK_NULL_HANDLE;\n\n    VMA_DEBUG_LOG(\"vmaCreateImage2\");\n\n    if (pImageCreateInfo->extent.width == 0 ||\n        pImageCreateInfo->extent.height == 0 ||\n        pImageCreateInfo->extent.depth == 0 ||\n        pImageCreateInfo->mipLevels == 0 ||\n        pImageCreateInfo->arrayLayers == 0)\n    {\n        return VK_ERROR_INITIALIZATION_FAILED;\n    }\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    // 1. Create VkImage.\n    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(\n        allocator->m_hDevice,\n        pImageCreateInfo,\n        allocator->GetAllocationCallbacks(),\n        pImage);\n    if (res >= 0)\n    {\n        // 2. Bind image with memory.\n        res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);\n        if (res >= 0)\n        {\n            return VK_SUCCESS;\n        }\n        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());\n    }\n    return res;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(\n    VmaAllocator VMA_NOT_NULL allocator,\n    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,\n    VmaAllocation VMA_NULLABLE allocation)\n{\n    VMA_ASSERT(allocator);\n\n    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)\n    {\n        return;\n    }\n\n    VMA_DEBUG_LOG(\"vmaDestroyImage\");\n\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK\n\n    if(image != VK_NULL_HANDLE)\n    {\n        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());\n    }\n    if(allocation != VK_NULL_HANDLE)\n    {\n        allocator->FreeMemory(\n            1, // allocationCount\n            &allocation);\n    }\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(\n    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,\n    VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)\n{\n    VMA_ASSERT(pCreateInfo && 
pVirtualBlock);\n    VMA_ASSERT(pCreateInfo->size > 0);\n    VMA_DEBUG_LOG(\"vmaCreateVirtualBlock\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);\n    VkResult res = (*pVirtualBlock)->Init();\n    if(res < 0)\n    {\n        vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);\n        *pVirtualBlock = VK_NULL_HANDLE;\n    }\n    return res;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)\n{\n    if(virtualBlock != VK_NULL_HANDLE)\n    {\n        VMA_DEBUG_LOG(\"vmaDestroyVirtualBlock\");\n        VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n        VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.\n        vma_delete(&allocationCallbacks, virtualBlock);\n    }\n}\n\nVMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);\n    VMA_DEBUG_LOG(\"vmaIsVirtualBlockEmpty\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE;\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);\n    VMA_DEBUG_LOG(\"vmaGetVirtualAllocationInfo\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);\n}\n\nVMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,\n    VkDeviceSize* VMA_NULLABLE pOffset)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);\n    VMA_DEBUG_LOG(\"vmaVirtualAllocate\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)\n{\n    if(allocation != VK_NULL_HANDLE)\n    {\n        VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);\n        VMA_DEBUG_LOG(\"vmaVirtualFree\");\n        VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n        virtualBlock->Free(allocation);\n    }\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);\n    VMA_DEBUG_LOG(\"vmaClearVirtualBlock\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    virtualBlock->Clear();\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);\n    
VMA_DEBUG_LOG(\"vmaSetVirtualAllocationUserData\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    virtualBlock->SetAllocationUserData(allocation, pUserData);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaStatistics* VMA_NOT_NULL pStats)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);\n    VMA_DEBUG_LOG(\"vmaGetVirtualBlockStatistics\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    virtualBlock->GetStatistics(*pStats);\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    VmaDetailedStatistics* VMA_NOT_NULL pStats)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);\n    VMA_DEBUG_LOG(\"vmaCalculateVirtualBlockStatistics\");\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    virtualBlock->CalculateDetailedStatistics(*pStats);\n}\n\n#if VMA_STATS_STRING_ENABLED\n\nVMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)\n{\n    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();\n    VmaStringBuilder sb(allocationCallbacks);\n    virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);\n    *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());\n}\n\nVMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,\n    char* VMA_NULLABLE pStatsString)\n{\n    if(pStatsString != VMA_NULL)\n    {\n        VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);\n        VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n        VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);\n    }\n}\n#if VMA_EXTERNAL_MEMORY_WIN32\nVMA_CALL_PRE VkResult VMA_CALL_POST 
vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator,\n    VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle)\n{\n    VMA_ASSERT(allocator && allocation && pHandle);\n    VMA_DEBUG_GLOBAL_MUTEX_LOCK;\n    return allocation->GetWin32Handle(allocator, hTargetProcess, pHandle);\n}\n#endif // VMA_EXTERNAL_MEMORY_WIN32 \n#endif // VMA_STATS_STRING_ENABLED\n#endif // _VMA_PUBLIC_INTERFACE\n#endif // VMA_IMPLEMENTATION\n\n/**\n\\page quick_start Quick start\n\n\\section quick_start_project_setup Project setup\n\nVulkan Memory Allocator comes in form of a \"stb-style\" single header file.\nWhile you can pull the entire repository e.g. as Git module, there is also Cmake script provided,\nyou don't need to build it as a separate library project.\nYou can add file \"vk_mem_alloc.h\" directly to your project and submit it to code repository next to your other source files.\n\n\"Single header\" doesn't mean that everything is contained in C/C++ declarations,\nlike it tends to be in case of inline functions or C++ templates.\nIt means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.\nIf you don't do it properly, it will result in linker errors.\n\nTo do it properly:\n\n-# Include \"vk_mem_alloc.h\" file in each CPP file where you want to use the library.\n   This includes declarations of all members of the library.\n-# In exactly one CPP file define following macro before this include.\n   It enables also internal definitions.\n\n\\code\n#define VMA_IMPLEMENTATION\n#include \"vk_mem_alloc.h\"\n\\endcode\n\nIt may be a good idea to create dedicated CPP file just for this purpose, e.g. \"VmaUsage.cpp\".\n\nThis library includes header `<vulkan/vulkan.h>`, which in turn\nincludes `<windows.h>` on Windows. 
If you need some specific macros defined\nbefore including these headers (like `WIN32_LEAN_AND_MEAN` or\n`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define\nthem before every `#include` of this library.\nIt may be a good idea to create a dedicate header file for this purpose, e.g. \"VmaUsage.h\",\nthat will be included in other source files instead of VMA header directly.\n\nThis library is written in C++, but has C-compatible interface.\nThus, you can include and use \"vk_mem_alloc.h\" in C or C++ code, but full\nimplementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.\nSome features of C++14 are used and required. Features of C++20 are used optionally when available.\nSome headers of standard C and C++ library are used, but STL containers, RTTI, or C++ exceptions are not used.\n\n\n\\section quick_start_initialization Initialization\n\nVMA offers library interface in a style similar to Vulkan, with object handles like #VmaAllocation,\nstructures describing parameters of objects to be created like #VmaAllocationCreateInfo,\nand errors codes returned from functions using `VkResult` type.\n\nThe first and the main object that needs to be created is #VmaAllocator.\nIt represents the initialization of the entire library.\nOnly one such object should be created per `VkDevice`.\nYou should create it at program startup, after `VkDevice` was created, and before any device memory allocator needs to be made.\nIt must be destroyed before `VkDevice` is destroyed.\n\nAt program startup:\n\n-# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` object.\n-# Fill VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create #VmaAllocator object.\n\nOnly members `physicalDevice`, `device`, `instance` are required.\nHowever, you should inform the library which Vulkan version do you use by setting\nVmaAllocatorCreateInfo::vulkanApiVersion and which extensions did you enable\nby setting 
VmaAllocatorCreateInfo::flags.\nOtherwise, VMA would use only features of Vulkan 1.0 core with no extensions.\nSee below for details.\n\n\\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version\n\nVMA supports Vulkan version down to 1.0, for backward compatibility.\nIf you want to use higher version, you need to inform the library about it.\nThis is a two-step process.\n\n<b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest\nVulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.\nIf this is OK, you don't need to do anything.\nHowever, if you want to compile VMA as if only some lower Vulkan version was available,\ndefine macro `VMA_VULKAN_VERSION` before every `#include \"vk_mem_alloc.h\"`.\nIt should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.\nFor example, to compile against Vulkan 1.2:\n\n\\code\n#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2\n#include \"vk_mem_alloc.h\"\n\\endcode\n\n<b>Step 2: Runtime.</b> Even when compiled with higher Vulkan version available,\nVMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.\nBy default, only Vulkan 1.0 is used.\nTo initialize the allocator with support for higher Vulkan version, you need to set member\nVmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.\nSee code sample below.\n\n\\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions\n\nYou may need to configure importing Vulkan functions. There are 3 ways to do this:\n\n-# **If you link with Vulkan static library** (e.g. 
\"vulkan-1.lib\" on Windows):\n   - You don't need to do anything.\n   - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.\n-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,\n   `vkGetDeviceProcAddr` (this is the option presented in the example below):\n   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.\n   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,\n     VmaVulkanFunctions::vkGetDeviceProcAddr.\n   - The library will fetch pointers to all other functions it needs internally.\n-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like\n   [Volk](https://github.com/zeux/volk):\n   - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.\n   - Pass these pointers via structure #VmaVulkanFunctions.\n\n\\subsection quick_start_initialization_enabling_extensions Enabling extensions\n\nVMA can automatically use following Vulkan extensions.\nIf you found them available on the selected physical device and you enabled them\nwhile creating `VkInstance` / `VkDevice` object, inform VMA about their availability\nby setting appropriate flags in VmaAllocatorCreateInfo::flags.\n\nVulkan extension              | VMA flag\n------------------------------|-----------------------------------------------------\nVK_KHR_dedicated_allocation   | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT\nVK_KHR_bind_memory2           | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT\nVK_KHR_maintenance4           | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT\nVK_KHR_maintenance5           | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT\nVK_EXT_memory_budget          | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT\nVK_KHR_buffer_device_address  | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT\nVK_EXT_memory_priority        | 
#VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT\nVK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT\nVK_KHR_external_memory_win32  | #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT\n\nExample with fetching pointers to Vulkan functions dynamically:\n\n\\code\n#define VMA_STATIC_VULKAN_FUNCTIONS 0\n#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1\n#include \"vk_mem_alloc.h\"\n\n...\n\nVmaVulkanFunctions vulkanFunctions = {};\nvulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;\nvulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;\n\nVmaAllocatorCreateInfo allocatorCreateInfo = {};\nallocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;\nallocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;\nallocatorCreateInfo.physicalDevice = physicalDevice;\nallocatorCreateInfo.device = device;\nallocatorCreateInfo.instance = instance;\nallocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;\n\nVmaAllocator allocator;\nvmaCreateAllocator(&allocatorCreateInfo, &allocator);\n\n// Entire program...\n\n// At the end, don't forget to:\nvmaDestroyAllocator(allocator);\n\\endcode\n\n\n\\subsection quick_start_initialization_other_config Other configuration options\n\nThere are additional configuration options available through preprocessor macros that you can define\nbefore including VMA header and through parameters passed in #VmaAllocatorCreateInfo.\nThey include a possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`),\ncallbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`),\nor your custom `VMA_ASSERT` macro, among others.\nFor more information, see: @ref configuration.\n\n\n\\section quick_start_resource_allocation Resource allocation\n\nWhen you want to create a buffer or image:\n\n-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.\n-# Fill VmaAllocationCreateInfo structure.\n-# Call vmaCreateBuffer() / vmaCreateImage() to get 
`VkBuffer`/`VkImage` with memory\n   already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory.\n\n\\code\nVkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufferInfo.size = 65536;\nbufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);\n\\endcode\n\nDon't forget to destroy your buffer and allocation objects when no longer needed:\n\n\\code\nvmaDestroyBuffer(allocator, buffer, allocation);\n\\endcode\n\nIf you need to map the buffer, you must set flag\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT\nin VmaAllocationCreateInfo::flags.\nThere are many additional parameters that can control the choice of memory type to be used for the allocation\nand other features.\nFor more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping.\n\n\n\\page choosing_memory_type Choosing memory type\n\nPhysical devices in Vulkan support various combinations of memory heaps and\ntypes. Help with choosing correct and optimal memory type for your specific\nresource is one of the key features of this library. You can use it by filling\nappropriate members of VmaAllocationCreateInfo structure, as described below.\nYou can also combine multiple methods.\n\n-# If you just want to find memory type index that meets your requirements, you\n   can use function: vmaFindMemoryTypeIndexForBufferInfo(),\n   vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().\n-# If you want to allocate a region of device memory without association with any\n   specific image or buffer, you can use function vmaAllocateMemory(). 
Usage of\n   this function is not recommended and usually not needed.\n   vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,\n   which may be useful for sparse binding.\n-# If you already have a buffer or an image created, you want to allocate memory\n   for it and then you will bind it yourself, you can use function\n   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().\n   For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()\n   or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().\n-# If you want to create a buffer or an image, allocate memory for it, and bind\n   them together, all in one call, you can use function vmaCreateBuffer(),\n   vmaCreateImage().\n   <b>This is the easiest and recommended way to use this library!</b>\n\nWhen using 3. or 4., the library internally queries Vulkan for memory types\nsupported for that buffer or image (function `vkGetBufferMemoryRequirements()`)\nand uses only one of these types.\n\nIf no memory type can be found that meets all the requirements, these functions\nreturn `VK_ERROR_FEATURE_NOT_PRESENT`.\n\nYou can leave VmaAllocationCreateInfo structure completely filled with zeros.\nIt means no requirements are specified for memory type.\nIt is valid, although not very useful.\n\n\\section choosing_memory_type_usage Usage\n\nThe easiest way to specify memory requirements is to fill member\nVmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.\nIt defines high level, common usage types.\nSince version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically.\n\nFor example, if you want to create a uniform buffer that will be filled using\ntransfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can\ndo it using following code. 
The buffer will most likely end up in a memory type with\n`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.\n\n\\code\nVkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufferInfo.size = 65536;\nbufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);\n\\endcode\n\nIf you have a preference for putting the resource in GPU (device) memory or CPU (host) memory\non systems with discrete graphics card that have the memories separate, you can use\n#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.\n\nWhen using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,\nyou also need to specify one of the host access flags:\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\nThis will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`\nso you can map it.\n\nFor example, a staging buffer that will be filled via mapped pointer and then\nused as a source of transfer to the buffer described previously can be created like this.\nIt will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`\nbut not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).\n\n\\code\nVkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nstagingBufferInfo.size = 65536;\nstagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;\n\nVmaAllocationCreateInfo stagingAllocInfo = {};\nstagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;\nstagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;\n\nVkBuffer stagingBuffer;\nVmaAllocation 
stagingAllocation;\nvmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);\n\\endcode\n\nFor more examples of creating different kinds of resources, see chapter \\ref usage_patterns.\nSee also: @ref memory_mapping.\n\nUsage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows\nabout the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,\nso they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.\nIf you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting\nmemory type, as described below.\n\n\\note\nOld usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,\n`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)\nare still available and work same way as in previous versions of the library\nfor backward compatibility, but they are deprecated.\n\n\\section choosing_memory_type_required_preferred_flags Required and preferred flags\n\nYou can specify more detailed requirements by filling members\nVmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags\nwith a combination of bits from enum `VkMemoryPropertyFlags`. 
For example,\nif you want to create a buffer that will be persistently mapped on host (so it\nmust be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,\nuse following code:\n\n\\code\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\nallocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;\nallocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);\n\\endcode\n\nA memory type is chosen that has all the required flags and as many preferred\nflags set as possible.\n\nValue passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,\nplus some extra \"magic\" (heuristics).\n\n\\section choosing_memory_type_explicit_memory_types Explicit memory types\n\nIf you inspected memory types available on the physical device and <b>you have\na preference for memory types that you want to use</b>, you can fill member\nVmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set\nmeans that a memory type with that index is allowed to be used for the\nallocation. 
Special value 0, just like `UINT32_MAX`, means there are no\nrestrictions to memory type index.\n\nPlease note that this member is NOT just a memory type index.\nStill you can use it to choose just one, specific memory type.\nFor example, if you already determined that your buffer should be created in\nmemory type 2, use following code:\n\n\\code\nuint32_t memoryTypeIndex = 2;\n\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.memoryTypeBits = 1u << memoryTypeIndex;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);\n\\endcode\n\nYou can also use this parameter to <b>exclude some memory types</b>.\nIf you inspect memory heaps and types available on the current physical device and\nyou determine that for some reason you don't want to use a specific memory type for the allocation,\nyou can enable automatic memory type selection but exclude certain memory type or types\nby setting all bits of `memoryTypeBits` to 1 except the ones you choose.\n\n\\code\n// ...\nuint32_t excludedMemoryTypeIndex = 2;\nVmaAllocationCreateInfo allocInfo = {};\nallocInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocInfo.memoryTypeBits = ~(1u << excludedMemoryTypeIndex);\n// ...\n\\endcode\n\n\n\\section choosing_memory_type_custom_memory_pools Custom memory pools\n\nIf you allocate from custom memory pool, all the ways of specifying memory\nrequirements described above are not applicable and the aforementioned members\nof VmaAllocationCreateInfo structure are ignored. Memory type is selected\nexplicitly when creating the pool and then used to make all the allocations from\nthat pool. For further details, see \\ref custom_memory_pools.\n\n\\section choosing_memory_type_dedicated_allocations Dedicated allocations\n\nMemory for allocations is reserved out of larger block of `VkDeviceMemory`\nallocated from Vulkan internally. 
That is the main feature of this whole library.\nYou can still request a separate memory block to be created for an allocation,\njust like you would do in a trivial solution without using any allocator.\nIn that case, a buffer or image is always bound to that memory at offset 0.\nThis is called a \"dedicated allocation\".\nYou can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\nThe library can also internally decide to use dedicated allocation in some cases, e.g.:\n\n- When the size of the allocation is large.\n- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled\n  and it reports that dedicated allocation is required or recommended for the resource.\n- When allocation of next big memory block fails due to not enough device memory,\n  but allocation with the exact requested size succeeds.\n\n\n\\page memory_mapping Memory mapping\n\nTo \"map memory\" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,\nto be able to read from it or write to it in CPU code.\nMapping is possible only of memory allocated from a memory type that has\n`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.\nFunctions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.\nYou can use them directly with memory allocated by this library,\nbut it is not recommended because of following issue:\nMapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.\nThis includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.\nIt is also not thread-safe.\nBecause of this, Vulkan Memory Allocator provides following facilities:\n\n\\note If you want to be able to map an allocation, you need to specify one of the flags\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT\nin VmaAllocationCreateInfo::flags. 
These flags are required for an allocation to be mappable\nwhen using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.\nFor other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,\nbut these flags can still be used for consistency.\n\n\\section memory_mapping_copy_functions Copy functions\n\nThe easiest way to copy data from a host pointer to an allocation is to use convenience function vmaCopyMemoryToAllocation().\nIt automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`,\nand calls `vkFlushMappedMemoryRanges` (if required - if memory type is not `HOST_COHERENT`).\n\nIt is also the safest one, because using `memcpy` avoids a risk of accidentally introducing memory reads\n(e.g. by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`.\n\n\\code\nstruct ConstantBuffer\n{\n    ...\n};\nConstantBuffer constantBufferData = ...\n\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = sizeof(ConstantBuffer);\nbufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;\n\nVkBuffer buf;\nVmaAllocation alloc;\nvmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);\n\nvmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer));\n\\endcode\n\nCopy in the other direction - from an allocation to a host pointer can be performed the same way using function vmaCopyAllocationToMemory().\n\n\\section memory_mapping_mapping_functions Mapping functions\n\nThe library provides following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory().\nThey are safer and more convenient to use than standard Vulkan functions.\nYou can map 
an allocation multiple times simultaneously - mapping is reference-counted internally.\nYou can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.\nThe way it is implemented is that the library always maps entire memory block, not just region of the allocation.\nFor further details, see description of vmaMapMemory() function.\nExample:\n\n\\code\n// Having these objects initialized:\nstruct ConstantBuffer\n{\n    ...\n};\nConstantBuffer constantBufferData = ...\n\nVmaAllocator allocator = ...\nVkBuffer constantBuffer = ...\nVmaAllocation constantBufferAllocation = ...\n\n// You can map and fill your buffer using following code:\n\nvoid* mappedData;\nvmaMapMemory(allocator, constantBufferAllocation, &mappedData);\nmemcpy(mappedData, &constantBufferData, sizeof(constantBufferData));\nvmaUnmapMemory(allocator, constantBufferAllocation);\n\\endcode\n\nWhen mapping, you may see a warning from Vulkan validation layer similar to this one:\n\n<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.</i>\n\nIt happens because the library maps entire `VkDeviceMemory` block, where different\ntypes of images and buffers may end up together, especially on GPUs with unified memory like Intel.\nYou can safely ignore it if you are sure you access only memory of the intended\nobject that you wanted to map.\n\n\n\\section memory_mapping_persistently_mapped_memory Persistently mapped memory\n\nKeeping your memory persistently mapped is generally OK in Vulkan.\nYou don't need to unmap it before using its data on the GPU.\nThe library provides a special feature designed for that:\nAllocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in\nVmaAllocationCreateInfo::flags stay mapped all the time,\nso you can just access CPU pointer to it any time\nwithout a need to call any \"map\" or \"unmap\" function.\nExample:\n\n\\code\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = sizeof(ConstantBuffer);\nbufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |\n    VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\nVkBuffer buf;\nVmaAllocation alloc;\nVmaAllocationInfo allocInfo;\nvmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);\n\n// Buffer is already mapped. 
You can access its memory.\nmemcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));\n\\endcode\n\n\\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up\nin a mappable memory type.\nFor this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\n#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.\nFor an example of how to make use of this fact, see section \\ref usage_patterns_advanced_data_uploading.\n\n\\section memory_mapping_cache_control Cache flush and invalidate\n\nMemory in Vulkan doesn't need to be unmapped before using it on GPU,\nbut unless a memory types has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,\nyou need to manually **invalidate** cache before reading of mapped pointer\nand **flush** cache after writing to mapped pointer.\nMap/unmap operations don't do that automatically.\nVulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,\n`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient\nfunctions that refer to given allocation object: vmaFlushAllocation(),\nvmaInvalidateAllocation(),\nor multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().\n\nRegions of memory specified for flush/invalidate must be aligned to\n`VkPhysicalDeviceLimits::nonCoherentAtomSize`. 
This is automatically ensured by the library.\nIn any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations\nwithin blocks are aligned to this value, so their offsets are always a multiple of\n`nonCoherentAtomSize` and two different allocations never share same \"line\" of this size.\n\nAlso, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)\ncurrently provide `HOST_COHERENT` flag on all memory types that are\n`HOST_VISIBLE`, so on PC you may not need to bother.\n\n\n\\page staying_within_budget Staying within budget\n\nWhen developing a graphics-intensive game or program, it is important to avoid allocating\nmore GPU memory than it is physically available. When the memory is over-committed,\nvarious bad things can happen, depending on the specific GPU, graphics driver, and\noperating system:\n\n- It may just work without any problems.\n- The application may slow down because some memory blocks are moved to system RAM\n  and the GPU has to access them through PCI Express bus.\n- A new allocation may take very long time to complete, even few seconds, and possibly\n  freeze entire system.\n- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\n- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`\n  returned somewhere later.\n\n\\section staying_within_budget_querying_for_budget Querying for budget\n\nTo query for current memory usage and available budget, use function vmaGetHeapBudgets().\nReturned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.\n\nPlease note that this function returns different information and works faster than\nvmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every\nallocation, while vmaCalculateStatistics() is intended to be used rarely,\nonly to obtain statistical information, e.g. 
for debugging purposes.\n\nIt is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information\nabout the budget from Vulkan device. VMA is able to use this extension automatically.\nWhen not enabled, the allocator behaves same way, but then it estimates current usage\nand available budget based on its internal information and Vulkan memory heap sizes,\nwhich may be less precise. In order to use this extension:\n\n1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2\n   required by it are available and enable them. Please note that the first is a device\n   extension and the second is instance extension!\n2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.\n3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from\n   Vulkan inside of it to avoid overhead of querying it with every allocation.\n\n\\section staying_within_budget_controlling_memory_usage Controlling memory usage\n\nThere are many ways in which you can try to stay within the budget.\n\nFirst, when making new allocation requires allocating a new memory block, the library\ntries not to exceed the budget automatically. If a block with default recommended size\n(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even\ndedicated memory for just this resource.\n\nIf the size of the requested resource plus current memory usage is more than the\nbudget, by default the library still tries to create it, leaving it to the Vulkan\nimplementation whether the allocation succeeds or fails. You can change this behavior\nby using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. 
With it, the allocation is\nnot made if it would exceed the budget or if the budget is already exceeded.\nVMA then tries to make the allocation from the next eligible Vulkan memory type.\nIf all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\nExample usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag\nwhen creating resources that are not essential for the application (e.g. the texture\nof a specific object) and not to pass it when creating critically important resources\n(e.g. render targets).\n\nOn AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>\nthat allows to control the behavior of the Vulkan implementation in out-of-memory cases -\nwhether it should fail with an error code or still allow the allocation.\nUsage of this extension involves only passing extra structure on Vulkan device creation,\nso it is out of scope of this library.\n\nFinally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure\na new allocation is created only when it fits inside one of the existing memory blocks.\nIf it would require to allocate a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\nThis also ensures that the function call is very fast because it never goes to Vulkan\nto obtain a new block.\n\n\\note Creating \\ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount\nset to more than 0 will currently try to allocate memory blocks without checking whether they\nfit within budget.\n\n\n\\page resource_aliasing Resource aliasing (overlap)\n\nNew explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory\nmanagement, give an opportunity to alias (overlap) multiple resources in the\nsame region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).\nIt can be useful to save video memory, but it must be used with caution.\n\nFor example, if you know the flow of your whole render frame in 
advance, you\nare going to use some intermediate textures or buffers only during a small range of render passes,\nand you know these ranges don't overlap in time, you can bind these resources to\nthe same place in memory, even if they have completely different parameters (width, height, format etc.).\n\n![Resource aliasing (overlap)](../gfx/Aliasing.png)\n\nSuch scenario is possible using VMA, but you need to create your images manually.\nThen you need to calculate parameters of an allocation to be made using formula:\n\n- allocation size = max(size of each image)\n- allocation alignment = max(alignment of each image)\n- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)\n\nFollowing example shows two different images bound to the same place in memory,\nallocated to fit largest of them.\n\n\\code\n// A 512x512 texture to be sampled.\nVkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };\nimg1CreateInfo.imageType = VK_IMAGE_TYPE_2D;\nimg1CreateInfo.extent.width = 512;\nimg1CreateInfo.extent.height = 512;\nimg1CreateInfo.extent.depth = 1;\nimg1CreateInfo.mipLevels = 10;\nimg1CreateInfo.arrayLayers = 1;\nimg1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;\nimg1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;\nimg1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;\nimg1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;\nimg1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;\n\n// A full screen texture to be used as color attachment.\nVkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };\nimg2CreateInfo.imageType = VK_IMAGE_TYPE_2D;\nimg2CreateInfo.extent.width = 1920;\nimg2CreateInfo.extent.height = 1080;\nimg2CreateInfo.extent.depth = 1;\nimg2CreateInfo.mipLevels = 1;\nimg2CreateInfo.arrayLayers = 1;\nimg2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;\nimg2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;\nimg2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;\nimg2CreateInfo.usage = 
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;\nimg2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;\n\nVkImage img1;\nres = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);\nVkImage img2;\nres = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);\n\nVkMemoryRequirements img1MemReq;\nvkGetImageMemoryRequirements(device, img1, &img1MemReq);\nVkMemoryRequirements img2MemReq;\nvkGetImageMemoryRequirements(device, img2, &img2MemReq);\n\nVkMemoryRequirements finalMemReq = {};\nfinalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);\nfinalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);\nfinalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;\n// Validate if(finalMemReq.memoryTypeBits != 0)\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;\n\nVmaAllocation alloc;\nres = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);\n\nres = vmaBindImageMemory(allocator, alloc, img1);\nres = vmaBindImageMemory(allocator, alloc, img2);\n\n// You can use img1, img2 here, but not at the same time!\n\nvmaFreeMemory(allocator, alloc);\nvkDestroyImage(allocator, img2, nullptr);\nvkDestroyImage(allocator, img1, nullptr);\n\\endcode\n\nVMA also provides convenience functions that create a buffer or image and bind it to memory\nrepresented by an existing #VmaAllocation:\nvmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),\nvmaCreateAliasingImage(), vmaCreateAliasingImage2().\nVersions with \"2\" offer additional parameter `allocationLocalOffset`.\n\nRemember that using resources that alias in memory requires proper synchronization.\nYou need to issue a memory barrier to make sure commands that use `img1` and `img2`\ndon't overlap on GPU timeline.\nYou also need to treat a resource after aliasing as uninitialized - containing garbage data.\nFor example, if you use `img1` and then want to use `img2`, you need to 
issue\nan image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.\n\nAdditional considerations:\n\n- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.\nSee chapter 11.8. \"Memory Aliasing\" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.\n- You can create more complex layout where different images and buffers are bound\nat different offsets inside one large allocation. For example, one can imagine\na big texture used in some render passes, aliasing with a set of many small buffers\nused between in some further passes. To bind a resource at non-zero offset in an allocation,\nuse vmaBindBufferMemory2() / vmaBindImageMemory2().\n- Before allocating memory for the resources you want to alias, check `memoryTypeBits`\nreturned in memory requirements of each resource to make sure the bits overlap.\nSome GPUs may expose multiple memory types suitable e.g. only for buffers or\nimages with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your\nresources may be disjoint. 
Aliasing them is not possible in that case.\n\n\n\\page custom_memory_pools Custom memory pools\n\nA memory pool contains a number of `VkDeviceMemory` blocks.\nThe library automatically creates and manages default pool for each memory type available on the device.\nDefault memory pool automatically grows in size.\nSize of allocated blocks is also variable and managed automatically.\nYou are using default pools whenever you leave VmaAllocationCreateInfo::pool = null.\n\nYou can create custom pool and allocate memory out of it.\nIt can be useful if you want to:\n\n- Keep certain kind of allocations separate from others.\n- Enforce particular, fixed size of Vulkan memory blocks.\n- Limit maximum amount of Vulkan memory allocated for that pool.\n- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.\n- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in\n  #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.\n- Perform defragmentation on a specific subset of your allocations.\n\nTo use custom memory pools:\n\n-# Fill VmaPoolCreateInfo structure.\n-# Call vmaCreatePool() to obtain #VmaPool handle.\n-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.\n   You don't need to specify any other parameters of this structure, like `usage`.\n\nExample:\n\n\\code\n// Find memoryTypeIndex for the pool.\nVkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nsampleBufCreateInfo.size = 0x10000; // Doesn't matter.\nsampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo sampleAllocCreateInfo = {};\nsampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\nuint32_t memTypeIndex;\nVkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,\n    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);\n// Check res...\n\n// Create a pool that can 
have at most 2 blocks, 128 MiB each.\nVmaPoolCreateInfo poolCreateInfo = {};\npoolCreateInfo.memoryTypeIndex = memTypeIndex;\npoolCreateInfo.blockSize = 128ull * 1024 * 1024;\npoolCreateInfo.maxBlockCount = 2;\n\nVmaPool pool;\nres = vmaCreatePool(allocator, &poolCreateInfo, &pool);\n// Check res...\n\n// Allocate a buffer out of it.\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = 1024;\nbufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.pool = pool;\n\nVkBuffer buf;\nVmaAllocation alloc;\nres = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);\n// Check res...\n\\endcode\n\nYou have to free all allocations made from this pool before destroying it.\n\n\\code\nvmaDestroyBuffer(allocator, buf, alloc);\nvmaDestroyPool(allocator, pool);\n\\endcode\n\nNew versions of this library support creating dedicated allocations in custom pools.\nIt is supported only when VmaPoolCreateInfo::blockSize = 0.\nTo use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and\nVmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n\n\n\\section custom_memory_pools_MemTypeIndex Choosing memory type index\n\nWhen creating a pool, you must explicitly specify memory type index.\nTo find the one suitable for your buffers or images, you can use helper functions\nvmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().\nYou need to provide structures with example parameters of buffers or images\nthat you are going to create in that pool.\n\n\\code\nVkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nexampleBufCreateInfo.size = 1024; // Doesn't matter\nexampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = 
{};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\nuint32_t memTypeIndex;\nvmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);\n\nVmaPoolCreateInfo poolCreateInfo = {};\npoolCreateInfo.memoryTypeIndex = memTypeIndex;\n// ...\n\\endcode\n\nWhen creating buffers/images allocated in that pool, provide following parameters:\n\n- `VkBufferCreateInfo`: Prefer to pass same parameters as above.\n  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.\n  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers\n  or the other way around.\n- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.\n  Other members are ignored anyway.\n\n\n\\section custom_memory_pools_when_not_use When not to use custom pools\n\nCustom pools are commonly overused by VMA users.\nWhile it may feel natural to keep some logical groups of resources separate in memory,\nin most cases it does more harm than good.\nUsing custom pool shouldn't be your first choice.\nInstead, please make all allocations from default pools first and only use custom pools\nif you can prove and measure that it is beneficial in some way,\ne.g. 
it results in lower memory usage, better performance, etc.\n\nUsing custom pools has disadvantages:\n\n- Each pool has its own collection of `VkDeviceMemory` blocks.\n  Some of them may be partially or even completely empty.\n  Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory.\n- You must manually choose specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex).\n  When using default pools, best memory type for each of your allocations can be selected automatically\n  using a carefully designed algorithm that works across all kinds of GPUs.\n- If an allocation from a custom pool at specific memory type fails, entire allocation operation returns failure.\n  When using default pools, VMA tries another compatible memory type.\n- If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size,\n  while default pools start from small blocks and only allocate next blocks larger and larger\n  up to the preferred block size.\n\nMany of the common concerns can be addressed in a different way than using custom pools:\n\n- If you want to keep your allocations of certain size (small versus large) or certain lifetime (transient versus long lived)\n  separate, you likely don't need to.\n  VMA uses a high quality allocation algorithm that manages memory well in various cases.\n  Please measure and check if using custom pools provides a benefit.\n- If you want to keep your images and buffers separate, you don't need to.\n  VMA respects `bufferImageGranularity` limit automatically.\n- If you want to keep your mapped and not mapped allocations separate, you don't need to.\n  VMA respects `nonCoherentAtomSize` limit automatically.\n  It also maps only those `VkDeviceMemory` blocks that need to map any allocation.\n  It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory.\n- If you want to choose a custom size for 
the default memory block, you can set it globally instead\n  using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.\n- If you want to select specific memory type for your allocation,\n  you can set VmaAllocationCreateInfo::memoryTypeBits to `(1u << myMemoryTypeIndex)` instead.\n- If you need to create a buffer with certain minimum alignment, you can still do it\n  using default pools with dedicated function vmaCreateBufferWithAlignment().\n\n\n\\section linear_algorithm Linear allocation algorithm\n\nEach Vulkan memory block managed by this library has accompanying metadata that\nkeeps track of used and unused regions. By default, the metadata structure and\nalgorithm tries to find best place for new allocations among free regions to\noptimize memory usage. This way you can allocate and free objects in any order.\n\n![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)\n\nSometimes there is a need to use simpler, linear allocation algorithm. You can\ncreate custom pool that uses such algorithm by adding flag\n#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating\n#VmaPool object. Then an alternative metadata management is used. It always\ncreates new allocations after last one and doesn't reuse free regions after\nallocations freed in the middle. It results in better allocation performance and\nless memory consumed by metadata.\n\n![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)\n\nWith this one flag, you can create a custom pool that can be used in many ways:\nfree-at-once, stack, double stack, and ring buffer. See below for details.\nYou don't need to specify explicitly which of these options you are going to use - it is detected automatically.\n\n\\subsection linear_algorithm_free_at_once Free-at-once\n\nIn a pool that uses linear algorithm, you still need to free all the allocations\nindividually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free\nthem in any order. 
New allocations are always made after last one - free space\nin the middle is not reused. However, when you release all the allocations and\nthe pool becomes empty, allocation starts from the beginning again. This way you\ncan use linear algorithm to speed up creation of allocations that you are going\nto release all at once.\n\n![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)\n\nThis mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount\nvalue that allows multiple memory blocks.\n\n\subsection linear_algorithm_stack Stack\n\nWhen you free an allocation that was created last, its space can be reused.\nThanks to this, if you always release allocations in the order opposite to their\ncreation (LIFO - Last In First Out), you can achieve behavior of a stack.\n\n![Stack](../gfx/Linear_allocator_4_stack.png)\n\nThis mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount\nvalue that allows multiple memory blocks.\n\n\subsection linear_algorithm_double_stack Double stack\n\nThe space reserved by a custom pool with linear algorithm may be used by two\nstacks:\n\n- First, default one, growing up from offset 0.\n- Second, \"upper\" one, growing down from the end towards lower offsets.\n\nTo make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT\nto VmaAllocationCreateInfo::flags.\n\n![Double stack](../gfx/Linear_allocator_7_double_stack.png)\n\nDouble stack is available only in pools with one memory block -\nVmaPoolCreateInfo::maxBlockCount must be 1. 
Otherwise behavior is undefined.\n\nWhen the two stacks' ends meet so there is not enough space between them for a\nnew allocation, such allocation fails with usual\n`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.\n\n\\subsection linear_algorithm_ring_buffer Ring buffer\n\nWhen you free some allocations from the beginning and there is not enough free space\nfor a new one at the end of a pool, allocator's \"cursor\" wraps around to the\nbeginning and starts allocation there. Thanks to this, if you always release\nallocations in the same order as you created them (FIFO - First In First Out),\nyou can achieve behavior of a ring buffer / queue.\n\n![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)\n\nRing buffer is available only in pools with one memory block -\nVmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.\n\n\\note \\ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.\n\n\n\\page defragmentation Defragmentation\n\nInterleaved allocations and deallocations of many objects of varying size can\ncause fragmentation over time, which can lead to a situation where the library is unable\nto find a continuous range of free memory for a new allocation despite there is\nenough free space, just scattered across many small free ranges between existing\nallocations.\n\nTo mitigate this problem, you can use defragmentation feature.\nIt doesn't happen automatically though and needs your cooperation,\nbecause VMA is a low level library that only allocates memory.\nIt cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.\nIt cannot copy their contents as it doesn't record any commands to a command buffer.\n\nExample:\n\n\\code\nVmaDefragmentationInfo defragInfo = {};\ndefragInfo.pool = myPool;\ndefragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;\n\nVmaDefragmentationContext defragCtx;\nVkResult res = 
vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);\n// Check res...\n\nfor(;;)\n{\n    VmaDefragmentationPassMoveInfo pass;\n    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);\n    if(res == VK_SUCCESS)\n        break;\n    else if(res != VK_INCOMPLETE)\n        // Handle error...\n\n    for(uint32_t i = 0; i < pass.moveCount; ++i)\n    {\n        // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.\n        VmaAllocationInfo allocInfo;\n        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);\n        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;\n\n        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.\n        VkImageCreateInfo imgCreateInfo = ...\n        VkImage newImg;\n        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);\n        // Check res...\n        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);\n        // Check res...\n\n        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.\n        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);\n    }\n\n    // Make sure the copy commands finished executing.\n    vkWaitForFences(...);\n\n    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.\n    for(uint32_t i = 0; i < pass.moveCount; ++i)\n    {\n        // ...\n        vkDestroyImage(device, resData->img, nullptr);\n    }\n\n    // Update appropriate descriptors to point to the new places...\n\n    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);\n    if(res == VK_SUCCESS)\n        break;\n    else if(res != VK_INCOMPLETE)\n        // Handle error...\n}\n\nvmaEndDefragmentation(allocator, defragCtx, nullptr);\n\\endcode\n\nAlthough functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()\ncreate/destroy an allocation and a buffer/image at once, these 
are just a shortcut for\ncreating the resource, allocating memory, and binding them together.\nDefragmentation works on memory allocations only. You must handle the rest manually.\nDefragmentation is an iterative process that should repeat \"passes\" as long as related functions\nreturn `VK_INCOMPLETE` not `VK_SUCCESS`.\nIn each pass:\n\n1. vmaBeginDefragmentationPass() function call:\n   - Calculates and returns the list of allocations to be moved in this pass.\n     Note this can be a time-consuming process.\n   - Reserves destination memory for them by creating temporary destination allocations\n     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().\n2. Inside the pass, **you should**:\n   - Inspect the returned list of allocations to be moved.\n   - Create new buffers/images and bind them at the returned destination temporary allocations.\n   - Copy data from source to destination resources if necessary.\n   - Destroy the source buffers/images, but NOT their allocations.\n3. 
vmaEndDefragmentationPass() function call:\n   - Frees the source memory reserved for the allocations that are moved.\n   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.\n   - Frees `VkDeviceMemory` blocks that became empty.\n\nUnlike in previous iterations of the defragmentation API, there is no list of \"movable\" allocations passed as a parameter.\nDefragmentation algorithm tries to move all suitable allocations.\nYou can, however, refuse to move some of them inside a defragmentation pass, by setting\n`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.\nThis is not recommended and may result in suboptimal packing of the allocations after defragmentation.\nIf you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.\n\nInside a pass, for each allocation that should be moved:\n\n- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.\n  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().\n- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,\n  filled, and used temporarily in each rendering frame, you can just recreate this image\n  without copying its data.\n- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU\n  using `memcpy()`.\n- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.\n  This will cancel the move.\n  - vmaEndDefragmentationPass() will then free the destination memory\n    not the source memory of the allocation, leaving it unchanged.\n- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. 
it wasn't used for long time),\n  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.\n  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.\n\nYou can defragment a specific custom pool by setting VmaDefragmentationInfo::pool\n(like in the example above) or all the default pools by setting this member to null.\n\nDefragmentation is always performed in each pool separately.\nAllocations are never moved between different Vulkan memory types.\nThe size of the destination memory reserved for a moved allocation is the same as the original one.\nAlignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.\nBuffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.\n\nYou can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved\nin each pass, e.g. to call it in sync with render frames and not to experience too big hitches.\nSee members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.\n\nIt is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA\nusage, possibly from multiple threads, with the exception that allocations\nreturned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.\n\n<b>Mapping</b> is preserved on allocations that are moved during defragmentation.\nWhether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations\nare mapped at their new place. 
Of course, pointer to the mapped data changes, so it needs to be queried\nusing VmaAllocationInfo::pMappedData.\n\n\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.\n\n\n\page statistics Statistics\n\nThis library contains several functions that return information about its internal state,\nespecially the amount of memory allocated from Vulkan.\n\n\section statistics_numeric_statistics Numeric statistics\n\nIf you need to obtain basic statistics about memory usage per heap, together with current budget,\nyou can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.\nThis is useful to keep track of memory usage and stay within budget\n(see also \ref staying_within_budget).\nExample:\n\n\code\nuint32_t heapIndex = ...\n\nVmaBudget budgets[VK_MAX_MEMORY_HEAPS];\nvmaGetHeapBudgets(allocator, budgets);\n\nprintf(\"My heap currently has %u allocations taking %llu B,\\n\",\n    budgets[heapIndex].statistics.allocationCount,\n    budgets[heapIndex].statistics.allocationBytes);\nprintf(\"allocated out of %u Vulkan device memory blocks taking %llu B,\\n\",\n    budgets[heapIndex].statistics.blockCount,\n    budgets[heapIndex].statistics.blockBytes);\nprintf(\"Vulkan reports total usage %llu B with budget %llu B.\\n\",\n    budgets[heapIndex].usage,\n    budgets[heapIndex].budget);\n\endcode\n\nYou can query for more detailed statistics per memory heap, type, and totals,\nincluding minimum and maximum allocation size and unused range size,\nby calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.\nThis function is slower though, as it has to traverse all the internal data structures,\nso it should be used only for debugging purposes.\n\nYou can query for statistics of a custom pool using function vmaGetPoolStatistics()\nor vmaCalculatePoolStatistics().\n\nYou can query for information about a specific allocation using function vmaGetAllocationInfo().\nIt fills 
structure #VmaAllocationInfo.\n\n\\section statistics_json_dump JSON dump\n\nYou can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().\nThe result is guaranteed to be correct JSON.\nIt uses ANSI encoding.\nAny strings provided by user (see [Allocation names](@ref allocation_names))\nare copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,\nthis JSON string can be treated as using this encoding.\nIt must be freed using function vmaFreeStatsString().\n\nThe format of this JSON string is not part of official documentation of the library,\nbut it will not change in backward-incompatible way without increasing library major version number\nand appropriate mention in changelog.\n\nThe JSON string contains all the data that can be obtained using vmaCalculateStatistics().\nIt can also contain detailed map of allocated memory blocks and their regions -\nfree and occupied by allocations.\nThis allows e.g. to visualize the memory or assess fragmentation.\n\n\n\\page allocation_annotation Allocation names and user data\n\n\\section allocation_user_data Allocation user data\n\nYou can annotate allocations with your own information, e.g. for debugging purposes.\nTo do that, fill VmaAllocationCreateInfo::pUserData field when creating\nan allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,\nsome handle, index, key, ordinal number or any other value that would associate\nthe allocation with your custom metadata.\nIt is useful to identify appropriate data structures in your engine given #VmaAllocation,\ne.g. 
when doing \\ref defragmentation.\n\n\\code\nVkBufferCreateInfo bufCreateInfo = ...\n\nMyBufferMetadata* pMetadata = CreateBufferMetadata();\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.pUserData = pMetadata;\n\nVkBuffer buffer;\nVmaAllocation allocation;\nvmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);\n\\endcode\n\nThe pointer may be later retrieved as VmaAllocationInfo::pUserData:\n\n\\code\nVmaAllocationInfo allocInfo;\nvmaGetAllocationInfo(allocator, allocation, &allocInfo);\nMyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;\n\\endcode\n\nIt can also be changed using function vmaSetAllocationUserData().\n\nValues of (non-zero) allocations' `pUserData` are printed in JSON report created by\nvmaBuildStatsString() in hexadecimal form.\n\n\\section allocation_names Allocation names\n\nAn allocation can also carry a null-terminated string, giving a name to the allocation.\nTo set it, call vmaSetAllocationName().\nThe library creates internal copy of the string, so the pointer you pass doesn't need\nto be valid for whole lifetime of the allocation. You can free it after the call.\n\n\\code\nstd::string imageName = \"Texture: \";\nimageName += fileName;\nvmaSetAllocationName(allocator, allocation, imageName.c_str());\n\\endcode\n\nThe string can be later retrieved by inspecting VmaAllocationInfo::pName.\nIt is also printed in JSON report created by vmaBuildStatsString().\n\n\\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.\nYou must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.\n\n\n\\page virtual_allocator Virtual allocator\n\nAs an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of \"virtual allocator\".\nIt doesn't allocate any real GPU memory. 
It just keeps track of used and free regions of a \"virtual block\".\nYou can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.\nA common use case is sub-allocation of pieces of one large GPU buffer.\n\n\\section virtual_allocator_creating_virtual_block Creating virtual block\n\nTo use this functionality, there is no main \"allocator\" object.\nYou don't need to have #VmaAllocator object created.\nAll you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:\n\n-# Fill in #VmaVirtualBlockCreateInfo structure.\n-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.\n\nExample:\n\n\\code\nVmaVirtualBlockCreateInfo blockCreateInfo = {};\nblockCreateInfo.size = 1048576; // 1 MB\n\nVmaVirtualBlock block;\nVkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);\n\\endcode\n\n\\section virtual_allocator_making_virtual_allocations Making virtual allocations\n\n#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions\nusing the same code as the main Vulkan memory allocator.\nSimilarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type\nthat represents an opaque handle to an allocation within the virtual block.\n\nIn order to make such allocation:\n\n-# Fill in #VmaVirtualAllocationCreateInfo structure.\n-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.\n   You can also receive `VkDeviceSize offset` that was assigned to the allocation.\n\nExample:\n\n\\code\nVmaVirtualAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.size = 4096; // 4 KB\n\nVmaVirtualAllocation alloc;\nVkDeviceSize offset;\nres = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);\nif(res == VK_SUCCESS)\n{\n    // Use the 4 KB of your memory starting at offset.\n}\nelse\n{\n    // Allocation failed - no space for it could be found. 
Handle this error!\n}\n\\endcode\n\n\\section virtual_allocator_deallocation Deallocation\n\nWhen no longer needed, an allocation can be freed by calling vmaVirtualFree().\nYou can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()\ncalled for the same #VmaVirtualBlock.\n\nWhen whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().\nAll allocations must be freed before the block is destroyed, which is checked internally by an assert.\nHowever, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -\na feature not available in normal Vulkan memory allocator. Example:\n\n\\code\nvmaVirtualFree(block, alloc);\nvmaDestroyVirtualBlock(block);\n\\endcode\n\n\\section virtual_allocator_allocation_parameters Allocation parameters\n\nYou can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().\nIts default value is null.\nIt can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some\nlarger data structure containing more information. 
Example:\n\n\\code\nstruct CustomAllocData\n{\n    std::string m_AllocName;\n};\nCustomAllocData* allocData = new CustomAllocData();\nallocData->m_AllocName = \"My allocation 1\";\nvmaSetVirtualAllocationUserData(block, alloc, allocData);\n\\endcode\n\nThe pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function\nvmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.\nIf you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!\nExample:\n\n\\code\nVmaVirtualAllocationInfo allocInfo;\nvmaGetVirtualAllocationInfo(block, alloc, &allocInfo);\ndelete (CustomAllocData*)allocInfo.pUserData;\n\nvmaVirtualFree(block, alloc);\n\\endcode\n\n\\section virtual_allocator_alignment_and_units Alignment and units\n\nIt feels natural to express sizes and offsets in bytes.\nIf an offset of an allocation needs to be aligned to a multiply of some number (e.g. 4 bytes), you can fill optional member\nVmaVirtualAllocationCreateInfo::alignment to request it. Example:\n\n\\code\nVmaVirtualAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.size = 4096; // 4 KB\nallocCreateInfo.alignment = 4; // Returned offset must be a multiply of 4 B\n\nVmaVirtualAllocation alloc;\nres = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);\n\\endcode\n\nAlignments of different allocations made from one block may vary.\nHowever, if all alignments and sizes are always multiply of some size e.g. 
4 B or `sizeof(MyDataStruct)`,\nyou can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.\nIt might be more convenient, but you need to make sure to use this new unit consistently in all the places:\n\n- VmaVirtualBlockCreateInfo::size\n- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment\n- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset\n\n\\section virtual_allocator_statistics Statistics\n\nYou can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()\n(to get brief statistics that are fast to calculate)\nor vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).\nThe functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.\nExample:\n\n\\code\nVmaStatistics stats;\nvmaGetVirtualBlockStatistics(block, &stats);\nprintf(\"My virtual block has %llu bytes used by %u virtual allocations\\n\",\n    stats.allocationBytes, stats.allocationCount);\n\\endcode\n\nYou can also request a full list of allocations and free regions as a string in JSON format by calling\nvmaBuildVirtualBlockStatsString().\nReturned string must be later freed using vmaFreeVirtualBlockStatsString().\nThe format of this string differs from the one returned by the main Vulkan allocator, but it is similar.\n\n\\section virtual_allocator_additional_considerations Additional considerations\n\nThe \"virtual allocator\" functionality is implemented on a level of individual memory blocks.\nKeeping track of a whole collection of blocks, allocating new ones when out of free space,\ndeleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.\n\nAlternative allocation algorithms are supported, just like in custom pools of the real GPU memory.\nSee enum #VmaVirtualBlockCreateFlagBits to learn how to specify them 
(e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).\nYou can find their description in chapter \\ref custom_memory_pools.\nAllocation strategies are also supported.\nSee enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).\n\nFollowing features are supported only by the allocator of the real GPU memory and not by virtual allocations:\nbuffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.\n\n\n\\page debugging_memory_usage Debugging incorrect memory usage\n\nIf you suspect a bug with memory usage, like usage of uninitialized memory or\nmemory being overwritten out of bounds of an allocation,\nyou can use debug features of this library to verify this.\n\n\\section debugging_memory_usage_initialization Memory initialization\n\nIf you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,\nyou can enable automatic memory initialization to verify this.\nTo do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.\n\n\\code\n#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1\n#include \"vk_mem_alloc.h\"\n\\endcode\n\nIt makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.\nBefore an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.\nMemory is automatically mapped and unmapped if necessary.\n\nIf you find these values while debugging your program, good chances are that you incorrectly\nread Vulkan memory that is allocated but not initialized, or already freed, respectively.\n\nMemory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.\nIt works also with dedicated allocations.\n\n\\section debugging_memory_usage_margins Margins\n\nBy default, allocations are laid out in memory blocks next to each other if possible\n(considering required alignment, `bufferImageGranularity`, and 
`nonCoherentAtomSize`).\n\n![Allocations without margin](../gfx/Margins_1.png)\n\nDefine macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified\nnumber of bytes as a margin after every allocation.\n\n\\code\n#define VMA_DEBUG_MARGIN 16\n#include \"vk_mem_alloc.h\"\n\\endcode\n\n![Allocations with margin](../gfx/Margins_2.png)\n\nIf your bug goes away after enabling margins, it means it may be caused by memory\nbeing overwritten outside of allocation boundaries. It is not 100% certain though.\nChange in application behavior may also be caused by different order and distribution\nof allocations across memory blocks after margins are applied.\n\nMargins work with all types of memory.\n\nMargin is applied only to allocations made out of memory blocks and not to dedicated\nallocations, which have their own memory block of specific size.\nIt is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag\nor those automatically decided to put into dedicated allocations, e.g. 
due to its\nlarge size or recommended by VK_KHR_dedicated_allocation extension.\n\nMargins appear in [JSON dump](@ref statistics_json_dump) as part of free space.\n\nNote that enabling margins increases memory usage and fragmentation.\n\nMargins do not apply to \ref virtual_allocator.\n\n\section debugging_memory_usage_corruption_detection Corruption detection\n\nYou can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation\nof contents of the margins.\n\n\code\n#define VMA_DEBUG_MARGIN 16\n#define VMA_DEBUG_DETECT_CORRUPTION 1\n#include \"vk_mem_alloc.h\"\n\endcode\n\nWhen this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`\n(it must be a multiple of 4) after every allocation is filled with a magic number.\nThis idea is also known as \"canary\".\nMemory is automatically mapped and unmapped if necessary.\n\nThis number is validated automatically when the allocation is destroyed.\nIf it is not equal to the expected value, `VMA_ASSERT()` is executed.\nIt clearly means that either CPU or GPU overwrote the memory outside of boundaries of the allocation,\nwhich indicates a serious bug.\n\nYou can also explicitly request checking margins of all allocations in all memory blocks\nthat belong to specified memory types by using function vmaCheckCorruption(),\nor in memory blocks that belong to specified custom pool, by using function\nvmaCheckPoolCorruption().\n\nMargin validation (corruption detection) works only for memory types that are\n`HOST_VISIBLE` and `HOST_COHERENT`.\n\n\n\section debugging_memory_usage_leak_detection Leak detection features\n\nAt allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using\n`VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug\nbuilds, and doing nothing in Release builds. 
You can provide your own definition of `VMA_ASSERT_LEAK()`\nto change this behavior.\n\nAt memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()`\nmacro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op.\nIf you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't\nquite know \em why -, overriding this macro to print out the leaking blocks, combined with assigning\nindividual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them.\n\n\page other_api_interop Interop with other graphics APIs\n\nVMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL.\n\n\section opengl_interop_exporting_memory Exporting memory\n\nIf you want to attach `VkExportMemoryAllocateInfoKHR` or other structure to `pNext` chain of memory allocations made by the library:\n\nYou can create \ref custom_memory_pools for such allocations.\nDefine and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext\nwhile creating the custom pool.\nPlease note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,\nnot only while creating it, as no copy of the structure is made,\nbut its original pointer is used for each allocation instead.\n\nIf you want to export all memory allocated by VMA from certain memory types,\nalso dedicated allocations or other allocations made from default pools,\nan alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.\nIt should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library\nthrough `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.\nPlease note that new versions of the library also support dedicated allocations created in custom pools.\n\nYou should not mix these two 
methods in a way that allows to apply both to the same memory type.\nOtherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.\n\n\n\\section opengl_interop_custom_alignment Custom alignment\n\nBuffers or images exported to a different API like OpenGL may require a different alignment,\nhigher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.\nTo impose such alignment:\n\nYou can create \\ref custom_memory_pools for such allocations.\nSet VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation\nto be made out of this pool.\nThe alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image\nfrom a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.\n\nIf you want to create a buffer with a specific minimum alignment out of default pools,\nuse special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.\n\nNote the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated\nallocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.\nYou can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\nContrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.\n\n\\section opengl_interop_extended_allocation_information Extended allocation information\n\nIf you want to rely on VMA to allocate your buffers and images inside larger memory blocks,\nbut you need to know the size of the entire block and whether the allocation was made\nwith its own dedicated memory, use function vmaGetAllocationInfo2() to retrieve\nextended allocation information in 
structure #VmaAllocationInfo2.\n\n\n\n\\page usage_patterns Recommended usage patterns\n\nVulkan gives great flexibility in memory allocation.\nThis chapter shows the most common patterns.\n\nSee also slides from talk:\n[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)\n\n\n\\section usage_patterns_gpu_only GPU-only resource\n\n<b>When:</b>\nAny resources that you frequently write and read on GPU,\ne.g. images used as color attachments (aka \"render targets\"), depth-stencil attachments,\nimages/buffers used as storage image/buffer (aka \"Unordered Access View (UAV)\").\n\n<b>What to do:</b>\nLet the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.\n\n\\code\nVkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };\nimgCreateInfo.imageType = VK_IMAGE_TYPE_2D;\nimgCreateInfo.extent.width = 3840;\nimgCreateInfo.extent.height = 2160;\nimgCreateInfo.extent.depth = 1;\nimgCreateInfo.mipLevels = 1;\nimgCreateInfo.arrayLayers = 1;\nimgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;\nimgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;\nimgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;\nimgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;\nimgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;\nallocCreateInfo.priority = 1.0f;\n\nVkImage img;\nVmaAllocation alloc;\nvmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);\n\\endcode\n\n<b>Also consider:</b>\nConsider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,\nespecially if they are large or if you plan to destroy and recreate them with 
different sizes\ne.g. when display resolution changes.\nPrefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.\nWhen VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation\nto decrease chances to be evicted to system memory by the operating system.\n\n\\section usage_patterns_staging_copy_upload Staging copy for upload\n\n<b>When:</b>\nA \"staging\" buffer that you want to map and fill from CPU code, then use as a source of transfer\nto some GPU resource.\n\n<b>What to do:</b>\nUse flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.\nLet the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.\n\n\\code\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = 65536;\nbufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |\n    VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\nVkBuffer buf;\nVmaAllocation alloc;\nVmaAllocationInfo allocInfo;\nvmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);\n\n...\n\nmemcpy(allocInfo.pMappedData, myData, myDataSize);\n\\endcode\n\n<b>Also consider:</b>\nYou can map the allocation using vmaMapMemory() or you can create it as persistently mapped\nusing #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.\n\n\n\\section usage_patterns_readback Readback\n\n<b>When:</b>\nBuffers for data written by or transferred from the GPU that you want to read back on the CPU,\ne.g. 
results of some computations.\n\n<b>What to do:</b>\nUse flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.\nLet the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`\nand `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.\n\n\\code\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = 65536;\nbufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |\n    VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\nVkBuffer buf;\nVmaAllocation alloc;\nVmaAllocationInfo allocInfo;\nvmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);\n\n...\n\nconst float* downloadedData = (const float*)allocInfo.pMappedData;\n\\endcode\n\n\n\\section usage_patterns_advanced_data_uploading Advanced data uploading\n\nFor resources that you frequently write on CPU via mapped pointer and\nfrequently read on GPU e.g. as a uniform buffer (also called \"dynamic\"), multiple options are possible:\n\n-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,\n   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,\n   and make the device reach out to that resource directly.\n   - Reads performed by the device will then go through PCI Express bus.\n     The performance of this access may be limited, but it may be fine depending on the size\n     of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity\n     of access.\n-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),\n   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`\n   (fast to access from the GPU). 
Then, it is likely the best choice for such type of resource.\n-# Systems with a discrete graphics card and separate video memory may or may not expose\n   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).\n   If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)\n   that is available to CPU for mapping.\n   - Writes performed by the host to that memory go through PCI Express bus.\n     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,\n     as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.\n-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,\n   a separate \"staging\" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.\n\nThankfully, VMA offers an aid to create and use such resources in the way optimal\nfor the current Vulkan device. 
To help the library make the best choice,\nuse flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with\n#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.\nIt will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),\nbut if no such memory type is available or allocation from it fails\n(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),\nit will fall back to `DEVICE_LOCAL` memory for fast GPU access.\nIt is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,\nso you need to create another \"staging\" allocation and perform explicit transfers.\n\n\\code\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = 65536;\nbufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |\n    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |\n    VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\nVkBuffer buf;\nVmaAllocation alloc;\nVmaAllocationInfo allocInfo;\nVkResult result = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);\n// Check result...\n\nVkMemoryPropertyFlags memPropFlags;\nvmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);\n\nif(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)\n{\n    // Allocation ended up in a mappable memory and is already mapped - write to it directly.\n\n    // [Executed in runtime]:\n    memcpy(allocInfo.pMappedData, myData, myDataSize);\n    result = vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);\n    // Check result...\n\n    VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER };\n    bufMemBarrier.srcAccessMask = 
VK_ACCESS_HOST_WRITE_BIT;\n    bufMemBarrier.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT;\n    bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier.buffer = buf;\n    bufMemBarrier.offset = 0;\n    bufMemBarrier.size = VK_WHOLE_SIZE;\n\n    vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,\n        0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr);\n}\nelse\n{\n    // Allocation ended up in a non-mappable memory - a transfer using a staging buffer is required.\n    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\n    stagingBufCreateInfo.size = 65536;\n    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;\n\n    VmaAllocationCreateInfo stagingAllocCreateInfo = {};\n    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\n    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |\n        VMA_ALLOCATION_CREATE_MAPPED_BIT;\n\n    VkBuffer stagingBuf;\n    VmaAllocation stagingAlloc;\n    VmaAllocationInfo stagingAllocInfo;\n    result = vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,\n        &stagingBuf, &stagingAlloc, &stagingAllocInfo);\n    // Check result...\n\n    // [Executed in runtime]:\n    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);\n    result = vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);\n    // Check result...\n\n    VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER };\n    bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;\n    bufMemBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;\n    bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier.buffer = stagingBuf;\n    bufMemBarrier.offset = 0;\n    bufMemBarrier.size = VK_WHOLE_SIZE;\n\n    
vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n        0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr);\n\n    VkBufferCopy bufCopy = {\n        0, // srcOffset\n        0, // dstOffset,\n        myDataSize, // size\n    };\n\n    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);\n\n    VkBufferMemoryBarrier bufMemBarrier2 = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER };\n    bufMemBarrier2.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;\n    bufMemBarrier2.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; // We created a uniform buffer\n    bufMemBarrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;\n    bufMemBarrier2.buffer = buf;\n    bufMemBarrier2.offset = 0;\n    bufMemBarrier2.size = VK_WHOLE_SIZE;\n\n    vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,\n        0, 0, nullptr, 1, &bufMemBarrier2, 0, nullptr);\n}\n\\endcode\n\n\\section usage_patterns_other_use_cases Other use cases\n\nHere are some other, less obvious use cases and their recommended settings:\n\n- An image that is used only as transfer source and destination, but it should stay on the device,\n  as it is used to temporarily store a copy of some texture, e.g. 
from the current to the next frame,\n  for temporal antialiasing or other temporal effects.\n  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`\n  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO\n- An image that is used only as transfer source and destination, but it should be placed\n  in the system RAM despite it doesn't need to be mapped, because it serves as a \"swap\" copy to evict\n  least recently used textures from VRAM.\n  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`\n  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,\n    as VMA needs a hint here to differentiate from the previous case.\n- A buffer that you want to map and write from the CPU, directly read from the GPU\n  (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or\n  host memory due to its large size.\n  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`\n  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST\n  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT\n\n\n\\page configuration Configuration\n\nPlease check \"CONFIGURATION SECTION\" in the code to find macros that you can define\nbefore each include of this file or change directly in this file to provide\nyour own implementation of basic facilities like assert, `min()` and `max()` functions,\nmutex, atomic etc.\n\nFor example, define `VMA_ASSERT(expr)` before including the library to provide\ncustom implementation of the assertion, compatible with your project.\nBy default it is defined to standard C `assert(expr)` in `_DEBUG` configuration\nand empty otherwise.\n\nSimilarly, you can define `VMA_LEAK_LOG_FORMAT` macro to enable printing of leaked (unfreed) allocations,\nincluding their names and other parameters. 
Example:\n\n\\code\n#define VMA_LEAK_LOG_FORMAT(format, ...) do { \\\n        printf((format), __VA_ARGS__); \\\n        printf(\"\\n\"); \\\n    } while(false)\n\\endcode\n\n\\section config_Vulkan_functions Pointers to Vulkan functions\n\nThere are multiple ways to import pointers to Vulkan functions in the library.\nIn the simplest case you don't need to do anything.\nIf the compilation or linking of your program or the initialization of the #VmaAllocator\ndoesn't work for you, you can try to reconfigure it.\n\nFirst, the allocator tries to fetch pointers to Vulkan functions linked statically,\nlike this:\n\n\\code\nm_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;\n\\endcode\n\nIf you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.\n\nSecond, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.\nYou can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or\nby using a helper library like [volk](https://github.com/zeux/volk).\n\nThird, VMA tries to fetch remaining pointers that are still null by calling\n`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.\nYou need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.\nOther pointers will be fetched automatically.\nIf you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.\n\nFinally, all the function pointers required by the library (considering selected\nVulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.\n\n\n\\section custom_memory_allocator Custom host memory allocator\n\nIf you use custom allocator for CPU memory rather than default operator `new`\nand `delete` from C++, you can make this library using your allocator as well\nby filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. 
These\nfunctions will be passed to Vulkan, as well as used by the library itself to\nmake any CPU-side allocations.\n\n\\section allocation_callbacks Device memory allocation callbacks\n\nThe library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.\nYou can set up callbacks to be informed about these calls, e.g. for the purpose\nof gathering some statistics. To do it, fill optional member\nVmaAllocatorCreateInfo::pDeviceMemoryCallbacks.\n\n\\section heap_memory_limit Device heap memory limit\n\nWhen device memory of certain heap runs out of free space, new allocations may\nfail (returning error code) or they may succeed, silently pushing some existing\nmemory blocks from GPU VRAM to system RAM (which degrades performance). This\nbehavior is implementation-dependent - it depends on GPU vendor and graphics\ndriver.\n\nOn AMD cards it can be controlled while creating Vulkan device object by using\nVK_AMD_memory_overallocation_behavior extension, if available.\n\nAlternatively, if you want to test how your program behaves with limited amount of Vulkan device\nmemory available without switching your graphics card to one that really has\nsmaller VRAM, you can use a feature of this library intended for this purpose.\nTo do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.\n\n\n\n\\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation\n\nVK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve\nperformance on some GPUs. It augments Vulkan API with possibility to query\ndriver whether it prefers particular buffer or image to have its own, dedicated\nallocation (separate `VkDeviceMemory` block) for better efficiency - to be able\nto do some internal optimizations. 
The extension is supported by this library.\nIt will be used automatically when enabled.\n\nIt has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version\nand inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,\nyou are all set.\n\nOtherwise, if you want to use it as an extension:\n\n1 . When creating Vulkan device, check if following 2 device extensions are\nsupported (call `vkEnumerateDeviceExtensionProperties()`).\nIf yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).\n\n- VK_KHR_get_memory_requirements2\n- VK_KHR_dedicated_allocation\n\nIf you enabled these extensions:\n\n2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating\nyour #VmaAllocator to inform the library that you enabled required extensions\nand you want the library to use them.\n\n\\code\nallocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;\n\nvmaCreateAllocator(&allocatorInfo, &allocator);\n\\endcode\n\nThat is all. The extension will be automatically used whenever you create a\nbuffer using vmaCreateBuffer() or image using vmaCreateImage().\n\nWhen using the extension together with Vulkan Validation Layer, you will receive\nwarnings like this:\n\n_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._\n\nIt is OK, you should just ignore it. 
It happens because you use function\n`vkGetBufferMemoryRequirements2KHR()` instead of standard\n`vkGetBufferMemoryRequirements()`, while the validation layer seems to be\nunaware of it.\n\nTo learn more about this extension, see:\n\n- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)\n- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)\n\n\n\n\\page vk_ext_memory_priority VK_EXT_memory_priority\n\nVK_EXT_memory_priority is a device extension that allows to pass additional \"priority\"\nvalue to Vulkan memory allocations that the implementation may use to prefer certain\nbuffers and images that are critical for performance to stay in device-local memory\nin cases when the memory is over-subscribed, while some others may be moved to the system memory.\n\nVMA offers convenient usage of this extension.\nIf you enable it, you can pass \"priority\" parameter when creating allocations or custom pools\nand the library automatically passes the value to Vulkan using this extension.\n\nIf you want to use this extension in connection with VMA, follow these steps:\n\n\\section vk_ext_memory_priority_initialization Initialization\n\n1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.\nCheck if the extension is supported - if returned array of `VkExtensionProperties` contains \"VK_EXT_memory_priority\".\n\n2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.\nAttach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.\nCheck if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.\n\n3) While creating device with `vkCreateDevice`, enable this extension - add \"VK_EXT_memory_priority\"\nto the list passed as 
`VkDeviceCreateInfo::ppEnabledExtensionNames`.\n\n4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.\nFill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.\nEnable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to\n`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.\n\n5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you\nhave enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT\nto VmaAllocatorCreateInfo::flags.\n\n\\section vk_ext_memory_priority_usage Usage\n\nWhen using this extension, you should initialize following member:\n\n- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n- VmaPoolCreateInfo::priority when creating a custom pool.\n\nIt should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`.\nMemory allocated with higher value can be treated by the Vulkan implementation as higher priority\nand so it can have lower chances of being pushed out to system memory, experiencing degraded performance.\n\nIt might be a good idea to create performance-critical resources like color-attachment or depth-stencil images\nas dedicated and set high priority to them. 
For example:\n\n\\code\nVkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };\nimgCreateInfo.imageType = VK_IMAGE_TYPE_2D;\nimgCreateInfo.extent.width = 3840;\nimgCreateInfo.extent.height = 2160;\nimgCreateInfo.extent.depth = 1;\nimgCreateInfo.mipLevels = 1;\nimgCreateInfo.arrayLayers = 1;\nimgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;\nimgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;\nimgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;\nimgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;\nimgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\nallocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;\nallocCreateInfo.priority = 1.0f;\n\nVkImage img;\nVmaAllocation alloc;\nvmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);\n\\endcode\n\n`priority` member is ignored in the following situations:\n\n- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters\n  from the parameters passed in #VmaPoolCreateInfo when the pool was created.\n- Allocations created in default pools: They inherit the priority from the parameters\n  VMA used when creating default pools, which means `priority == 0.5f`.\n\n\n\\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory\n\nVK_AMD_device_coherent_memory is a device extension that enables access to\nadditional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and\n`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for\nallocation of buffers intended for writing \"breadcrumb markers\" in between passes\nor draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.\n\nWhen the extension is available but has not been enabled, Vulkan physical device\nstill exposes those memory types, but their usage is forbidden. 
VMA automatically\ntakes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt\nto allocate memory of such type is made.\n\nIf you want to use this extension in connection with VMA, follow these steps:\n\n\\section vk_amd_device_coherent_memory_initialization Initialization\n\n1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.\nCheck if the extension is supported - if returned array of `VkExtensionProperties` contains \"VK_AMD_device_coherent_memory\".\n\n2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.\nAttach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.\nCheck if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.\n\n3) While creating device with `vkCreateDevice`, enable this extension - add \"VK_AMD_device_coherent_memory\"\nto the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.\n\n4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.\nFill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.\nEnable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to\n`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.\n\n5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you\nhave enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT\nto VmaAllocatorCreateInfo::flags.\n\n\\section vk_amd_device_coherent_memory_usage Usage\n\nAfter following steps described above, you can create VMA allocations and custom pools\nout of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible\ndevices. 
There are multiple ways to do it, for example:\n\n- You can request or prefer to allocate out of such memory types by adding\n  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags\n  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with\n  other ways of \\ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.\n- If you manually found memory type index to use for this purpose, force allocation\n  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.\n\n\\section vk_amd_device_coherent_memory_more_information More information\n\nTo learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)\n\nExample use of this extension can be found in the code of the sample and test suite\naccompanying this library.\n\n\n\\page vk_khr_external_memory_win32 VK_KHR_external_memory_win32\n\nOn Windows, the VK_KHR_external_memory_win32 device extension allows exporting a Win32 `HANDLE`\nof a `VkDeviceMemory` block, to be able to reference the memory on other Vulkan logical devices or instances,\nin multiple processes, and/or in multiple APIs.\nVMA offers support for it.\n\n\\section vk_khr_external_memory_win32_initialization Initialization\n\n1) Make sure the extension is defined in the code by including following header before including VMA:\n\n\\code\n#include <vulkan/vulkan_win32.h>\n\\endcode\n\n2) Check if \"VK_KHR_external_memory_win32\" is available among device extensions.\nEnable it when creating the `VkDevice` object.\n\n3) Enable the usage of this extension in VMA by setting flag #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT\nwhen calling vmaCreateAllocator().\n\n4) Make sure that VMA has access to the `vkGetMemoryWin32HandleKHR` function by either enabling `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro\nor setting 
VmaVulkanFunctions::vkGetMemoryWin32HandleKHR explicitly.\nFor more information, see \\ref quick_start_initialization_importing_vulkan_functions.\n\n\\section vk_khr_external_memory_win32_preparations Preparations\n\nYou can find example usage among tests, in file \"Tests.cpp\", function `TestWin32Handles()`.\n\nTo use the extension, buffers need to be created with `VkExternalMemoryBufferCreateInfoKHR` attached to their `pNext` chain,\nand memory allocations need to be made with `VkExportMemoryAllocateInfoKHR` attached to their `pNext` chain.\nTo make use of them, you need to use \\ref custom_memory_pools. Example:\n\n\\code\n// Define an example buffer and allocation parameters.\nVkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = {\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,\n    nullptr,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT\n};\nVkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nexampleBufCreateInfo.size = 0x10000; // Doesn't matter here.\nexampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\nexampleBufCreateInfo.pNext = &externalMemBufCreateInfo;\n\nVmaAllocationCreateInfo exampleAllocCreateInfo = {};\nexampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;\n\n// Find memory type index to use for the custom pool.\nuint32_t memTypeIndex;\nVkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_Allocator,\n    &exampleBufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);\n// Check res...\n\n// Create a custom pool.\nconstexpr static VkExportMemoryAllocateInfoKHR exportMemAllocInfo = {\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,\n    nullptr,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT\n};\nVmaPoolCreateInfo poolCreateInfo = {};\npoolCreateInfo.memoryTypeIndex = memTypeIndex;\npoolCreateInfo.pMemoryAllocateNext = (void*)&exportMemAllocInfo;\n\nVmaPool pool;\nres = vmaCreatePool(g_Allocator, &poolCreateInfo, &pool);\n// Check 
res...\n\n// YOUR OTHER CODE COMES HERE....\n\n// At the end, don't forget to destroy it!\nvmaDestroyPool(g_Allocator, pool);\n\\endcode\n\nNote that the structure passed as VmaPoolCreateInfo::pMemoryAllocateNext must remain alive and unchanged\nfor the whole lifetime of the custom pool, because it will be used when the pool allocates a new device memory block.\nNo copy is made internally. This is why variable `exportMemAllocInfo` is defined as `static`.\n\n\\section vk_khr_external_memory_win32_memory_allocation Memory allocation\n\nFinally, you can create a buffer with an allocation out of the custom pool.\nThe buffer should use same flags as the sample buffer used to find the memory type.\nIt should also specify `VkExternalMemoryBufferCreateInfoKHR` in its `pNext` chain.\n\n\\code\nVkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = {\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,\n    nullptr,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT\n};\nVkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };\nbufCreateInfo.size = // Your desired buffer size.\nbufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;\nbufCreateInfo.pNext = &externalMemBufCreateInfo;\n\nVmaAllocationCreateInfo allocCreateInfo = {};\nallocCreateInfo.pool = pool;  // It is enough to set this one member.\n\nVkBuffer buf;\nVmaAllocation alloc;\nres = vmaCreateBuffer(g_Allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);\n// Check res...\n\n// YOUR OTHER CODE COMES HERE....\n\n// At the end, don't forget to destroy it!\nvmaDestroyBuffer(g_Allocator, buf, alloc);\n\\endcode\n\nIf you need each allocation to have its own device memory block and start at offset 0, you can still do so\nby using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag. 
It works also with custom pools.\n\n\\section vk_khr_external_memory_win32_exporting_win32_handle Exporting Win32 handle\n\nAfter the allocation is created, you can acquire a Win32 `HANDLE` to the `VkDeviceMemory` block it belongs to.\nVMA function vmaGetMemoryWin32Handle() is a replacement of the Vulkan function `vkGetMemoryWin32HandleKHR`.\n\n\\code\nHANDLE handle;\nres = vmaGetMemoryWin32Handle(g_Allocator, alloc, nullptr, &handle);\n// Check res...\n\n// YOUR OTHER CODE COMES HERE....\n\n// At the end, you must close the handle.\nCloseHandle(handle);\n\\endcode\n\nDocumentation of the VK_KHR_external_memory_win32 extension states that:\n\n> If handleType is defined as an NT handle, vkGetMemoryWin32HandleKHR must be called no more than once for each valid unique combination of memory and handleType.\n\nThis is ensured automatically inside VMA.\nThe library fetches the handle on first use, remembers it internally, and closes it when the memory block or dedicated allocation is destroyed.\nEvery time you call vmaGetMemoryWin32Handle(), VMA calls `DuplicateHandle` and returns a new handle that you need to close.\n\nFor further information, please check documentation of the vmaGetMemoryWin32Handle() function.\n\n\n\\page enabling_buffer_device_address Enabling buffer device address\n\nDevice extension VK_KHR_buffer_device_address\nallows to fetch raw GPU pointer to a buffer and pass it for usage in a shader code.\nIt has been promoted to core Vulkan 1.2.\n\nIf you want to use this feature in connection with VMA, follow these steps:\n\n\\section enabling_buffer_device_address_initialization Initialization\n\n1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.\nCheck if the extension is supported - if returned array of `VkExtensionProperties` contains\n\"VK_KHR_buffer_device_address\".\n\n2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.\nAttach additional structure 
`VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.\nCheck if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.\n\n3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add\n\"VK_KHR_buffer_device_address\" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.\n\n4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.\nFill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.\nEnable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to\n`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.\n\n5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you\nhave enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT\nto VmaAllocatorCreateInfo::flags.\n\n\\section enabling_buffer_device_address_usage Usage\n\nAfter following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.\nThe library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to\nallocated memory blocks wherever it might be needed.\n\nPlease note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.\nThe second part of this functionality related to \"capture and replay\" is not supported,\nas it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.\n\n\\section enabling_buffer_device_address_more_information More information\n\nTo learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)\n\nExample use of this extension can be found in the code of the sample and test 
suite\naccompanying this library.\n\n\\page general_considerations General considerations\n\n\\section general_considerations_thread_safety Thread safety\n\n- The library has no global state, so separate #VmaAllocator objects can be used\n  independently.\n  There should be no need to create multiple such objects though - one per `VkDevice` is enough.\n- By default, all calls to functions that take #VmaAllocator as first parameter\n  are safe to call from multiple threads simultaneously because they are\n  synchronized internally when needed.\n  This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.\n- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT\n  flag, calls to functions that take such #VmaAllocator object must be\n  synchronized externally.\n- Access to a #VmaAllocation object must be externally synchronized. For example,\n  you must not call vmaGetAllocationInfo() and vmaMapMemory() from different\n  threads at the same time if you pass the same #VmaAllocation object to these\n  functions.\n- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.\n\n\\section general_considerations_versioning_and_compatibility Versioning and compatibility\n\nThe library uses [**Semantic Versioning**](https://semver.org/),\nwhich means version numbers follow convention: Major.Minor.Patch (e.g. 
2.3.0), where:\n\n- Incremented Patch version means a release is backward- and forward-compatible,\n  introducing only some internal improvements, bug fixes, optimizations etc.\n  or changes that are out of scope of the official API described in this documentation.\n- Incremented Minor version means a release is backward-compatible,\n  so existing code that uses the library should continue to work, while some new\n  symbols could have been added: new structures, functions, new values in existing\n  enums and bit flags, new structure members, but not new function parameters.\n- Incrementing Major version means a release could break some backward compatibility.\n\nAll changes between official releases are documented in file \"CHANGELOG.md\".\n\n\\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.\nAdding new members to existing structures is treated as backward compatible if initializing\nthe new members to binary zero results in the old behavior.\nYou should always fully initialize all library structures to zeros and not rely on their\nexact binary size.\n\n\\section general_considerations_validation_layer_warnings Validation layer warnings\n\nWhen using this library, you can meet following types of warnings issued by\nVulkan validation layer. They don't necessarily indicate a bug, so you may need\nto just ignore them.\n\n- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*\n  - It happens when VK_KHR_dedicated_allocation extension is enabled.\n    `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.\n- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.*\n  - It happens when you map a buffer or image, because the library maps entire\n    `VkDeviceMemory` block, where different types of images and buffers may end\n    up together, especially on GPUs with unified memory like Intel.\n- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*\n  - It may happen when you use [defragmentation](@ref defragmentation).\n\n\\section general_considerations_allocation_algorithm Allocation algorithm\n\nThe library uses following algorithm for allocation, in order:\n\n-# Try to find free range of memory in existing blocks.\n-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.\n-# If failed, try to create such block with size / 2, size / 4, size / 8.\n-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,\n   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.\n-# If failed, choose other memory type that meets the requirements specified in\n   VmaAllocationCreateInfo and go to point 1.\n-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.\n\n\\section general_considerations_features_not_supported Features not supported\n\nFeatures deliberately excluded from the scope of this library:\n\n-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images\n   between CPU and GPU memory and related synchronization is responsibility of the user.\n   Defining some \"texture\" object that would automatically stream its data from a\n   staging copy in CPU memory to GPU memory would rather be a feature of another,\n   higher-level library implemented on top of VMA.\n   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.\n-# **Recreation of buffers and images.** Although the library has functions for\n   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to\n   recreate these objects yourself after defragmentation. 
That is because the big\n   structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in\n   #VmaAllocation object.\n-# **Handling CPU memory allocation failures.** When dynamically creating small C++\n   objects in CPU memory (not Vulkan memory), allocation failures are not checked\n   and handled gracefully, because that would complicate code significantly and\n   is usually not needed in desktop PC applications anyway.\n   Success of an allocation is just checked with an assert.\n-# **Code free of any compiler warnings.** Maintaining the library to compile and\n   work correctly on so many different platforms is hard enough. Being free of\n   any warnings, on any version of any compiler, is simply not feasible.\n   There are many preprocessor macros that make some variables unused, function parameters unreferenced,\n   or conditional expressions constant in some configurations.\n   The code of this library should not be bigger or more complicated just to silence these warnings.\n   It is recommended to disable such warnings instead.\n-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but\n   are not going to be included into this repository.\n*/\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vk_platform.h",
    "content": "//\n// File: vk_platform.h\n//\n/*\n** Copyright 2014-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n\n#ifndef VK_PLATFORM_H_\n#define VK_PLATFORM_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif // __cplusplus\n\n/*\n***************************************************************************************************\n*   Platform-specific directives and type declarations\n***************************************************************************************************\n*/\n\n/* Platform-specific calling convention macros.\n *\n * Platforms should define these so that Vulkan clients call Vulkan commands\n * with the same calling conventions that the Vulkan implementation expects.\n *\n * VKAPI_ATTR - Placed before the return type in function declarations.\n *              Useful for C++11 and GCC/Clang-style function attribute syntax.\n * VKAPI_CALL - Placed after the return type in function declarations.\n *              Useful for MSVC-style calling convention syntax.\n * VKAPI_PTR  - Placed between the '(' and '*' in function pointer types.\n *\n * Function declaration:  VKAPI_ATTR void VKAPI_CALL vkCommand(void);\n * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);\n */\n#if defined(_WIN32)\n    // On Windows, Vulkan commands use the stdcall convention\n    #define VKAPI_ATTR\n    #define VKAPI_CALL __stdcall\n    #define VKAPI_PTR  VKAPI_CALL\n#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7\n    #error \"Vulkan is not supported for the 'armeabi' NDK ABI\"\n#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)\n    // On Android 32-bit ARM targets, Vulkan functions use the \"hardfloat\"\n    // calling convention, i.e. float parameters are passed in registers. 
This\n    // is true even if the rest of the application passes floats on the stack,\n    // as it does by default when compiling for the armeabi-v7a NDK ABI.\n    #define VKAPI_ATTR __attribute__((pcs(\"aapcs-vfp\")))\n    #define VKAPI_CALL\n    #define VKAPI_PTR  VKAPI_ATTR\n#else\n    // On other platforms, use the default calling convention\n    #define VKAPI_ATTR\n    #define VKAPI_CALL\n    #define VKAPI_PTR\n#endif\n\n#if !defined(VK_NO_STDDEF_H)\n    #include <stddef.h>\n#endif // !defined(VK_NO_STDDEF_H)\n\n#if !defined(VK_NO_STDINT_H)\n    #if defined(_MSC_VER) && (_MSC_VER < 1600)\n        typedef signed   __int8  int8_t;\n        typedef unsigned __int8  uint8_t;\n        typedef signed   __int16 int16_t;\n        typedef unsigned __int16 uint16_t;\n        typedef signed   __int32 int32_t;\n        typedef unsigned __int32 uint32_t;\n        typedef signed   __int64 int64_t;\n        typedef unsigned __int64 uint64_t;\n    #else\n        #include <stdint.h>\n    #endif\n#endif // !defined(VK_NO_STDINT_H)\n\n#ifdef __cplusplus\n} // extern \"C\"\n#endif // __cplusplus\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan.h",
    "content": "#ifndef VULKAN_H_\n#define VULKAN_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n#include \"vk_platform.h\"\n#include \"vulkan_core.h\"\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \"vulkan_android.h\"\n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include <zircon/types.h>\n#include \"vulkan_fuchsia.h\"\n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \"vulkan_ios.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \"vulkan_macos.h\"\n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \"vulkan_metal.h\"\n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \"vulkan_vi.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include <wayland-client.h>\n#include \"vulkan_wayland.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\n#include <windows.h>\n#include \"vulkan_win32.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include <xcb/xcb.h>\n#include \"vulkan_xcb.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\n#include <X11/Xlib.h>\n#include \"vulkan_xlib.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include <directfb.h>\n#include \"vulkan_directfb.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\n#include <X11/Xlib.h>\n#include <X11/extensions/Xrandr.h>\n#include \"vulkan_xlib_xrandr.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_GGP\n#include <ggp_c/vulkan_types.h>\n#include \"vulkan_ggp.h\"\n#endif\n\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include <screen/screen.h>\n#include \"vulkan_screen.h\"\n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include \"vulkan_beta.h\"\n#endif\n\n#endif // VULKAN_H_\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_core.h",
    "content": "#ifndef VULKAN_CORE_H_\n#define VULKAN_CORE_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_VERSION_1_0 1\n#include \"vk_platform.h\"\n\n#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;\n\n\n#ifndef VK_USE_64_BIT_PTR_DEFINES\n    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)\n        #define VK_USE_64_BIT_PTR_DEFINES 1\n    #else\n        #define VK_USE_64_BIT_PTR_DEFINES 0\n    #endif\n#endif\n\n\n#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE\n    #if (VK_USE_64_BIT_PTR_DEFINES==1)\n        #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))\n            #define VK_NULL_HANDLE nullptr\n        #else\n            #define VK_NULL_HANDLE ((void*)0)\n        #endif\n    #else\n        #define VK_NULL_HANDLE 0ULL\n    #endif\n#endif\n#ifndef VK_NULL_HANDLE\n    #define VK_NULL_HANDLE 0\n#endif\n\n\n#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE\n    #if (VK_USE_64_BIT_PTR_DEFINES==1)\n        #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;\n    #else\n        #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;\n    #endif\n#endif\n\n// DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead.\n#define VK_MAKE_VERSION(major, minor, patch) \\\n    ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))\n\n// DEPRECATED: This define has been removed. Specific version defines (e.g. 
VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead.\n//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0\n\n#define VK_MAKE_API_VERSION(variant, major, minor, patch) \\\n    ((((uint32_t)(variant)) << 29) | (((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))\n\n// Vulkan 1.0 version number\n#define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0\n\n// Version of this file\n#define VK_HEADER_VERSION 224\n\n// Complete version of this file\n#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 3, VK_HEADER_VERSION)\n\n// DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead.\n#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)\n\n// DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead.\n#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU)\n\n// DEPRECATED: This define is deprecated. 
VK_API_VERSION_PATCH should be used instead.\n#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU)\n\n#define VK_API_VERSION_VARIANT(version) ((uint32_t)(version) >> 29)\n#define VK_API_VERSION_MAJOR(version) (((uint32_t)(version) >> 22) & 0x7FU)\n#define VK_API_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU)\n#define VK_API_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU)\ntypedef uint32_t VkBool32;\ntypedef uint64_t VkDeviceAddress;\ntypedef uint64_t VkDeviceSize;\ntypedef uint32_t VkFlags;\ntypedef uint32_t VkSampleMask;\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage)\nVK_DEFINE_HANDLE(VkInstance)\nVK_DEFINE_HANDLE(VkPhysicalDevice)\nVK_DEFINE_HANDLE(VkDevice)\nVK_DEFINE_HANDLE(VkQueue)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore)\nVK_DEFINE_HANDLE(VkCommandBuffer)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)\n#define VK_ATTACHMENT_UNUSED              (~0U)\n#define VK_FALSE                          0U\n#define VK_LOD_CLAMP_NONE                 1000.0F\n#define VK_QUEUE_FAMILY_IGNORED           (~0U)\n#define VK_REMAINING_ARRAY_LAYERS         (~0U)\n#define VK_REMAINING_MIP_LEVELS           
(~0U)\n#define VK_SUBPASS_EXTERNAL               (~0U)\n#define VK_TRUE                           1U\n#define VK_WHOLE_SIZE                     (~0ULL)\n#define VK_MAX_MEMORY_TYPES               32U\n#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE  256U\n#define VK_UUID_SIZE                      16U\n#define VK_MAX_EXTENSION_NAME_SIZE        256U\n#define VK_MAX_DESCRIPTION_SIZE           256U\n#define VK_MAX_MEMORY_HEAPS               16U\n\ntypedef enum VkResult {\n    VK_SUCCESS = 0,\n    VK_NOT_READY = 1,\n    VK_TIMEOUT = 2,\n    VK_EVENT_SET = 3,\n    VK_EVENT_RESET = 4,\n    VK_INCOMPLETE = 5,\n    VK_ERROR_OUT_OF_HOST_MEMORY = -1,\n    VK_ERROR_OUT_OF_DEVICE_MEMORY = -2,\n    VK_ERROR_INITIALIZATION_FAILED = -3,\n    VK_ERROR_DEVICE_LOST = -4,\n    VK_ERROR_MEMORY_MAP_FAILED = -5,\n    VK_ERROR_LAYER_NOT_PRESENT = -6,\n    VK_ERROR_EXTENSION_NOT_PRESENT = -7,\n    VK_ERROR_FEATURE_NOT_PRESENT = -8,\n    VK_ERROR_INCOMPATIBLE_DRIVER = -9,\n    VK_ERROR_TOO_MANY_OBJECTS = -10,\n    VK_ERROR_FORMAT_NOT_SUPPORTED = -11,\n    VK_ERROR_FRAGMENTED_POOL = -12,\n    VK_ERROR_UNKNOWN = -13,\n    VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000,\n    VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003,\n    VK_ERROR_FRAGMENTATION = -1000161000,\n    VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000,\n    VK_PIPELINE_COMPILE_REQUIRED = 1000297000,\n    VK_ERROR_SURFACE_LOST_KHR = -1000000000,\n    VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,\n    VK_SUBOPTIMAL_KHR = 1000001003,\n    VK_ERROR_OUT_OF_DATE_KHR = -1000001004,\n    VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,\n    VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,\n    VK_ERROR_INVALID_SHADER_NV = -1000012000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR = -1000023000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR = -1000023001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR = 
-1000023002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR = -1000023003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR = -1000023004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR = -1000023005,\n#endif\n    VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000,\n    VK_ERROR_NOT_PERMITTED_KHR = -1000174001,\n    VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000,\n    VK_THREAD_IDLE_KHR = 1000268000,\n    VK_THREAD_DONE_KHR = 1000268001,\n    VK_OPERATION_DEFERRED_KHR = 1000268002,\n    VK_OPERATION_NOT_DEFERRED_KHR = 1000268003,\n    VK_ERROR_COMPRESSION_EXHAUSTED_EXT = -1000338000,\n    VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY,\n    VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE,\n    VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION,\n    VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED_KHR,\n    VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,\n    VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,\n    VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED,\n    VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED,\n    VK_RESULT_MAX_ENUM = 0x7FFFFFFF\n} VkResult;\n\ntypedef enum VkStructureType {\n    VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,\n    VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1,\n    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2,\n    VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3,\n    VK_STRUCTURE_TYPE_SUBMIT_INFO = 4,\n    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5,\n    VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6,\n    VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7,\n    VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8,\n    VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9,\n    VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10,\n    VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 
11,\n    VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12,\n    VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13,\n    VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15,\n    VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16,\n    VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17,\n    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18,\n    VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19,\n    VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,\n    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23,\n    VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24,\n    VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25,\n    VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26,\n    VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27,\n    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28,\n    VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29,\n    VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30,\n    VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34,\n    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35,\n    VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,\n    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38,\n    VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42,\n    VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43,\n    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44,\n    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45,\n    
VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46,\n    VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47,\n    VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000,\n    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000,\n    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000,\n    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001,\n    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006,\n    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001,\n    VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000,\n    VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001,\n    VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002,\n    VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003,\n    VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001,\n    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002,\n    VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004,\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006,\n    
VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000,\n    VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002,\n    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003,\n    VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000,\n    VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002,\n    VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002,\n    VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000,\n    VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002,\n    VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004,\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000,\n   
 VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001,\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000,\n    VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001,\n    VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000,\n    VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000,\n    VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52,\n    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000,\n    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000,\n    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001,\n    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002,\n    VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003,\n    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004,\n    VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005,\n    VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000,\n    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000,\n    VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000,\n    VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002,\n    VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000,\n    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001,\n    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001,\n    VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002,\n    VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 
1000207003,\n    VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004,\n    VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000,\n    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001,\n    VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002,\n    VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003,\n    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES = 53,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES = 54,\n    VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO = 1000192000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES = 1000215000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES = 1000245000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES = 1000276000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES = 1000295000,\n    VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO = 1000295001,\n    VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO = 1000295002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES = 1000297000,\n    VK_STRUCTURE_TYPE_MEMORY_BARRIER_2 = 1000314000,\n    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2 = 1000314001,\n    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2 = 1000314002,\n    VK_STRUCTURE_TYPE_DEPENDENCY_INFO = 1000314003,\n    VK_STRUCTURE_TYPE_SUBMIT_INFO_2 = 1000314004,\n    VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO = 1000314005,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO = 1000314006,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES = 1000314007,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES = 1000325000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES = 1000335000,\n    VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2 = 1000337000,\n   
 VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2 = 1000337001,\n    VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2 = 1000337002,\n    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2 = 1000337003,\n    VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2 = 1000337004,\n    VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2 = 1000337005,\n    VK_STRUCTURE_TYPE_BUFFER_COPY_2 = 1000337006,\n    VK_STRUCTURE_TYPE_IMAGE_COPY_2 = 1000337007,\n    VK_STRUCTURE_TYPE_IMAGE_BLIT_2 = 1000337008,\n    VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2 = 1000337009,\n    VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2 = 1000337010,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES = 1000225000,\n    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO = 1000225001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES = 1000225002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES = 1000138000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES = 1000138001,\n    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK = 1000138002,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO = 1000138003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES = 1000066000,\n    VK_STRUCTURE_TYPE_RENDERING_INFO = 1000044000,\n    VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO = 1000044001,\n    VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO = 1000044002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES = 1000044003,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO = 1000044004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES = 1000280000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES = 1000280001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES = 1000281001,\n    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3 = 1000360000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES = 1000413000,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 1000413001,\n    VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS = 1000413002,\n    VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS = 1000413003,\n    VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,\n    VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007,\n    VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009,\n    VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012,\n    VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,\n    VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,\n    VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,\n    VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,\n    VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,\n    VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,\n    VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,\n    VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,\n    VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000,\n    VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000,\n    VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001,\n    VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR = 1000023000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR = 1000023001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR = 1000023002,\n#endif\n#ifdef 
VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR = 1000023003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR = 1000023004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR = 1000023005,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000023006,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR = 1000023007,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR = 1000023008,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR = 1000023009,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR = 1000023010,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR = 1000023011,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000023012,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR = 1000023013,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR = 1000023014,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR = 1000023015,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_2_KHR = 1000023016,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR = 1000024000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR = 1000024001,\n#endif\n    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000,\n    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001,\n    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002,\n    VK_STRUCTURE_TYPE_CU_MODULE_CREATE_INFO_NVX = 1000029000,\n    VK_STRUCTURE_TYPE_CU_FUNCTION_CREATE_INFO_NVX = 1000029001,\n    VK_STRUCTURE_TYPE_CU_LAUNCH_INFO_NVX = 1000029002,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT = 1000038000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000038001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000038002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT = 1000038003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT = 1000038004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT = 1000038005,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT = 1000038006,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT = 1000038007,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT = 1000038008,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT = 1000038009,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_REFERENCE_LISTS_EXT = 1000038010,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_EXT = 
1000039000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000039001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000039002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_VCL_FRAME_INFO_EXT = 1000039003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_EXT = 1000039004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_EXT = 1000039005,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_EXT = 1000039006,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_EXT = 1000039007,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_REFERENCE_LISTS_EXT = 1000039008,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT = 1000039009,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_EXT = 1000039010,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT = 1000040000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT = 1000040001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT = 1000040002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT = 1000040003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000040004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000040005,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    
VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT = 1000040006,\n#endif\n    VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000,\n    VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000044006,\n    VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT = 1000044007,\n    VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD = 1000044008,\n    VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX = 1000044009,\n    VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000,\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000,\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000,\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001,\n    VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,\n    VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,\n    VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001,\n    VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = 1000068000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = 1000068001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = 1000068002,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000,\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001,\n    VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002,\n    VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000,\n    VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001,\n    VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002,\n    
VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000,\n    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000,\n    VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001,\n    VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002,\n    VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003,\n    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000,\n    VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001,\n    VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002,\n    VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000,\n    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000,\n    VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000,\n    VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001,\n    VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002,\n    VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003,\n    VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000,\n    VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000,\n    
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001,\n    VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000,\n    VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000,\n    VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000,\n    VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001,\n    VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002,\n    VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000,\n    VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001,\n    VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002,\n    VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003,\n    VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004,\n    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005,\n    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000,\n    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001,\n    VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002,\n    VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000,\n    VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001,\n    VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002,\n    VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003,\n    VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004,\n    VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000,\n    VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000,\n    VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000,\n    VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001,\n    VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002,\n    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 
1000128003,\n    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004,\n    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000,\n    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001,\n    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002,\n    VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003,\n    VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004,\n    VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005,\n    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID = 1000129006,\n    VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000,\n    VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001,\n    VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003,\n    VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001,\n    VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002,\n    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000,\n    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006,\n    
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009,\n    VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010,\n    VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011,\n    VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001,\n    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015,\n    VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016,\n    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013,\n    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001,\n    VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002,\n    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003,\n    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004,\n    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005,\n    VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT = 1000158006,\n    VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000,\n    
VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001,\n#endif\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005,\n    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001,\n    VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003,\n    VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004,\n    VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005,\n    VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006,\n    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009,\n    VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000,\n    VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000,\n    VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000,\n    VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000,\n    VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000,\n    VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT = 1000187000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000187001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000187002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT = 1000187003,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT = 1000187004,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT = 1000187005,\n#endif\n    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = 1000174000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = 1000388000,\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = 1000388001,\n    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,\n    VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,\n    VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002,\n    VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000,\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000,\n    VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000,\n    VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001,\n    VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002,\n    VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003,\n    VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004,\n    VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000,\n    VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000,\n    VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001,\n    VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000,\n    VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001,\n    VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002,\n    VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000,\n    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 
1000226003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000,\n    VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001,\n    VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000,\n    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002,\n    VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR = 1000248000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000,\n    VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000,\n    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT = 1000254000,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT = 1000254001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT = 1000254002,\n    
VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000,\n    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002,\n    VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001,\n    VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000,\n    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000,\n    VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001,\n    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002,\n    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003,\n    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004,\n    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT = 1000273000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000,\n    VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001,\n    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002,\n    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003,\n    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004,\n    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005,\n    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007,\n   
 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV = 1000278000,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV = 1000278001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000,\n    VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000,\n    VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001,\n    VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001,\n    VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002,\n    VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000,\n    VK_STRUCTURE_TYPE_PRESENT_ID_KHR = 1000294000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR = 1000294001,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR = 1000299000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR = 1000299001,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR = 1000299002,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR = 1000299003,\n#endif\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000,\n    VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT = 1000311000,\n    
VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT = 1000311001,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT = 1000311002,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT = 1000311003,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT = 1000311004,\n    VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT = 1000311005,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT = 1000311006,\n    VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT = 1000311007,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT = 1000311008,\n    VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT = 1000311009,\n    VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311010,\n    VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311011,\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV = 1000314008,\n    VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV = 1000314009,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT = 1000320000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT = 1000320001,\n    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT = 1000320002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD = 1000321000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR = 1000203000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR = 1000322000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR = 1000323000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001,\n    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV = 1000327000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV = 
1000327001,\n    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV = 1000327002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT = 1000330000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001,\n    VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR = 1000336000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT = 1000338000,\n    VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT = 1000338001,\n    VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = 1000338002,\n    VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = 1000338003,\n    VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT = 1000338004,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT = 1000339000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM = 1000342000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT = 1000344000,\n    VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = 1000351000,\n    VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = 1000351002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT = 1000352000,\n    VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT = 1000352001,\n    VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT = 1000352002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT = 1000353000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT = 1000355000,\n    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT = 1000355001,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT = 1000356000,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364000,\n    VK_STRUCTURE_TYPE_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA = 1000364001,\n    VK_STRUCTURE_TYPE_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364002,\n    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365000,\n    VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365001,\n    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA = 1000366000,\n    VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA = 1000366001,\n    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA = 1000366002,\n    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA = 1000366003,\n    VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA = 1000366004,\n    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA = 1000366005,\n    VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA = 1000366006,\n    VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA = 1000366007,\n    VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA = 1000366008,\n    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA = 1000366009,\n    VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI = 1000369000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI = 1000369001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI = 1000369002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI = 1000370000,\n    VK_STRUCTURE_TYPE_MEMORY_GET_REMOTE_ADDRESS_INFO_NV = 1000371000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV = 1000371001,\n    VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT = 1000372000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT = 1000372001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT = 1000376000,\n    
VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT = 1000376001,\n    VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT = 1000376002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT = 1000377000,\n    VK_STRUCTURE_TYPE_SCREEN_SURFACE_CREATE_INFO_QNX = 1000378000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT = 1000381000,\n    VK_STRUCTURE_TYPE_PIPELINE_COLOR_WRITE_CREATE_INFO_EXT = 1000381001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT = 1000382000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR = 1000386000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT = 1000391000,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT = 1000391001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT = 1000392000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT = 1000392001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT = 1000393000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT = 1000411000,\n    VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT = 1000411001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT = 1000412000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE = 1000420000,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE = 1000420001,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE = 1000420002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT = 1000422000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM = 1000425000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM = 1000425001,\n    VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM = 1000425002,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV = 1000430000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT = 1000437000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM = 1000440000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM = 1000440001,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM = 1000440002,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT = 1000458000,\n    VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT = 1000458001,\n    VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000458002,\n    VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT = 1000458003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT = 1000462000,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT = 1000462001,\n    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT = 1000462002,\n    VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT = 1000462003,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM = 1000484000,\n    VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM = 1000484001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC = 1000485000,\n    VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC = 1000485001,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES,\n    VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,\n    VK_STRUCTURE_TYPE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INFO,\n    VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,\n    
VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO,\n    VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_NV = VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD,\n    VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,\n    VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2,\n    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = 
VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO,\n    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES,\n    VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,\n    VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,\n    VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,\n    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,\n    VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,\n    VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO,\n    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO,\n    VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO,\n    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,\n    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,\n    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,\n    VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,\n    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,\n    VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,\n    VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = 
VK_STRUCTURE_TYPE_SUBPASS_END_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,\n    VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,\n    VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES,\n    VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,\n    VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR,\n    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,\n    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES,\n    VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES,\n    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO,\n    VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,\n    VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,\n    VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,\n    VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,\n    VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2,\n    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO,\n    VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES,\n    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES,\n    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,\n    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,\n    
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,\n    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,\n    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES,\n    VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES,\n    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = 
VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES,\n    VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n    VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n    VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n    VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES,\n    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES,\n    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT,\n    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT,\n    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES,\n    VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES,\n    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,\n    VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO,\n    VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO,\n    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES,\n    VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES,\n    VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,\n    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,\n    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n    VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n    VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,\n    VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,\n    VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES,\n    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES,\n    VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 
VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
    VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
    VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
    VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
    VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
    VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_COPY_2,
    VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_IMAGE_COPY_2,
    VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
    VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
    VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3,
    VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR,
    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR,
    VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR,
    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES,
    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES,
    VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
    VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS,
    VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkStructureType;

// Version number stored at the start of pipeline-cache data blobs.
typedef enum VkPipelineCacheHeaderVersion {
    VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
    VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkPipelineCacheHeaderVersion;

// Layouts an image (subresource) may be placed in. Core values are small
// integers; extension-introduced values use the registry's 1000<ext><idx>
// offset scheme, and the *_KHR/*_NV entries at the bottom are aliases onto
// their promoted core (or canonical extension) values.
typedef enum VkImageLayout {
    VK_IMAGE_LAYOUT_UNDEFINED = 0,
    VK_IMAGE_LAYOUT_GENERAL = 1,
    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
    VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
    VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000,
    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001,
    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000,
    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001,
    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002,
    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003,
    VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL = 1000314000,
    VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL = 1000314001,
    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
    // Provisional video-decode layouts, gated behind the beta-extensions opt-in.
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR = 1000024001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR = 1000024002,
#endif
    VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000,
    VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000,
    VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = 1000164003,
    // Provisional video-encode layouts, gated behind the beta-extensions opt-in.
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR = 1000299000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR = 1000299001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002,
#endif
    VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT = 1000339000,
    // Backwards-compatibility aliases for names promoted or renamed since
    // their introducing extensions; each maps onto the canonical value above.
    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
    VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR,
    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL,
    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
    VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
    VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL,
    VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkImageLayout;

// Tags identifying the concrete type behind a Vulkan object handle
// (e.g. for debug-utils object naming). Core handle types are small
// integers; extension handle types use the registry offset scheme.
typedef enum VkObjectType {
    VK_OBJECT_TYPE_UNKNOWN = 0,
    VK_OBJECT_TYPE_INSTANCE = 1,
    VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2,
    VK_OBJECT_TYPE_DEVICE = 3,
    VK_OBJECT_TYPE_QUEUE = 4,
    VK_OBJECT_TYPE_SEMAPHORE = 5,
    VK_OBJECT_TYPE_COMMAND_BUFFER = 6,
    VK_OBJECT_TYPE_FENCE = 7,
    VK_OBJECT_TYPE_DEVICE_MEMORY = 8,
    VK_OBJECT_TYPE_BUFFER = 9,
    VK_OBJECT_TYPE_IMAGE = 10,
    VK_OBJECT_TYPE_EVENT = 11,
    VK_OBJECT_TYPE_QUERY_POOL = 12,
    VK_OBJECT_TYPE_BUFFER_VIEW = 13,
    VK_OBJECT_TYPE_IMAGE_VIEW = 14,
    VK_OBJECT_TYPE_SHADER_MODULE = 15,
    VK_OBJECT_TYPE_PIPELINE_CACHE = 16,
    VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17,
    VK_OBJECT_TYPE_RENDER_PASS = 18,
    VK_OBJECT_TYPE_PIPELINE = 19,
    VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20,
    VK_OBJECT_TYPE_SAMPLER = 21,
    VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22,
    VK_OBJECT_TYPE_DESCRIPTOR_SET = 23,
    VK_OBJECT_TYPE_FRAMEBUFFER = 24,
    VK_OBJECT_TYPE_COMMAND_POOL = 25,
    VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000,
    VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000,
    VK_OBJECT_TYPE_PRIVATE_DATA_SLOT = 1000295000,
    VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
    VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
    VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000,
    VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001,
    VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000,
    // Provisional video handle types, gated behind the beta-extensions opt-in.
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_OBJECT_TYPE_VIDEO_SESSION_KHR = 1000023000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR = 1000023001,
#endif
    VK_OBJECT_TYPE_CU_MODULE_NVX = 1000029000,
    VK_OBJECT_TYPE_CU_FUNCTION_NVX = 1000029001,
    VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000,
    VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000,
    VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000,
    VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
    VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000,
    VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000,
    VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000,
    VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA = 1000366000,
    // Aliases for names that were promoted to core in later Vulkan versions.
    VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE,
    VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION,
    VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT,
    VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkObjectType;

// Khronos-registered vendor IDs; all occupy the 0x10001+ range, distinct
// from PCI vendor IDs.
typedef enum VkVendorId {
    VK_VENDOR_ID_VIV = 0x10001,
    VK_VENDOR_ID_VSI = 0x10002,
    VK_VENDOR_ID_KAZAN = 0x10003,
    VK_VENDOR_ID_CODEPLAY = 0x10004,
    VK_VENDOR_ID_MESA = 0x10005,
    VK_VENDOR_ID_POCL = 0x10006,
    VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkVendorId;

// Lifetime/scope classification reported to host allocation callbacks.
typedef enum VkSystemAllocationScope {
    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
    VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
    VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkSystemAllocationScope;

// Kind of internal allocation reported via allocation-notification callbacks.
typedef enum VkInternalAllocationType {
    VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
    VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF  // sentinel; forces the enum to 32-bit width
} VkInternalAllocationType;

// Texel/vertex data formats. NOTE(review): this enum continues past this
// excerpt; only its opening portion is visible here.
typedef enum VkFormat {
    VK_FORMAT_UNDEFINED = 0,
    VK_FORMAT_R4G4_UNORM_PACK8 = 1,
    VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
    VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
    VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
    VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
    VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
    VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
    VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
    VK_FORMAT_R8_UNORM = 9,
    VK_FORMAT_R8_SNORM = 10,
    VK_FORMAT_R8_USCALED = 11,
    VK_FORMAT_R8_SSCALED = 12,
    VK_FORMAT_R8_UINT = 13,
    VK_FORMAT_R8_SINT = 14,
    VK_FORMAT_R8_SRGB = 15,
    VK_FORMAT_R8G8_UNORM = 16,
    VK_FORMAT_R8G8_SNORM = 17,
    VK_FORMAT_R8G8_USCALED = 18,
    VK_FORMAT_R8G8_SSCALED = 19,
    VK_FORMAT_R8G8_UINT = 20,
    VK_FORMAT_R8G8_SINT = 21,
    VK_FORMAT_R8G8_SRGB = 22,
    VK_FORMAT_R8G8B8_UNORM = 23,
    VK_FORMAT_R8G8B8_SNORM = 24,
    VK_FORMAT_R8G8B8_USCALED = 25,
    VK_FORMAT_R8G8B8_SSCALED = 26,
    VK_FORMAT_R8G8B8_UINT = 27,
    VK_FORMAT_R8G8B8_SINT = 28,
    VK_FORMAT_R8G8B8_SRGB = 29,
    VK_FORMAT_B8G8R8_UNORM = 30,
    VK_FORMAT_B8G8R8_SNORM = 31,
    VK_FORMAT_B8G8R8_USCALED = 32,
    VK_FORMAT_B8G8R8_SSCALED = 33,
    VK_FORMAT_B8G8R8_UINT = 34,
    VK_FORMAT_B8G8R8_SINT = 35,
    VK_FORMAT_B8G8R8_SRGB = 36,
    VK_FORMAT_R8G8B8A8_UNORM = 37,
    VK_FORMAT_R8G8B8A8_SNORM = 38,
    VK_FORMAT_R8G8B8A8_USCALED = 39,
    VK_FORMAT_R8G8B8A8_SSCALED = 40,
    VK_FORMAT_R8G8B8A8_UINT = 41,
    VK_FORMAT_R8G8B8A8_SINT = 42,
    VK_FORMAT_R8G8B8A8_SRGB = 43,
    VK_FORMAT_B8G8R8A8_UNORM = 44,
    VK_FORMAT_B8G8R8A8_SNORM = 45,
    VK_FORMAT_B8G8R8A8_USCALED = 
46,\n    VK_FORMAT_B8G8R8A8_SSCALED = 47,\n    VK_FORMAT_B8G8R8A8_UINT = 48,\n    VK_FORMAT_B8G8R8A8_SINT = 49,\n    VK_FORMAT_B8G8R8A8_SRGB = 50,\n    VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,\n    VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,\n    VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,\n    VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,\n    VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,\n    VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,\n    VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,\n    VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,\n    VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,\n    VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,\n    VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,\n    VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,\n    VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,\n    VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,\n    VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,\n    VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,\n    VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,\n    VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,\n    VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,\n    VK_FORMAT_R16_UNORM = 70,\n    VK_FORMAT_R16_SNORM = 71,\n    VK_FORMAT_R16_USCALED = 72,\n    VK_FORMAT_R16_SSCALED = 73,\n    VK_FORMAT_R16_UINT = 74,\n    VK_FORMAT_R16_SINT = 75,\n    VK_FORMAT_R16_SFLOAT = 76,\n    VK_FORMAT_R16G16_UNORM = 77,\n    VK_FORMAT_R16G16_SNORM = 78,\n    VK_FORMAT_R16G16_USCALED = 79,\n    VK_FORMAT_R16G16_SSCALED = 80,\n    VK_FORMAT_R16G16_UINT = 81,\n    VK_FORMAT_R16G16_SINT = 82,\n    VK_FORMAT_R16G16_SFLOAT = 83,\n    VK_FORMAT_R16G16B16_UNORM = 84,\n    VK_FORMAT_R16G16B16_SNORM = 85,\n    VK_FORMAT_R16G16B16_USCALED = 86,\n    VK_FORMAT_R16G16B16_SSCALED = 87,\n    VK_FORMAT_R16G16B16_UINT = 88,\n    VK_FORMAT_R16G16B16_SINT = 89,\n    VK_FORMAT_R16G16B16_SFLOAT = 90,\n    VK_FORMAT_R16G16B16A16_UNORM = 91,\n    VK_FORMAT_R16G16B16A16_SNORM = 92,\n    VK_FORMAT_R16G16B16A16_USCALED = 93,\n    VK_FORMAT_R16G16B16A16_SSCALED = 94,\n    VK_FORMAT_R16G16B16A16_UINT = 95,\n    VK_FORMAT_R16G16B16A16_SINT = 96,\n    
VK_FORMAT_R16G16B16A16_SFLOAT = 97,\n    VK_FORMAT_R32_UINT = 98,\n    VK_FORMAT_R32_SINT = 99,\n    VK_FORMAT_R32_SFLOAT = 100,\n    VK_FORMAT_R32G32_UINT = 101,\n    VK_FORMAT_R32G32_SINT = 102,\n    VK_FORMAT_R32G32_SFLOAT = 103,\n    VK_FORMAT_R32G32B32_UINT = 104,\n    VK_FORMAT_R32G32B32_SINT = 105,\n    VK_FORMAT_R32G32B32_SFLOAT = 106,\n    VK_FORMAT_R32G32B32A32_UINT = 107,\n    VK_FORMAT_R32G32B32A32_SINT = 108,\n    VK_FORMAT_R32G32B32A32_SFLOAT = 109,\n    VK_FORMAT_R64_UINT = 110,\n    VK_FORMAT_R64_SINT = 111,\n    VK_FORMAT_R64_SFLOAT = 112,\n    VK_FORMAT_R64G64_UINT = 113,\n    VK_FORMAT_R64G64_SINT = 114,\n    VK_FORMAT_R64G64_SFLOAT = 115,\n    VK_FORMAT_R64G64B64_UINT = 116,\n    VK_FORMAT_R64G64B64_SINT = 117,\n    VK_FORMAT_R64G64B64_SFLOAT = 118,\n    VK_FORMAT_R64G64B64A64_UINT = 119,\n    VK_FORMAT_R64G64B64A64_SINT = 120,\n    VK_FORMAT_R64G64B64A64_SFLOAT = 121,\n    VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,\n    VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,\n    VK_FORMAT_D16_UNORM = 124,\n    VK_FORMAT_X8_D24_UNORM_PACK32 = 125,\n    VK_FORMAT_D32_SFLOAT = 126,\n    VK_FORMAT_S8_UINT = 127,\n    VK_FORMAT_D16_UNORM_S8_UINT = 128,\n    VK_FORMAT_D24_UNORM_S8_UINT = 129,\n    VK_FORMAT_D32_SFLOAT_S8_UINT = 130,\n    VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,\n    VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,\n    VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,\n    VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,\n    VK_FORMAT_BC2_UNORM_BLOCK = 135,\n    VK_FORMAT_BC2_SRGB_BLOCK = 136,\n    VK_FORMAT_BC3_UNORM_BLOCK = 137,\n    VK_FORMAT_BC3_SRGB_BLOCK = 138,\n    VK_FORMAT_BC4_UNORM_BLOCK = 139,\n    VK_FORMAT_BC4_SNORM_BLOCK = 140,\n    VK_FORMAT_BC5_UNORM_BLOCK = 141,\n    VK_FORMAT_BC5_SNORM_BLOCK = 142,\n    VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,\n    VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,\n    VK_FORMAT_BC7_UNORM_BLOCK = 145,\n    VK_FORMAT_BC7_SRGB_BLOCK = 146,\n    VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,\n    VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,\n    
VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,\n    VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,\n    VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,\n    VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,\n    VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,\n    VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,\n    VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,\n    VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,\n    VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,\n    VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,\n    VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,\n    VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,\n    VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,\n    VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,\n    VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,\n    VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,\n    VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,\n    VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,\n    VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,\n    VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,\n    VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,\n    VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,\n    VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,\n    VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,\n    VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,\n    VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,\n    VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,\n    VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,\n    VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,\n    VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,\n    VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,\n    VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,\n    VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,\n    VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,\n    VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,\n    VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,\n    VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000,\n    VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001,\n    VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002,\n    VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003,\n    VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004,\n    VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005,\n    VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006,\n    VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007,\n    
VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008,\n    VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,\n    VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,\n    VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,\n    VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017,\n    VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018,\n    VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,\n    VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,\n    VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,\n    VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027,\n    VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028,\n    VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029,\n    VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030,\n    VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,\n    VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,\n    VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033,\n    VK_FORMAT_G8_B8R8_2PLANE_444_UNORM = 1000330000,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 = 1000330001,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 = 1000330002,\n    VK_FORMAT_G16_B16R16_2PLANE_444_UNORM = 1000330003,\n    VK_FORMAT_A4R4G4B4_UNORM_PACK16 = 1000340000,\n    VK_FORMAT_A4B4G4R4_UNORM_PACK16 
= 1000340001,\n    VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK = 1000066000,\n    VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK = 1000066001,\n    VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK = 1000066002,\n    VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK = 1000066003,\n    VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK = 1000066004,\n    VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK = 1000066005,\n    VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK = 1000066006,\n    VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK = 1000066007,\n    VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK = 1000066008,\n    VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK = 1000066009,\n    VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK = 1000066010,\n    VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK = 1000066011,\n    VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK = 1000066012,\n    VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK = 1000066013,\n    VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,\n    VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,\n    VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,\n    VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,\n    VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,\n    VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,\n    VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,\n    VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,\n    VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 
VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK,\n    VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK,\n    VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,\n    VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,\n    VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,\n    VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,\n    VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,\n    VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,\n    VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,\n    VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16,\n    VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16,\n    VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,\n    VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,\n    VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,\n    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,\n    VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16,\n    
VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16,\n    VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,\n    VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,\n    VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,\n    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,\n    VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM,\n    VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM,\n    VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,\n    VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,\n    VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,\n    VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,\n    VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,\n    VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = VK_FORMAT_G8_B8R8_2PLANE_444_UNORM,\n    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16,\n    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16,\n    VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM,\n    
VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16,\n    VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16,\n    VK_FORMAT_MAX_ENUM = 0x7FFFFFFF\n} VkFormat;\n\ntypedef enum VkImageTiling {\n    VK_IMAGE_TILING_OPTIMAL = 0,\n    VK_IMAGE_TILING_LINEAR = 1,\n    VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000,\n    VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF\n} VkImageTiling;\n\ntypedef enum VkImageType {\n    VK_IMAGE_TYPE_1D = 0,\n    VK_IMAGE_TYPE_2D = 1,\n    VK_IMAGE_TYPE_3D = 2,\n    VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkImageType;\n\ntypedef enum VkPhysicalDeviceType {\n    VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,\n    VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,\n    VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,\n    VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,\n    VK_PHYSICAL_DEVICE_TYPE_CPU = 4,\n    VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkPhysicalDeviceType;\n\ntypedef enum VkQueryType {\n    VK_QUERY_TYPE_OCCLUSION = 0,\n    VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,\n    VK_QUERY_TYPE_TIMESTAMP = 2,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR = 1000023000,\n#endif\n    VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004,\n    VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000,\n    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000,\n    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001,\n    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000,\n    VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR = 1000299000,\n#endif\n    VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT = 1000382000,\n    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR = 1000386000,\n    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR = 1000386001,\n    VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkQueryType;\n\ntypedef enum VkSharingMode {\n    
VK_SHARING_MODE_EXCLUSIVE = 0,\n    VK_SHARING_MODE_CONCURRENT = 1,\n    VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF\n} VkSharingMode;\n\ntypedef enum VkComponentSwizzle {\n    VK_COMPONENT_SWIZZLE_IDENTITY = 0,\n    VK_COMPONENT_SWIZZLE_ZERO = 1,\n    VK_COMPONENT_SWIZZLE_ONE = 2,\n    VK_COMPONENT_SWIZZLE_R = 3,\n    VK_COMPONENT_SWIZZLE_G = 4,\n    VK_COMPONENT_SWIZZLE_B = 5,\n    VK_COMPONENT_SWIZZLE_A = 6,\n    VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF\n} VkComponentSwizzle;\n\ntypedef enum VkImageViewType {\n    VK_IMAGE_VIEW_TYPE_1D = 0,\n    VK_IMAGE_VIEW_TYPE_2D = 1,\n    VK_IMAGE_VIEW_TYPE_3D = 2,\n    VK_IMAGE_VIEW_TYPE_CUBE = 3,\n    VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,\n    VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,\n    VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,\n    VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkImageViewType;\n\ntypedef enum VkBlendFactor {\n    VK_BLEND_FACTOR_ZERO = 0,\n    VK_BLEND_FACTOR_ONE = 1,\n    VK_BLEND_FACTOR_SRC_COLOR = 2,\n    VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,\n    VK_BLEND_FACTOR_DST_COLOR = 4,\n    VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,\n    VK_BLEND_FACTOR_SRC_ALPHA = 6,\n    VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,\n    VK_BLEND_FACTOR_DST_ALPHA = 8,\n    VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,\n    VK_BLEND_FACTOR_CONSTANT_COLOR = 10,\n    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,\n    VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,\n    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,\n    VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,\n    VK_BLEND_FACTOR_SRC1_COLOR = 15,\n    VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,\n    VK_BLEND_FACTOR_SRC1_ALPHA = 17,\n    VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,\n    VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF\n} VkBlendFactor;\n\ntypedef enum VkBlendOp {\n    VK_BLEND_OP_ADD = 0,\n    VK_BLEND_OP_SUBTRACT = 1,\n    VK_BLEND_OP_REVERSE_SUBTRACT = 2,\n    VK_BLEND_OP_MIN = 3,\n    VK_BLEND_OP_MAX = 4,\n    VK_BLEND_OP_ZERO_EXT = 1000148000,\n    VK_BLEND_OP_SRC_EXT = 1000148001,\n    VK_BLEND_OP_DST_EXT = 
1000148002,\n    VK_BLEND_OP_SRC_OVER_EXT = 1000148003,\n    VK_BLEND_OP_DST_OVER_EXT = 1000148004,\n    VK_BLEND_OP_SRC_IN_EXT = 1000148005,\n    VK_BLEND_OP_DST_IN_EXT = 1000148006,\n    VK_BLEND_OP_SRC_OUT_EXT = 1000148007,\n    VK_BLEND_OP_DST_OUT_EXT = 1000148008,\n    VK_BLEND_OP_SRC_ATOP_EXT = 1000148009,\n    VK_BLEND_OP_DST_ATOP_EXT = 1000148010,\n    VK_BLEND_OP_XOR_EXT = 1000148011,\n    VK_BLEND_OP_MULTIPLY_EXT = 1000148012,\n    VK_BLEND_OP_SCREEN_EXT = 1000148013,\n    VK_BLEND_OP_OVERLAY_EXT = 1000148014,\n    VK_BLEND_OP_DARKEN_EXT = 1000148015,\n    VK_BLEND_OP_LIGHTEN_EXT = 1000148016,\n    VK_BLEND_OP_COLORDODGE_EXT = 1000148017,\n    VK_BLEND_OP_COLORBURN_EXT = 1000148018,\n    VK_BLEND_OP_HARDLIGHT_EXT = 1000148019,\n    VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020,\n    VK_BLEND_OP_DIFFERENCE_EXT = 1000148021,\n    VK_BLEND_OP_EXCLUSION_EXT = 1000148022,\n    VK_BLEND_OP_INVERT_EXT = 1000148023,\n    VK_BLEND_OP_INVERT_RGB_EXT = 1000148024,\n    VK_BLEND_OP_LINEARDODGE_EXT = 1000148025,\n    VK_BLEND_OP_LINEARBURN_EXT = 1000148026,\n    VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027,\n    VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028,\n    VK_BLEND_OP_PINLIGHT_EXT = 1000148029,\n    VK_BLEND_OP_HARDMIX_EXT = 1000148030,\n    VK_BLEND_OP_HSL_HUE_EXT = 1000148031,\n    VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032,\n    VK_BLEND_OP_HSL_COLOR_EXT = 1000148033,\n    VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034,\n    VK_BLEND_OP_PLUS_EXT = 1000148035,\n    VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036,\n    VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037,\n    VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038,\n    VK_BLEND_OP_MINUS_EXT = 1000148039,\n    VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040,\n    VK_BLEND_OP_CONTRAST_EXT = 1000148041,\n    VK_BLEND_OP_INVERT_OVG_EXT = 1000148042,\n    VK_BLEND_OP_RED_EXT = 1000148043,\n    VK_BLEND_OP_GREEN_EXT = 1000148044,\n    VK_BLEND_OP_BLUE_EXT = 1000148045,\n    VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF\n} VkBlendOp;\n\ntypedef enum 
VkCompareOp {\n    VK_COMPARE_OP_NEVER = 0,\n    VK_COMPARE_OP_LESS = 1,\n    VK_COMPARE_OP_EQUAL = 2,\n    VK_COMPARE_OP_LESS_OR_EQUAL = 3,\n    VK_COMPARE_OP_GREATER = 4,\n    VK_COMPARE_OP_NOT_EQUAL = 5,\n    VK_COMPARE_OP_GREATER_OR_EQUAL = 6,\n    VK_COMPARE_OP_ALWAYS = 7,\n    VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF\n} VkCompareOp;\n\ntypedef enum VkDynamicState {\n    VK_DYNAMIC_STATE_VIEWPORT = 0,\n    VK_DYNAMIC_STATE_SCISSOR = 1,\n    VK_DYNAMIC_STATE_LINE_WIDTH = 2,\n    VK_DYNAMIC_STATE_DEPTH_BIAS = 3,\n    VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,\n    VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,\n    VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,\n    VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,\n    VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,\n    VK_DYNAMIC_STATE_CULL_MODE = 1000267000,\n    VK_DYNAMIC_STATE_FRONT_FACE = 1000267001,\n    VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY = 1000267002,\n    VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT = 1000267003,\n    VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT = 1000267004,\n    VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE = 1000267005,\n    VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE = 1000267006,\n    VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE = 1000267007,\n    VK_DYNAMIC_STATE_DEPTH_COMPARE_OP = 1000267008,\n    VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE = 1000267009,\n    VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE = 1000267010,\n    VK_DYNAMIC_STATE_STENCIL_OP = 1000267011,\n    VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE = 1000377001,\n    VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE = 1000377002,\n    VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE = 1000377004,\n    VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000,\n    VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000,\n    VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000,\n    VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000,\n    VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004,\n    VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006,\n    VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 
1000205001,\n    VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000,\n    VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000,\n    VK_DYNAMIC_STATE_VERTEX_INPUT_EXT = 1000352000,\n    VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT = 1000377000,\n    VK_DYNAMIC_STATE_LOGIC_OP_EXT = 1000377003,\n    VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT = 1000381000,\n    VK_DYNAMIC_STATE_CULL_MODE_EXT = VK_DYNAMIC_STATE_CULL_MODE,\n    VK_DYNAMIC_STATE_FRONT_FACE_EXT = VK_DYNAMIC_STATE_FRONT_FACE,\n    VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY,\n    VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT,\n    VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT,\n    VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE,\n    VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE,\n    VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE,\n    VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = VK_DYNAMIC_STATE_DEPTH_COMPARE_OP,\n    VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE,\n    VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE,\n    VK_DYNAMIC_STATE_STENCIL_OP_EXT = VK_DYNAMIC_STATE_STENCIL_OP,\n    VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE,\n    VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE,\n    VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE,\n    VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF\n} VkDynamicState;\n\ntypedef enum VkFrontFace {\n    VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,\n    VK_FRONT_FACE_CLOCKWISE = 1,\n    VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF\n} VkFrontFace;\n\ntypedef enum VkVertexInputRate {\n    VK_VERTEX_INPUT_RATE_VERTEX = 0,\n    VK_VERTEX_INPUT_RATE_INSTANCE = 1,\n    VK_VERTEX_INPUT_RATE_MAX_ENUM = 
0x7FFFFFFF\n} VkVertexInputRate;\n\ntypedef enum VkPrimitiveTopology {\n    VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,\n    VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,\n    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,\n    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,\n    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,\n    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,\n    VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,\n    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,\n    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,\n    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,\n    VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,\n    VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF\n} VkPrimitiveTopology;\n\ntypedef enum VkPolygonMode {\n    VK_POLYGON_MODE_FILL = 0,\n    VK_POLYGON_MODE_LINE = 1,\n    VK_POLYGON_MODE_POINT = 2,\n    VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000,\n    VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF\n} VkPolygonMode;\n\ntypedef enum VkStencilOp {\n    VK_STENCIL_OP_KEEP = 0,\n    VK_STENCIL_OP_ZERO = 1,\n    VK_STENCIL_OP_REPLACE = 2,\n    VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,\n    VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,\n    VK_STENCIL_OP_INVERT = 5,\n    VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,\n    VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,\n    VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF\n} VkStencilOp;\n\ntypedef enum VkLogicOp {\n    VK_LOGIC_OP_CLEAR = 0,\n    VK_LOGIC_OP_AND = 1,\n    VK_LOGIC_OP_AND_REVERSE = 2,\n    VK_LOGIC_OP_COPY = 3,\n    VK_LOGIC_OP_AND_INVERTED = 4,\n    VK_LOGIC_OP_NO_OP = 5,\n    VK_LOGIC_OP_XOR = 6,\n    VK_LOGIC_OP_OR = 7,\n    VK_LOGIC_OP_NOR = 8,\n    VK_LOGIC_OP_EQUIVALENT = 9,\n    VK_LOGIC_OP_INVERT = 10,\n    VK_LOGIC_OP_OR_REVERSE = 11,\n    VK_LOGIC_OP_COPY_INVERTED = 12,\n    VK_LOGIC_OP_OR_INVERTED = 13,\n    VK_LOGIC_OP_NAND = 14,\n    VK_LOGIC_OP_SET = 15,\n    VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF\n} VkLogicOp;\n\ntypedef enum VkBorderColor {\n    VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,\n    VK_BORDER_COLOR_INT_TRANSPARENT_BLACK 
= 1,\n    VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,\n    VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,\n    VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,\n    VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,\n    VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 1000287003,\n    VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004,\n    VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF\n} VkBorderColor;\n\ntypedef enum VkFilter {\n    VK_FILTER_NEAREST = 0,\n    VK_FILTER_LINEAR = 1,\n    VK_FILTER_CUBIC_EXT = 1000015000,\n    VK_FILTER_CUBIC_IMG = VK_FILTER_CUBIC_EXT,\n    VK_FILTER_MAX_ENUM = 0x7FFFFFFF\n} VkFilter;\n\ntypedef enum VkSamplerAddressMode {\n    VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,\n    VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,\n    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,\n    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,\n    VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,\n    VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,\n    VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerAddressMode;\n\ntypedef enum VkSamplerMipmapMode {\n    VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,\n    VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,\n    VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerMipmapMode;\n\ntypedef enum VkDescriptorType {\n    VK_DESCRIPTOR_TYPE_SAMPLER = 0,\n    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,\n    VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,\n    VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,\n    VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,\n    VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,\n    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,\n    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,\n    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,\n    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,\n    VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,\n    VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK = 1000138000,\n    VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000,\n    VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,\n    VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = 
1000351000,\n    VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM = 1000440000,\n    VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM = 1000440001,\n    VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK,\n    VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkDescriptorType;\n\ntypedef enum VkAttachmentLoadOp {\n    VK_ATTACHMENT_LOAD_OP_LOAD = 0,\n    VK_ATTACHMENT_LOAD_OP_CLEAR = 1,\n    VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,\n    VK_ATTACHMENT_LOAD_OP_NONE_EXT = 1000400000,\n    VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF\n} VkAttachmentLoadOp;\n\ntypedef enum VkAttachmentStoreOp {\n    VK_ATTACHMENT_STORE_OP_STORE = 0,\n    VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,\n    VK_ATTACHMENT_STORE_OP_NONE = 1000301000,\n    VK_ATTACHMENT_STORE_OP_NONE_KHR = VK_ATTACHMENT_STORE_OP_NONE,\n    VK_ATTACHMENT_STORE_OP_NONE_QCOM = VK_ATTACHMENT_STORE_OP_NONE,\n    VK_ATTACHMENT_STORE_OP_NONE_EXT = VK_ATTACHMENT_STORE_OP_NONE,\n    VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF\n} VkAttachmentStoreOp;\n\ntypedef enum VkPipelineBindPoint {\n    VK_PIPELINE_BIND_POINT_GRAPHICS = 0,\n    VK_PIPELINE_BIND_POINT_COMPUTE = 1,\n    VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000,\n    VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI = 1000369003,\n    VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,\n    VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineBindPoint;\n\ntypedef enum VkCommandBufferLevel {\n    VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,\n    VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,\n    VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF\n} VkCommandBufferLevel;\n\ntypedef enum VkIndexType {\n    VK_INDEX_TYPE_UINT16 = 0,\n    VK_INDEX_TYPE_UINT32 = 1,\n    VK_INDEX_TYPE_NONE_KHR = 1000165000,\n    VK_INDEX_TYPE_UINT8_EXT = 1000265000,\n    VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR,\n    VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkIndexType;\n\ntypedef enum VkSubpassContents {\n    VK_SUBPASS_CONTENTS_INLINE = 0,\n    
VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,\n    VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF\n} VkSubpassContents;\n\ntypedef enum VkAccessFlagBits {\n    VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001,\n    VK_ACCESS_INDEX_READ_BIT = 0x00000002,\n    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004,\n    VK_ACCESS_UNIFORM_READ_BIT = 0x00000008,\n    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010,\n    VK_ACCESS_SHADER_READ_BIT = 0x00000020,\n    VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,\n    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080,\n    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100,\n    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200,\n    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400,\n    VK_ACCESS_TRANSFER_READ_BIT = 0x00000800,\n    VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000,\n    VK_ACCESS_HOST_READ_BIT = 0x00002000,\n    VK_ACCESS_HOST_WRITE_BIT = 0x00004000,\n    VK_ACCESS_MEMORY_READ_BIT = 0x00008000,\n    VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,\n    VK_ACCESS_NONE = 0,\n    VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000,\n    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000,\n    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000,\n    VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000,\n    VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000,\n    VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000,\n    VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000,\n    VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000,\n    VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000,\n    VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000,\n    VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000,\n    VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR,\n    VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR,\n    
VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,\n    VK_ACCESS_NONE_KHR = VK_ACCESS_NONE,\n    VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkAccessFlagBits;\ntypedef VkFlags VkAccessFlags;\n\ntypedef enum VkImageAspectFlagBits {\n    VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,\n    VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,\n    VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,\n    VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,\n    VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010,\n    VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020,\n    VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040,\n    VK_IMAGE_ASPECT_NONE = 0,\n    VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080,\n    VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100,\n    VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200,\n    VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400,\n    VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT,\n    VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT,\n    VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT,\n    VK_IMAGE_ASPECT_NONE_KHR = VK_IMAGE_ASPECT_NONE,\n    VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkImageAspectFlagBits;\ntypedef VkFlags VkImageAspectFlags;\n\ntypedef enum VkFormatFeatureFlagBits {\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,\n    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,\n    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,\n    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,\n    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,\n    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,\n    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,\n    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,\n    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,\n    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,\n    VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,\n    VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,\n    
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,\n    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000,\n    VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000,\n    VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000,\n    VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000,\n    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000,\n#endif\n    VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000,\n    VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000,\n    VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000,\n#endif\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT,\n    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT,\n    VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT,\n    VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 
VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT,\n    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT,\n    VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT,\n    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT,\n    VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkFormatFeatureFlagBits;\ntypedef VkFlags VkFormatFeatureFlags;\n\ntypedef enum VkImageCreateFlagBits {\n    VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,\n    VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,\n    VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,\n    VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,\n    VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,\n    VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400,\n    VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040,\n    VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020,\n    VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080,\n    VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100,\n    VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800,\n    VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200,\n    VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000,\n    VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000,\n    VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000,\n    
VK_IMAGE_CREATE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_BIT_EXT = 0x00040000,\n    VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT = 0x00020000,\n    VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_QCOM = 0x00008000,\n    VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT,\n    VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT,\n    VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT,\n    VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT,\n    VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT,\n    VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT,\n    VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkImageCreateFlagBits;\ntypedef VkFlags VkImageCreateFlags;\n\ntypedef enum VkSampleCountFlagBits {\n    VK_SAMPLE_COUNT_1_BIT = 0x00000001,\n    VK_SAMPLE_COUNT_2_BIT = 0x00000002,\n    VK_SAMPLE_COUNT_4_BIT = 0x00000004,\n    VK_SAMPLE_COUNT_8_BIT = 0x00000008,\n    VK_SAMPLE_COUNT_16_BIT = 0x00000010,\n    VK_SAMPLE_COUNT_32_BIT = 0x00000020,\n    VK_SAMPLE_COUNT_64_BIT = 0x00000040,\n    VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSampleCountFlagBits;\ntypedef VkFlags VkSampleCountFlags;\n\ntypedef enum VkImageUsageFlagBits {\n    VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,\n    VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,\n    VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,\n    VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,\n    VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,\n    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,\n    VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,\n    VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00000400,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00000800,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n  
  VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR = 0x00001000,\n#endif\n    VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200,\n    VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00000100,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00002000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00004000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000,\n#endif\n    VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x00080000,\n    VK_IMAGE_USAGE_INVOCATION_MASK_BIT_HUAWEI = 0x00040000,\n    VK_IMAGE_USAGE_SAMPLE_WEIGHT_BIT_QCOM = 0x00100000,\n    VK_IMAGE_USAGE_SAMPLE_BLOCK_MATCH_BIT_QCOM = 0x00200000,\n    VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,\n    VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkImageUsageFlagBits;\ntypedef VkFlags VkImageUsageFlags;\n\ntypedef enum VkInstanceCreateFlagBits {\n    VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR = 0x00000001,\n    VK_INSTANCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkInstanceCreateFlagBits;\ntypedef VkFlags VkInstanceCreateFlags;\n\ntypedef enum VkMemoryHeapFlagBits {\n    VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,\n    VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002,\n    VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT,\n    VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkMemoryHeapFlagBits;\ntypedef VkFlags VkMemoryHeapFlags;\n\ntypedef enum VkMemoryPropertyFlagBits {\n    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,\n    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,\n    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,\n    VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,\n    VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,\n    VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020,\n    VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040,\n    
VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080,\n    VK_MEMORY_PROPERTY_RDMA_CAPABLE_BIT_NV = 0x00000100,\n    VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkMemoryPropertyFlagBits;\ntypedef VkFlags VkMemoryPropertyFlags;\n\ntypedef enum VkQueueFlagBits {\n    VK_QUEUE_GRAPHICS_BIT = 0x00000001,\n    VK_QUEUE_COMPUTE_BIT = 0x00000002,\n    VK_QUEUE_TRANSFER_BIT = 0x00000004,\n    VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,\n    VK_QUEUE_PROTECTED_BIT = 0x00000010,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_QUEUE_VIDEO_DECODE_BIT_KHR = 0x00000020,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_QUEUE_VIDEO_ENCODE_BIT_KHR = 0x00000040,\n#endif\n    VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkQueueFlagBits;\ntypedef VkFlags VkQueueFlags;\ntypedef VkFlags VkDeviceCreateFlags;\n\ntypedef enum VkDeviceQueueCreateFlagBits {\n    VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001,\n    VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkDeviceQueueCreateFlagBits;\ntypedef VkFlags VkDeviceQueueCreateFlags;\n\ntypedef enum VkPipelineStageFlagBits {\n    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,\n    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,\n    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,\n    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,\n    VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,\n    VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,\n    VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,\n    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,\n    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,\n    VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,\n    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,\n    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,\n    VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,\n    VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,\n    VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,\n    VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 
0x00008000,\n    VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,\n    VK_PIPELINE_STAGE_NONE = 0,\n    VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000,\n    VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000,\n    VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000,\n    VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000,\n    VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000,\n    VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000,\n    VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000,\n    VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000,\n    VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000,\n    VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,\n    VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,\n    VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,\n    VK_PIPELINE_STAGE_NONE_KHR = VK_PIPELINE_STAGE_NONE,\n    VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineStageFlagBits;\ntypedef VkFlags VkPipelineStageFlags;\ntypedef VkFlags VkMemoryMapFlags;\n\ntypedef enum VkSparseMemoryBindFlagBits {\n    VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,\n    VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSparseMemoryBindFlagBits;\ntypedef VkFlags VkSparseMemoryBindFlags;\n\ntypedef enum VkSparseImageFormatFlagBits {\n    VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,\n    VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,\n    VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,\n    VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSparseImageFormatFlagBits;\ntypedef VkFlags VkSparseImageFormatFlags;\n\ntypedef enum VkFenceCreateFlagBits {\n    VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,\n    VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} 
VkFenceCreateFlagBits;\ntypedef VkFlags VkFenceCreateFlags;\ntypedef VkFlags VkSemaphoreCreateFlags;\n\ntypedef enum VkEventCreateFlagBits {\n    VK_EVENT_CREATE_DEVICE_ONLY_BIT = 0x00000001,\n    VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = VK_EVENT_CREATE_DEVICE_ONLY_BIT,\n    VK_EVENT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkEventCreateFlagBits;\ntypedef VkFlags VkEventCreateFlags;\n\ntypedef enum VkQueryPipelineStatisticFlagBits {\n    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,\n    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,\n    VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,\n    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,\n    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,\n    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,\n    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,\n    VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,\n    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,\n    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,\n    VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,\n    VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkQueryPipelineStatisticFlagBits;\ntypedef VkFlags VkQueryPipelineStatisticFlags;\ntypedef VkFlags VkQueryPoolCreateFlags;\n\ntypedef enum VkQueryResultFlagBits {\n    VK_QUERY_RESULT_64_BIT = 0x00000001,\n    VK_QUERY_RESULT_WAIT_BIT = 0x00000002,\n    VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,\n    VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_QUERY_RESULT_WITH_STATUS_BIT_KHR = 0x00000010,\n#endif\n    VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkQueryResultFlagBits;\ntypedef VkFlags VkQueryResultFlags;\n\ntypedef enum VkBufferCreateFlagBits {\n    
VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,\n    VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,\n    VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,\n    VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008,\n    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010,\n    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,\n    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,\n    VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkBufferCreateFlagBits;\ntypedef VkFlags VkBufferCreateFlags;\n\ntypedef enum VkBufferUsageFlagBits {\n    VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,\n    VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,\n    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,\n    VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,\n    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,\n    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,\n    VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,\n    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,\n    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,\n    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000,\n#endif\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00004000,\n#endif\n    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800,\n    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000,\n    VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200,\n    VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000,\n    VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000,\n    VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400,\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n    VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000,\n#endif\n#ifdef 
VK_ENABLE_BETA_EXTENSIONS\n    VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000,\n#endif\n    VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR,\n    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,\n    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,\n    VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkBufferUsageFlagBits;\ntypedef VkFlags VkBufferUsageFlags;\ntypedef VkFlags VkBufferViewCreateFlags;\n\ntypedef enum VkImageViewCreateFlagBits {\n    VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001,\n    VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT = 0x00000002,\n    VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkImageViewCreateFlagBits;\ntypedef VkFlags VkImageViewCreateFlags;\ntypedef VkFlags VkShaderModuleCreateFlags;\n\ntypedef enum VkPipelineCacheCreateFlagBits {\n    VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,\n    VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,\n    VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineCacheCreateFlagBits;\ntypedef VkFlags VkPipelineCacheCreateFlags;\n\ntypedef enum VkColorComponentFlagBits {\n    VK_COLOR_COMPONENT_R_BIT = 0x00000001,\n    VK_COLOR_COMPONENT_G_BIT = 0x00000002,\n    VK_COLOR_COMPONENT_B_BIT = 0x00000004,\n    VK_COLOR_COMPONENT_A_BIT = 0x00000008,\n    VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkColorComponentFlagBits;\ntypedef VkFlags VkColorComponentFlags;\n\ntypedef enum VkPipelineCreateFlagBits {\n    VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,\n    VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,\n    VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,\n    VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,\n    VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010,\n    
VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100,\n    VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200,\n    VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000,\n    VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000,\n    VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000,\n    VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000,\n    VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000,\n    VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000,\n    VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000,\n    VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000,\n    VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000,\n    VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020,\n    VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040,\n    VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080,\n    VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000,\n    VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800,\n    VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000,\n    VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400,\n    VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000,\n    VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000,\n    VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000,\n    VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT,\n    VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,\n    VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT,\n    
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,\n    VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,\n    VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT,\n    VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT,\n    VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineCreateFlagBits;\ntypedef VkFlags VkPipelineCreateFlags;\n\ntypedef enum VkPipelineShaderStageCreateFlagBits {\n    VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT = 0x00000001,\n    VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT = 0x00000002,\n    VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT,\n    VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT,\n    VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineShaderStageCreateFlagBits;\ntypedef VkFlags VkPipelineShaderStageCreateFlags;\n\ntypedef enum VkShaderStageFlagBits {\n    VK_SHADER_STAGE_VERTEX_BIT = 0x00000001,\n    VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002,\n    VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004,\n    VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008,\n    VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010,\n    VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,\n    VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F,\n    VK_SHADER_STAGE_ALL = 0x7FFFFFFF,\n    VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100,\n    VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200,\n    VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400,\n    VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800,\n    VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000,\n    VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000,\n    VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040,\n    
VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080,\n    VK_SHADER_STAGE_SUBPASS_SHADING_BIT_HUAWEI = 0x00004000,\n    VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR,\n    VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR,\n    VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,\n    VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR,\n    VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR,\n    VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR,\n    VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkShaderStageFlagBits;\n\ntypedef enum VkCullModeFlagBits {\n    VK_CULL_MODE_NONE = 0,\n    VK_CULL_MODE_FRONT_BIT = 0x00000001,\n    VK_CULL_MODE_BACK_BIT = 0x00000002,\n    VK_CULL_MODE_FRONT_AND_BACK = 0x00000003,\n    VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkCullModeFlagBits;\ntypedef VkFlags VkCullModeFlags;\ntypedef VkFlags VkPipelineVertexInputStateCreateFlags;\ntypedef VkFlags VkPipelineInputAssemblyStateCreateFlags;\ntypedef VkFlags VkPipelineTessellationStateCreateFlags;\ntypedef VkFlags VkPipelineViewportStateCreateFlags;\ntypedef VkFlags VkPipelineRasterizationStateCreateFlags;\ntypedef VkFlags VkPipelineMultisampleStateCreateFlags;\n\ntypedef enum VkPipelineDepthStencilStateCreateFlagBits {\n    VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = 0x00000001,\n    VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = 0x00000002,\n    VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineDepthStencilStateCreateFlagBits;\ntypedef VkFlags VkPipelineDepthStencilStateCreateFlags;\n\ntypedef enum VkPipelineColorBlendStateCreateFlagBits {\n    VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM = 0x00000001,\n    VK_PIPELINE_COLOR_BLEND_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} 
VkPipelineColorBlendStateCreateFlagBits;\ntypedef VkFlags VkPipelineColorBlendStateCreateFlags;\ntypedef VkFlags VkPipelineDynamicStateCreateFlags;\n\ntypedef enum VkPipelineLayoutCreateFlagBits {\n    VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT = 0x00000002,\n    VK_PIPELINE_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPipelineLayoutCreateFlagBits;\ntypedef VkFlags VkPipelineLayoutCreateFlags;\ntypedef VkFlags VkShaderStageFlags;\n\ntypedef enum VkSamplerCreateFlagBits {\n    VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001,\n    VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002,\n    VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT = 0x00000004,\n    VK_SAMPLER_CREATE_IMAGE_PROCESSING_BIT_QCOM = 0x00000010,\n    VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerCreateFlagBits;\ntypedef VkFlags VkSamplerCreateFlags;\n\ntypedef enum VkDescriptorPoolCreateFlagBits {\n    VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,\n    VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002,\n    VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = 0x00000004,\n    VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,\n    VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkDescriptorPoolCreateFlagBits;\ntypedef VkFlags VkDescriptorPoolCreateFlags;\ntypedef VkFlags VkDescriptorPoolResetFlags;\n\ntypedef enum VkDescriptorSetLayoutCreateFlagBits {\n    VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002,\n    VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001,\n    VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = 0x00000004,\n    VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,\n    VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkDescriptorSetLayoutCreateFlagBits;\ntypedef VkFlags 
VkDescriptorSetLayoutCreateFlags;\n\ntypedef enum VkAttachmentDescriptionFlagBits {\n    VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001,\n    VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkAttachmentDescriptionFlagBits;\ntypedef VkFlags VkAttachmentDescriptionFlags;\n\ntypedef enum VkDependencyFlagBits {\n    VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,\n    VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004,\n    VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002,\n    VK_DEPENDENCY_FEEDBACK_LOOP_BIT_EXT = 0x00000008,\n    VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT,\n    VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT,\n    VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkDependencyFlagBits;\ntypedef VkFlags VkDependencyFlags;\n\ntypedef enum VkFramebufferCreateFlagBits {\n    VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001,\n    VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT,\n    VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkFramebufferCreateFlagBits;\ntypedef VkFlags VkFramebufferCreateFlags;\n\ntypedef enum VkRenderPassCreateFlagBits {\n    VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002,\n    VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkRenderPassCreateFlagBits;\ntypedef VkFlags VkRenderPassCreateFlags;\n\ntypedef enum VkSubpassDescriptionFlagBits {\n    VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001,\n    VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002,\n    VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004,\n    VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008,\n    VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM = 0x00000010,\n    VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = 0x00000020,\n    VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = 0x00000040,\n    VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 
0x7FFFFFFF\n} VkSubpassDescriptionFlagBits;\ntypedef VkFlags VkSubpassDescriptionFlags;\n\ntypedef enum VkCommandPoolCreateFlagBits {\n    VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,\n    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,\n    VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004,\n    VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkCommandPoolCreateFlagBits;\ntypedef VkFlags VkCommandPoolCreateFlags;\n\ntypedef enum VkCommandPoolResetFlagBits {\n    VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,\n    VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkCommandPoolResetFlagBits;\ntypedef VkFlags VkCommandPoolResetFlags;\n\ntypedef enum VkCommandBufferUsageFlagBits {\n    VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,\n    VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,\n    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,\n    VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkCommandBufferUsageFlagBits;\ntypedef VkFlags VkCommandBufferUsageFlags;\n\ntypedef enum VkQueryControlFlagBits {\n    VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,\n    VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkQueryControlFlagBits;\ntypedef VkFlags VkQueryControlFlags;\n\ntypedef enum VkCommandBufferResetFlagBits {\n    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,\n    VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkCommandBufferResetFlagBits;\ntypedef VkFlags VkCommandBufferResetFlags;\n\ntypedef enum VkStencilFaceFlagBits {\n    VK_STENCIL_FACE_FRONT_BIT = 0x00000001,\n    VK_STENCIL_FACE_BACK_BIT = 0x00000002,\n    VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003,\n    VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK,\n    VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkStencilFaceFlagBits;\ntypedef VkFlags VkStencilFaceFlags;\ntypedef struct VkExtent2D {\n    uint32_t    width;\n    uint32_t    height;\n} VkExtent2D;\n\ntypedef struct 
VkExtent3D {\n    uint32_t    width;\n    uint32_t    height;\n    uint32_t    depth;\n} VkExtent3D;\n\ntypedef struct VkOffset2D {\n    int32_t    x;\n    int32_t    y;\n} VkOffset2D;\n\ntypedef struct VkOffset3D {\n    int32_t    x;\n    int32_t    y;\n    int32_t    z;\n} VkOffset3D;\n\ntypedef struct VkRect2D {\n    VkOffset2D    offset;\n    VkExtent2D    extent;\n} VkRect2D;\n\ntypedef struct VkBaseInStructure {\n    VkStructureType                    sType;\n    const struct VkBaseInStructure*    pNext;\n} VkBaseInStructure;\n\ntypedef struct VkBaseOutStructure {\n    VkStructureType               sType;\n    struct VkBaseOutStructure*    pNext;\n} VkBaseOutStructure;\n\ntypedef struct VkBufferMemoryBarrier {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkAccessFlags      srcAccessMask;\n    VkAccessFlags      dstAccessMask;\n    uint32_t           srcQueueFamilyIndex;\n    uint32_t           dstQueueFamilyIndex;\n    VkBuffer           buffer;\n    VkDeviceSize       offset;\n    VkDeviceSize       size;\n} VkBufferMemoryBarrier;\n\ntypedef struct VkDispatchIndirectCommand {\n    uint32_t    x;\n    uint32_t    y;\n    uint32_t    z;\n} VkDispatchIndirectCommand;\n\ntypedef struct VkDrawIndexedIndirectCommand {\n    uint32_t    indexCount;\n    uint32_t    instanceCount;\n    uint32_t    firstIndex;\n    int32_t     vertexOffset;\n    uint32_t    firstInstance;\n} VkDrawIndexedIndirectCommand;\n\ntypedef struct VkDrawIndirectCommand {\n    uint32_t    vertexCount;\n    uint32_t    instanceCount;\n    uint32_t    firstVertex;\n    uint32_t    firstInstance;\n} VkDrawIndirectCommand;\n\ntypedef struct VkImageSubresourceRange {\n    VkImageAspectFlags    aspectMask;\n    uint32_t              baseMipLevel;\n    uint32_t              levelCount;\n    uint32_t              baseArrayLayer;\n    uint32_t              layerCount;\n} VkImageSubresourceRange;\n\ntypedef struct VkImageMemoryBarrier {\n    VkStructureType            sType;\n    
const void*                pNext;\n    VkAccessFlags              srcAccessMask;\n    VkAccessFlags              dstAccessMask;\n    VkImageLayout              oldLayout;\n    VkImageLayout              newLayout;\n    uint32_t                   srcQueueFamilyIndex;\n    uint32_t                   dstQueueFamilyIndex;\n    VkImage                    image;\n    VkImageSubresourceRange    subresourceRange;\n} VkImageMemoryBarrier;\n\ntypedef struct VkMemoryBarrier {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkAccessFlags      srcAccessMask;\n    VkAccessFlags      dstAccessMask;\n} VkMemoryBarrier;\n\ntypedef struct VkPipelineCacheHeaderVersionOne {\n    uint32_t                        headerSize;\n    VkPipelineCacheHeaderVersion    headerVersion;\n    uint32_t                        vendorID;\n    uint32_t                        deviceID;\n    uint8_t                         pipelineCacheUUID[VK_UUID_SIZE];\n} VkPipelineCacheHeaderVersionOne;\n\ntypedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(\n    void*                                       pUserData,\n    size_t                                      size,\n    size_t                                      alignment,\n    VkSystemAllocationScope                     allocationScope);\n\ntypedef void (VKAPI_PTR *PFN_vkFreeFunction)(\n    void*                                       pUserData,\n    void*                                       pMemory);\n\ntypedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(\n    void*                                       pUserData,\n    size_t                                      size,\n    VkInternalAllocationType                    allocationType,\n    VkSystemAllocationScope                     allocationScope);\n\ntypedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(\n    void*                                       pUserData,\n    size_t                                      size,\n    VkInternalAllocationType                    
allocationType,\n    VkSystemAllocationScope                     allocationScope);\n\ntypedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(\n    void*                                       pUserData,\n    void*                                       pOriginal,\n    size_t                                      size,\n    size_t                                      alignment,\n    VkSystemAllocationScope                     allocationScope);\n\ntypedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);\ntypedef struct VkAllocationCallbacks {\n    void*                                   pUserData;\n    PFN_vkAllocationFunction                pfnAllocation;\n    PFN_vkReallocationFunction              pfnReallocation;\n    PFN_vkFreeFunction                      pfnFree;\n    PFN_vkInternalAllocationNotification    pfnInternalAllocation;\n    PFN_vkInternalFreeNotification          pfnInternalFree;\n} VkAllocationCallbacks;\n\ntypedef struct VkApplicationInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    const char*        pApplicationName;\n    uint32_t           applicationVersion;\n    const char*        pEngineName;\n    uint32_t           engineVersion;\n    uint32_t           apiVersion;\n} VkApplicationInfo;\n\ntypedef struct VkFormatProperties {\n    VkFormatFeatureFlags    linearTilingFeatures;\n    VkFormatFeatureFlags    optimalTilingFeatures;\n    VkFormatFeatureFlags    bufferFeatures;\n} VkFormatProperties;\n\ntypedef struct VkImageFormatProperties {\n    VkExtent3D            maxExtent;\n    uint32_t              maxMipLevels;\n    uint32_t              maxArrayLayers;\n    VkSampleCountFlags    sampleCounts;\n    VkDeviceSize          maxResourceSize;\n} VkImageFormatProperties;\n\ntypedef struct VkInstanceCreateInfo {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkInstanceCreateFlags       flags;\n    const VkApplicationInfo*    pApplicationInfo;\n    uint32_t                    
enabledLayerCount;\n    const char* const*          ppEnabledLayerNames;\n    uint32_t                    enabledExtensionCount;\n    const char* const*          ppEnabledExtensionNames;\n} VkInstanceCreateInfo;\n\ntypedef struct VkMemoryHeap {\n    VkDeviceSize         size;\n    VkMemoryHeapFlags    flags;\n} VkMemoryHeap;\n\ntypedef struct VkMemoryType {\n    VkMemoryPropertyFlags    propertyFlags;\n    uint32_t                 heapIndex;\n} VkMemoryType;\n\ntypedef struct VkPhysicalDeviceFeatures {\n    VkBool32    robustBufferAccess;\n    VkBool32    fullDrawIndexUint32;\n    VkBool32    imageCubeArray;\n    VkBool32    independentBlend;\n    VkBool32    geometryShader;\n    VkBool32    tessellationShader;\n    VkBool32    sampleRateShading;\n    VkBool32    dualSrcBlend;\n    VkBool32    logicOp;\n    VkBool32    multiDrawIndirect;\n    VkBool32    drawIndirectFirstInstance;\n    VkBool32    depthClamp;\n    VkBool32    depthBiasClamp;\n    VkBool32    fillModeNonSolid;\n    VkBool32    depthBounds;\n    VkBool32    wideLines;\n    VkBool32    largePoints;\n    VkBool32    alphaToOne;\n    VkBool32    multiViewport;\n    VkBool32    samplerAnisotropy;\n    VkBool32    textureCompressionETC2;\n    VkBool32    textureCompressionASTC_LDR;\n    VkBool32    textureCompressionBC;\n    VkBool32    occlusionQueryPrecise;\n    VkBool32    pipelineStatisticsQuery;\n    VkBool32    vertexPipelineStoresAndAtomics;\n    VkBool32    fragmentStoresAndAtomics;\n    VkBool32    shaderTessellationAndGeometryPointSize;\n    VkBool32    shaderImageGatherExtended;\n    VkBool32    shaderStorageImageExtendedFormats;\n    VkBool32    shaderStorageImageMultisample;\n    VkBool32    shaderStorageImageReadWithoutFormat;\n    VkBool32    shaderStorageImageWriteWithoutFormat;\n    VkBool32    shaderUniformBufferArrayDynamicIndexing;\n    VkBool32    shaderSampledImageArrayDynamicIndexing;\n    VkBool32    shaderStorageBufferArrayDynamicIndexing;\n    VkBool32    
shaderStorageImageArrayDynamicIndexing;\n    VkBool32    shaderClipDistance;\n    VkBool32    shaderCullDistance;\n    VkBool32    shaderFloat64;\n    VkBool32    shaderInt64;\n    VkBool32    shaderInt16;\n    VkBool32    shaderResourceResidency;\n    VkBool32    shaderResourceMinLod;\n    VkBool32    sparseBinding;\n    VkBool32    sparseResidencyBuffer;\n    VkBool32    sparseResidencyImage2D;\n    VkBool32    sparseResidencyImage3D;\n    VkBool32    sparseResidency2Samples;\n    VkBool32    sparseResidency4Samples;\n    VkBool32    sparseResidency8Samples;\n    VkBool32    sparseResidency16Samples;\n    VkBool32    sparseResidencyAliased;\n    VkBool32    variableMultisampleRate;\n    VkBool32    inheritedQueries;\n} VkPhysicalDeviceFeatures;\n\ntypedef struct VkPhysicalDeviceLimits {\n    uint32_t              maxImageDimension1D;\n    uint32_t              maxImageDimension2D;\n    uint32_t              maxImageDimension3D;\n    uint32_t              maxImageDimensionCube;\n    uint32_t              maxImageArrayLayers;\n    uint32_t              maxTexelBufferElements;\n    uint32_t              maxUniformBufferRange;\n    uint32_t              maxStorageBufferRange;\n    uint32_t              maxPushConstantsSize;\n    uint32_t              maxMemoryAllocationCount;\n    uint32_t              maxSamplerAllocationCount;\n    VkDeviceSize          bufferImageGranularity;\n    VkDeviceSize          sparseAddressSpaceSize;\n    uint32_t              maxBoundDescriptorSets;\n    uint32_t              maxPerStageDescriptorSamplers;\n    uint32_t              maxPerStageDescriptorUniformBuffers;\n    uint32_t              maxPerStageDescriptorStorageBuffers;\n    uint32_t              maxPerStageDescriptorSampledImages;\n    uint32_t              maxPerStageDescriptorStorageImages;\n    uint32_t              maxPerStageDescriptorInputAttachments;\n    uint32_t              maxPerStageResources;\n    uint32_t              maxDescriptorSetSamplers;\n    uint32_t     
         maxDescriptorSetUniformBuffers;\n    uint32_t              maxDescriptorSetUniformBuffersDynamic;\n    uint32_t              maxDescriptorSetStorageBuffers;\n    uint32_t              maxDescriptorSetStorageBuffersDynamic;\n    uint32_t              maxDescriptorSetSampledImages;\n    uint32_t              maxDescriptorSetStorageImages;\n    uint32_t              maxDescriptorSetInputAttachments;\n    uint32_t              maxVertexInputAttributes;\n    uint32_t              maxVertexInputBindings;\n    uint32_t              maxVertexInputAttributeOffset;\n    uint32_t              maxVertexInputBindingStride;\n    uint32_t              maxVertexOutputComponents;\n    uint32_t              maxTessellationGenerationLevel;\n    uint32_t              maxTessellationPatchSize;\n    uint32_t              maxTessellationControlPerVertexInputComponents;\n    uint32_t              maxTessellationControlPerVertexOutputComponents;\n    uint32_t              maxTessellationControlPerPatchOutputComponents;\n    uint32_t              maxTessellationControlTotalOutputComponents;\n    uint32_t              maxTessellationEvaluationInputComponents;\n    uint32_t              maxTessellationEvaluationOutputComponents;\n    uint32_t              maxGeometryShaderInvocations;\n    uint32_t              maxGeometryInputComponents;\n    uint32_t              maxGeometryOutputComponents;\n    uint32_t              maxGeometryOutputVertices;\n    uint32_t              maxGeometryTotalOutputComponents;\n    uint32_t              maxFragmentInputComponents;\n    uint32_t              maxFragmentOutputAttachments;\n    uint32_t              maxFragmentDualSrcAttachments;\n    uint32_t              maxFragmentCombinedOutputResources;\n    uint32_t              maxComputeSharedMemorySize;\n    uint32_t              maxComputeWorkGroupCount[3];\n    uint32_t              maxComputeWorkGroupInvocations;\n    uint32_t              maxComputeWorkGroupSize[3];\n    uint32_t              
subPixelPrecisionBits;\n    uint32_t              subTexelPrecisionBits;\n    uint32_t              mipmapPrecisionBits;\n    uint32_t              maxDrawIndexedIndexValue;\n    uint32_t              maxDrawIndirectCount;\n    float                 maxSamplerLodBias;\n    float                 maxSamplerAnisotropy;\n    uint32_t              maxViewports;\n    uint32_t              maxViewportDimensions[2];\n    float                 viewportBoundsRange[2];\n    uint32_t              viewportSubPixelBits;\n    size_t                minMemoryMapAlignment;\n    VkDeviceSize          minTexelBufferOffsetAlignment;\n    VkDeviceSize          minUniformBufferOffsetAlignment;\n    VkDeviceSize          minStorageBufferOffsetAlignment;\n    int32_t               minTexelOffset;\n    uint32_t              maxTexelOffset;\n    int32_t               minTexelGatherOffset;\n    uint32_t              maxTexelGatherOffset;\n    float                 minInterpolationOffset;\n    float                 maxInterpolationOffset;\n    uint32_t              subPixelInterpolationOffsetBits;\n    uint32_t              maxFramebufferWidth;\n    uint32_t              maxFramebufferHeight;\n    uint32_t              maxFramebufferLayers;\n    VkSampleCountFlags    framebufferColorSampleCounts;\n    VkSampleCountFlags    framebufferDepthSampleCounts;\n    VkSampleCountFlags    framebufferStencilSampleCounts;\n    VkSampleCountFlags    framebufferNoAttachmentsSampleCounts;\n    uint32_t              maxColorAttachments;\n    VkSampleCountFlags    sampledImageColorSampleCounts;\n    VkSampleCountFlags    sampledImageIntegerSampleCounts;\n    VkSampleCountFlags    sampledImageDepthSampleCounts;\n    VkSampleCountFlags    sampledImageStencilSampleCounts;\n    VkSampleCountFlags    storageImageSampleCounts;\n    uint32_t              maxSampleMaskWords;\n    VkBool32              timestampComputeAndGraphics;\n    float                 timestampPeriod;\n    uint32_t              
maxClipDistances;\n    uint32_t              maxCullDistances;\n    uint32_t              maxCombinedClipAndCullDistances;\n    uint32_t              discreteQueuePriorities;\n    float                 pointSizeRange[2];\n    float                 lineWidthRange[2];\n    float                 pointSizeGranularity;\n    float                 lineWidthGranularity;\n    VkBool32              strictLines;\n    VkBool32              standardSampleLocations;\n    VkDeviceSize          optimalBufferCopyOffsetAlignment;\n    VkDeviceSize          optimalBufferCopyRowPitchAlignment;\n    VkDeviceSize          nonCoherentAtomSize;\n} VkPhysicalDeviceLimits;\n\ntypedef struct VkPhysicalDeviceMemoryProperties {\n    uint32_t        memoryTypeCount;\n    VkMemoryType    memoryTypes[VK_MAX_MEMORY_TYPES];\n    uint32_t        memoryHeapCount;\n    VkMemoryHeap    memoryHeaps[VK_MAX_MEMORY_HEAPS];\n} VkPhysicalDeviceMemoryProperties;\n\ntypedef struct VkPhysicalDeviceSparseProperties {\n    VkBool32    residencyStandard2DBlockShape;\n    VkBool32    residencyStandard2DMultisampleBlockShape;\n    VkBool32    residencyStandard3DBlockShape;\n    VkBool32    residencyAlignedMipSize;\n    VkBool32    residencyNonResidentStrict;\n} VkPhysicalDeviceSparseProperties;\n\ntypedef struct VkPhysicalDeviceProperties {\n    uint32_t                            apiVersion;\n    uint32_t                            driverVersion;\n    uint32_t                            vendorID;\n    uint32_t                            deviceID;\n    VkPhysicalDeviceType                deviceType;\n    char                                deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];\n    uint8_t                             pipelineCacheUUID[VK_UUID_SIZE];\n    VkPhysicalDeviceLimits              limits;\n    VkPhysicalDeviceSparseProperties    sparseProperties;\n} VkPhysicalDeviceProperties;\n\ntypedef struct VkQueueFamilyProperties {\n    VkQueueFlags    queueFlags;\n    uint32_t        queueCount;\n    uint32_t  
      timestampValidBits;\n    VkExtent3D      minImageTransferGranularity;\n} VkQueueFamilyProperties;\n\ntypedef struct VkDeviceQueueCreateInfo {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkDeviceQueueCreateFlags    flags;\n    uint32_t                    queueFamilyIndex;\n    uint32_t                    queueCount;\n    const float*                pQueuePriorities;\n} VkDeviceQueueCreateInfo;\n\ntypedef struct VkDeviceCreateInfo {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkDeviceCreateFlags                flags;\n    uint32_t                           queueCreateInfoCount;\n    const VkDeviceQueueCreateInfo*     pQueueCreateInfos;\n    uint32_t                           enabledLayerCount;\n    const char* const*                 ppEnabledLayerNames;\n    uint32_t                           enabledExtensionCount;\n    const char* const*                 ppEnabledExtensionNames;\n    const VkPhysicalDeviceFeatures*    pEnabledFeatures;\n} VkDeviceCreateInfo;\n\ntypedef struct VkExtensionProperties {\n    char        extensionName[VK_MAX_EXTENSION_NAME_SIZE];\n    uint32_t    specVersion;\n} VkExtensionProperties;\n\ntypedef struct VkLayerProperties {\n    char        layerName[VK_MAX_EXTENSION_NAME_SIZE];\n    uint32_t    specVersion;\n    uint32_t    implementationVersion;\n    char        description[VK_MAX_DESCRIPTION_SIZE];\n} VkLayerProperties;\n\ntypedef struct VkSubmitInfo {\n    VkStructureType                sType;\n    const void*                    pNext;\n    uint32_t                       waitSemaphoreCount;\n    const VkSemaphore*             pWaitSemaphores;\n    const VkPipelineStageFlags*    pWaitDstStageMask;\n    uint32_t                       commandBufferCount;\n    const VkCommandBuffer*         pCommandBuffers;\n    uint32_t                       signalSemaphoreCount;\n    const VkSemaphore*             pSignalSemaphores;\n} 
VkSubmitInfo;\n\ntypedef struct VkMappedMemoryRange {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceMemory     memory;\n    VkDeviceSize       offset;\n    VkDeviceSize       size;\n} VkMappedMemoryRange;\n\ntypedef struct VkMemoryAllocateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceSize       allocationSize;\n    uint32_t           memoryTypeIndex;\n} VkMemoryAllocateInfo;\n\ntypedef struct VkMemoryRequirements {\n    VkDeviceSize    size;\n    VkDeviceSize    alignment;\n    uint32_t        memoryTypeBits;\n} VkMemoryRequirements;\n\ntypedef struct VkSparseMemoryBind {\n    VkDeviceSize               resourceOffset;\n    VkDeviceSize               size;\n    VkDeviceMemory             memory;\n    VkDeviceSize               memoryOffset;\n    VkSparseMemoryBindFlags    flags;\n} VkSparseMemoryBind;\n\ntypedef struct VkSparseBufferMemoryBindInfo {\n    VkBuffer                     buffer;\n    uint32_t                     bindCount;\n    const VkSparseMemoryBind*    pBinds;\n} VkSparseBufferMemoryBindInfo;\n\ntypedef struct VkSparseImageOpaqueMemoryBindInfo {\n    VkImage                      image;\n    uint32_t                     bindCount;\n    const VkSparseMemoryBind*    pBinds;\n} VkSparseImageOpaqueMemoryBindInfo;\n\ntypedef struct VkImageSubresource {\n    VkImageAspectFlags    aspectMask;\n    uint32_t              mipLevel;\n    uint32_t              arrayLayer;\n} VkImageSubresource;\n\ntypedef struct VkSparseImageMemoryBind {\n    VkImageSubresource         subresource;\n    VkOffset3D                 offset;\n    VkExtent3D                 extent;\n    VkDeviceMemory             memory;\n    VkDeviceSize               memoryOffset;\n    VkSparseMemoryBindFlags    flags;\n} VkSparseImageMemoryBind;\n\ntypedef struct VkSparseImageMemoryBindInfo {\n    VkImage                           image;\n    uint32_t                          bindCount;\n    const VkSparseImageMemoryBind*    
pBinds;\n} VkSparseImageMemoryBindInfo;\n\ntypedef struct VkBindSparseInfo {\n    VkStructureType                             sType;\n    const void*                                 pNext;\n    uint32_t                                    waitSemaphoreCount;\n    const VkSemaphore*                          pWaitSemaphores;\n    uint32_t                                    bufferBindCount;\n    const VkSparseBufferMemoryBindInfo*         pBufferBinds;\n    uint32_t                                    imageOpaqueBindCount;\n    const VkSparseImageOpaqueMemoryBindInfo*    pImageOpaqueBinds;\n    uint32_t                                    imageBindCount;\n    const VkSparseImageMemoryBindInfo*          pImageBinds;\n    uint32_t                                    signalSemaphoreCount;\n    const VkSemaphore*                          pSignalSemaphores;\n} VkBindSparseInfo;\n\ntypedef struct VkSparseImageFormatProperties {\n    VkImageAspectFlags          aspectMask;\n    VkExtent3D                  imageGranularity;\n    VkSparseImageFormatFlags    flags;\n} VkSparseImageFormatProperties;\n\ntypedef struct VkSparseImageMemoryRequirements {\n    VkSparseImageFormatProperties    formatProperties;\n    uint32_t                         imageMipTailFirstLod;\n    VkDeviceSize                     imageMipTailSize;\n    VkDeviceSize                     imageMipTailOffset;\n    VkDeviceSize                     imageMipTailStride;\n} VkSparseImageMemoryRequirements;\n\ntypedef struct VkFenceCreateInfo {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkFenceCreateFlags    flags;\n} VkFenceCreateInfo;\n\ntypedef struct VkSemaphoreCreateInfo {\n    VkStructureType           sType;\n    const void*               pNext;\n    VkSemaphoreCreateFlags    flags;\n} VkSemaphoreCreateInfo;\n\ntypedef struct VkEventCreateInfo {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkEventCreateFlags    flags;\n} VkEventCreateInfo;\n\ntypedef struct 
VkQueryPoolCreateInfo {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkQueryPoolCreateFlags           flags;\n    VkQueryType                      queryType;\n    uint32_t                         queryCount;\n    VkQueryPipelineStatisticFlags    pipelineStatistics;\n} VkQueryPoolCreateInfo;\n\ntypedef struct VkBufferCreateInfo {\n    VkStructureType        sType;\n    const void*            pNext;\n    VkBufferCreateFlags    flags;\n    VkDeviceSize           size;\n    VkBufferUsageFlags     usage;\n    VkSharingMode          sharingMode;\n    uint32_t               queueFamilyIndexCount;\n    const uint32_t*        pQueueFamilyIndices;\n} VkBufferCreateInfo;\n\ntypedef struct VkBufferViewCreateInfo {\n    VkStructureType            sType;\n    const void*                pNext;\n    VkBufferViewCreateFlags    flags;\n    VkBuffer                   buffer;\n    VkFormat                   format;\n    VkDeviceSize               offset;\n    VkDeviceSize               range;\n} VkBufferViewCreateInfo;\n\ntypedef struct VkImageCreateInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImageCreateFlags       flags;\n    VkImageType              imageType;\n    VkFormat                 format;\n    VkExtent3D               extent;\n    uint32_t                 mipLevels;\n    uint32_t                 arrayLayers;\n    VkSampleCountFlagBits    samples;\n    VkImageTiling            tiling;\n    VkImageUsageFlags        usage;\n    VkSharingMode            sharingMode;\n    uint32_t                 queueFamilyIndexCount;\n    const uint32_t*          pQueueFamilyIndices;\n    VkImageLayout            initialLayout;\n} VkImageCreateInfo;\n\ntypedef struct VkSubresourceLayout {\n    VkDeviceSize    offset;\n    VkDeviceSize    size;\n    VkDeviceSize    rowPitch;\n    VkDeviceSize    arrayPitch;\n    VkDeviceSize    depthPitch;\n} VkSubresourceLayout;\n\ntypedef struct VkComponentMapping 
{\n    VkComponentSwizzle    r;\n    VkComponentSwizzle    g;\n    VkComponentSwizzle    b;\n    VkComponentSwizzle    a;\n} VkComponentMapping;\n\ntypedef struct VkImageViewCreateInfo {\n    VkStructureType            sType;\n    const void*                pNext;\n    VkImageViewCreateFlags     flags;\n    VkImage                    image;\n    VkImageViewType            viewType;\n    VkFormat                   format;\n    VkComponentMapping         components;\n    VkImageSubresourceRange    subresourceRange;\n} VkImageViewCreateInfo;\n\ntypedef struct VkShaderModuleCreateInfo {\n    VkStructureType              sType;\n    const void*                  pNext;\n    VkShaderModuleCreateFlags    flags;\n    size_t                       codeSize;\n    const uint32_t*              pCode;\n} VkShaderModuleCreateInfo;\n\ntypedef struct VkPipelineCacheCreateInfo {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkPipelineCacheCreateFlags    flags;\n    size_t                        initialDataSize;\n    const void*                   pInitialData;\n} VkPipelineCacheCreateInfo;\n\ntypedef struct VkSpecializationMapEntry {\n    uint32_t    constantID;\n    uint32_t    offset;\n    size_t      size;\n} VkSpecializationMapEntry;\n\ntypedef struct VkSpecializationInfo {\n    uint32_t                           mapEntryCount;\n    const VkSpecializationMapEntry*    pMapEntries;\n    size_t                             dataSize;\n    const void*                        pData;\n} VkSpecializationInfo;\n\ntypedef struct VkPipelineShaderStageCreateInfo {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    VkPipelineShaderStageCreateFlags    flags;\n    VkShaderStageFlagBits               stage;\n    VkShaderModule                      module;\n    const char*                         pName;\n    const VkSpecializationInfo*         pSpecializationInfo;\n} 
VkPipelineShaderStageCreateInfo;\n\ntypedef struct VkComputePipelineCreateInfo {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkPipelineCreateFlags              flags;\n    VkPipelineShaderStageCreateInfo    stage;\n    VkPipelineLayout                   layout;\n    VkPipeline                         basePipelineHandle;\n    int32_t                            basePipelineIndex;\n} VkComputePipelineCreateInfo;\n\ntypedef struct VkVertexInputBindingDescription {\n    uint32_t             binding;\n    uint32_t             stride;\n    VkVertexInputRate    inputRate;\n} VkVertexInputBindingDescription;\n\ntypedef struct VkVertexInputAttributeDescription {\n    uint32_t    location;\n    uint32_t    binding;\n    VkFormat    format;\n    uint32_t    offset;\n} VkVertexInputAttributeDescription;\n\ntypedef struct VkPipelineVertexInputStateCreateInfo {\n    VkStructureType                             sType;\n    const void*                                 pNext;\n    VkPipelineVertexInputStateCreateFlags       flags;\n    uint32_t                                    vertexBindingDescriptionCount;\n    const VkVertexInputBindingDescription*      pVertexBindingDescriptions;\n    uint32_t                                    vertexAttributeDescriptionCount;\n    const VkVertexInputAttributeDescription*    pVertexAttributeDescriptions;\n} VkPipelineVertexInputStateCreateInfo;\n\ntypedef struct VkPipelineInputAssemblyStateCreateInfo {\n    VkStructureType                            sType;\n    const void*                                pNext;\n    VkPipelineInputAssemblyStateCreateFlags    flags;\n    VkPrimitiveTopology                        topology;\n    VkBool32                                   primitiveRestartEnable;\n} VkPipelineInputAssemblyStateCreateInfo;\n\ntypedef struct VkPipelineTessellationStateCreateInfo {\n    VkStructureType                           sType;\n    const void*                               
pNext;\n    VkPipelineTessellationStateCreateFlags    flags;\n    uint32_t                                  patchControlPoints;\n} VkPipelineTessellationStateCreateInfo;\n\ntypedef struct VkViewport {\n    float    x;\n    float    y;\n    float    width;\n    float    height;\n    float    minDepth;\n    float    maxDepth;\n} VkViewport;\n\ntypedef struct VkPipelineViewportStateCreateInfo {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkPipelineViewportStateCreateFlags    flags;\n    uint32_t                              viewportCount;\n    const VkViewport*                     pViewports;\n    uint32_t                              scissorCount;\n    const VkRect2D*                       pScissors;\n} VkPipelineViewportStateCreateInfo;\n\ntypedef struct VkPipelineRasterizationStateCreateInfo {\n    VkStructureType                            sType;\n    const void*                                pNext;\n    VkPipelineRasterizationStateCreateFlags    flags;\n    VkBool32                                   depthClampEnable;\n    VkBool32                                   rasterizerDiscardEnable;\n    VkPolygonMode                              polygonMode;\n    VkCullModeFlags                            cullMode;\n    VkFrontFace                                frontFace;\n    VkBool32                                   depthBiasEnable;\n    float                                      depthBiasConstantFactor;\n    float                                      depthBiasClamp;\n    float                                      depthBiasSlopeFactor;\n    float                                      lineWidth;\n} VkPipelineRasterizationStateCreateInfo;\n\ntypedef struct VkPipelineMultisampleStateCreateInfo {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkPipelineMultisampleStateCreateFlags    flags;\n    VkSampleCountFlagBits                    
rasterizationSamples;\n    VkBool32                                 sampleShadingEnable;\n    float                                    minSampleShading;\n    const VkSampleMask*                      pSampleMask;\n    VkBool32                                 alphaToCoverageEnable;\n    VkBool32                                 alphaToOneEnable;\n} VkPipelineMultisampleStateCreateInfo;\n\ntypedef struct VkStencilOpState {\n    VkStencilOp    failOp;\n    VkStencilOp    passOp;\n    VkStencilOp    depthFailOp;\n    VkCompareOp    compareOp;\n    uint32_t       compareMask;\n    uint32_t       writeMask;\n    uint32_t       reference;\n} VkStencilOpState;\n\ntypedef struct VkPipelineDepthStencilStateCreateInfo {\n    VkStructureType                           sType;\n    const void*                               pNext;\n    VkPipelineDepthStencilStateCreateFlags    flags;\n    VkBool32                                  depthTestEnable;\n    VkBool32                                  depthWriteEnable;\n    VkCompareOp                               depthCompareOp;\n    VkBool32                                  depthBoundsTestEnable;\n    VkBool32                                  stencilTestEnable;\n    VkStencilOpState                          front;\n    VkStencilOpState                          back;\n    float                                     minDepthBounds;\n    float                                     maxDepthBounds;\n} VkPipelineDepthStencilStateCreateInfo;\n\ntypedef struct VkPipelineColorBlendAttachmentState {\n    VkBool32                 blendEnable;\n    VkBlendFactor            srcColorBlendFactor;\n    VkBlendFactor            dstColorBlendFactor;\n    VkBlendOp                colorBlendOp;\n    VkBlendFactor            srcAlphaBlendFactor;\n    VkBlendFactor            dstAlphaBlendFactor;\n    VkBlendOp                alphaBlendOp;\n    VkColorComponentFlags    colorWriteMask;\n} VkPipelineColorBlendAttachmentState;\n\ntypedef struct 
VkPipelineColorBlendStateCreateInfo {\n    VkStructureType                               sType;\n    const void*                                   pNext;\n    VkPipelineColorBlendStateCreateFlags          flags;\n    VkBool32                                      logicOpEnable;\n    VkLogicOp                                     logicOp;\n    uint32_t                                      attachmentCount;\n    const VkPipelineColorBlendAttachmentState*    pAttachments;\n    float                                         blendConstants[4];\n} VkPipelineColorBlendStateCreateInfo;\n\ntypedef struct VkPipelineDynamicStateCreateInfo {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkPipelineDynamicStateCreateFlags    flags;\n    uint32_t                             dynamicStateCount;\n    const VkDynamicState*                pDynamicStates;\n} VkPipelineDynamicStateCreateInfo;\n\ntypedef struct VkGraphicsPipelineCreateInfo {\n    VkStructureType                                  sType;\n    const void*                                      pNext;\n    VkPipelineCreateFlags                            flags;\n    uint32_t                                         stageCount;\n    const VkPipelineShaderStageCreateInfo*           pStages;\n    const VkPipelineVertexInputStateCreateInfo*      pVertexInputState;\n    const VkPipelineInputAssemblyStateCreateInfo*    pInputAssemblyState;\n    const VkPipelineTessellationStateCreateInfo*     pTessellationState;\n    const VkPipelineViewportStateCreateInfo*         pViewportState;\n    const VkPipelineRasterizationStateCreateInfo*    pRasterizationState;\n    const VkPipelineMultisampleStateCreateInfo*      pMultisampleState;\n    const VkPipelineDepthStencilStateCreateInfo*     pDepthStencilState;\n    const VkPipelineColorBlendStateCreateInfo*       pColorBlendState;\n    const VkPipelineDynamicStateCreateInfo*          pDynamicState;\n    VkPipelineLayout                          
       layout;\n    VkRenderPass                                     renderPass;\n    uint32_t                                         subpass;\n    VkPipeline                                       basePipelineHandle;\n    int32_t                                          basePipelineIndex;\n} VkGraphicsPipelineCreateInfo;\n\ntypedef struct VkPushConstantRange {\n    VkShaderStageFlags    stageFlags;\n    uint32_t              offset;\n    uint32_t              size;\n} VkPushConstantRange;\n\ntypedef struct VkPipelineLayoutCreateInfo {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkPipelineLayoutCreateFlags     flags;\n    uint32_t                        setLayoutCount;\n    const VkDescriptorSetLayout*    pSetLayouts;\n    uint32_t                        pushConstantRangeCount;\n    const VkPushConstantRange*      pPushConstantRanges;\n} VkPipelineLayoutCreateInfo;\n\ntypedef struct VkSamplerCreateInfo {\n    VkStructureType         sType;\n    const void*             pNext;\n    VkSamplerCreateFlags    flags;\n    VkFilter                magFilter;\n    VkFilter                minFilter;\n    VkSamplerMipmapMode     mipmapMode;\n    VkSamplerAddressMode    addressModeU;\n    VkSamplerAddressMode    addressModeV;\n    VkSamplerAddressMode    addressModeW;\n    float                   mipLodBias;\n    VkBool32                anisotropyEnable;\n    float                   maxAnisotropy;\n    VkBool32                compareEnable;\n    VkCompareOp             compareOp;\n    float                   minLod;\n    float                   maxLod;\n    VkBorderColor           borderColor;\n    VkBool32                unnormalizedCoordinates;\n} VkSamplerCreateInfo;\n\ntypedef struct VkCopyDescriptorSet {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDescriptorSet    srcSet;\n    uint32_t           srcBinding;\n    uint32_t           srcArrayElement;\n    VkDescriptorSet    dstSet;\n    uint32_t         
  dstBinding;\n    uint32_t           dstArrayElement;\n    uint32_t           descriptorCount;\n} VkCopyDescriptorSet;\n\ntypedef struct VkDescriptorBufferInfo {\n    VkBuffer        buffer;\n    VkDeviceSize    offset;\n    VkDeviceSize    range;\n} VkDescriptorBufferInfo;\n\ntypedef struct VkDescriptorImageInfo {\n    VkSampler        sampler;\n    VkImageView      imageView;\n    VkImageLayout    imageLayout;\n} VkDescriptorImageInfo;\n\ntypedef struct VkDescriptorPoolSize {\n    VkDescriptorType    type;\n    uint32_t            descriptorCount;\n} VkDescriptorPoolSize;\n\ntypedef struct VkDescriptorPoolCreateInfo {\n    VkStructureType                sType;\n    const void*                    pNext;\n    VkDescriptorPoolCreateFlags    flags;\n    uint32_t                       maxSets;\n    uint32_t                       poolSizeCount;\n    const VkDescriptorPoolSize*    pPoolSizes;\n} VkDescriptorPoolCreateInfo;\n\ntypedef struct VkDescriptorSetAllocateInfo {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkDescriptorPool                descriptorPool;\n    uint32_t                        descriptorSetCount;\n    const VkDescriptorSetLayout*    pSetLayouts;\n} VkDescriptorSetAllocateInfo;\n\ntypedef struct VkDescriptorSetLayoutBinding {\n    uint32_t              binding;\n    VkDescriptorType      descriptorType;\n    uint32_t              descriptorCount;\n    VkShaderStageFlags    stageFlags;\n    const VkSampler*      pImmutableSamplers;\n} VkDescriptorSetLayoutBinding;\n\ntypedef struct VkDescriptorSetLayoutCreateInfo {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    VkDescriptorSetLayoutCreateFlags       flags;\n    uint32_t                               bindingCount;\n    const VkDescriptorSetLayoutBinding*    pBindings;\n} VkDescriptorSetLayoutCreateInfo;\n\ntypedef struct VkWriteDescriptorSet {\n    VkStructureType                  sType;\n    
const void*                      pNext;\n    VkDescriptorSet                  dstSet;\n    uint32_t                         dstBinding;\n    uint32_t                         dstArrayElement;\n    uint32_t                         descriptorCount;\n    VkDescriptorType                 descriptorType;\n    const VkDescriptorImageInfo*     pImageInfo;\n    const VkDescriptorBufferInfo*    pBufferInfo;\n    const VkBufferView*              pTexelBufferView;\n} VkWriteDescriptorSet;\n\ntypedef struct VkAttachmentDescription {\n    VkAttachmentDescriptionFlags    flags;\n    VkFormat                        format;\n    VkSampleCountFlagBits           samples;\n    VkAttachmentLoadOp              loadOp;\n    VkAttachmentStoreOp             storeOp;\n    VkAttachmentLoadOp              stencilLoadOp;\n    VkAttachmentStoreOp             stencilStoreOp;\n    VkImageLayout                   initialLayout;\n    VkImageLayout                   finalLayout;\n} VkAttachmentDescription;\n\ntypedef struct VkAttachmentReference {\n    uint32_t         attachment;\n    VkImageLayout    layout;\n} VkAttachmentReference;\n\ntypedef struct VkFramebufferCreateInfo {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkFramebufferCreateFlags    flags;\n    VkRenderPass                renderPass;\n    uint32_t                    attachmentCount;\n    const VkImageView*          pAttachments;\n    uint32_t                    width;\n    uint32_t                    height;\n    uint32_t                    layers;\n} VkFramebufferCreateInfo;\n\ntypedef struct VkSubpassDescription {\n    VkSubpassDescriptionFlags       flags;\n    VkPipelineBindPoint             pipelineBindPoint;\n    uint32_t                        inputAttachmentCount;\n    const VkAttachmentReference*    pInputAttachments;\n    uint32_t                        colorAttachmentCount;\n    const VkAttachmentReference*    pColorAttachments;\n    const VkAttachmentReference*    
pResolveAttachments;\n    const VkAttachmentReference*    pDepthStencilAttachment;\n    uint32_t                        preserveAttachmentCount;\n    const uint32_t*                 pPreserveAttachments;\n} VkSubpassDescription;\n\ntypedef struct VkSubpassDependency {\n    uint32_t                srcSubpass;\n    uint32_t                dstSubpass;\n    VkPipelineStageFlags    srcStageMask;\n    VkPipelineStageFlags    dstStageMask;\n    VkAccessFlags           srcAccessMask;\n    VkAccessFlags           dstAccessMask;\n    VkDependencyFlags       dependencyFlags;\n} VkSubpassDependency;\n\ntypedef struct VkRenderPassCreateInfo {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkRenderPassCreateFlags           flags;\n    uint32_t                          attachmentCount;\n    const VkAttachmentDescription*    pAttachments;\n    uint32_t                          subpassCount;\n    const VkSubpassDescription*       pSubpasses;\n    uint32_t                          dependencyCount;\n    const VkSubpassDependency*        pDependencies;\n} VkRenderPassCreateInfo;\n\ntypedef struct VkCommandPoolCreateInfo {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkCommandPoolCreateFlags    flags;\n    uint32_t                    queueFamilyIndex;\n} VkCommandPoolCreateInfo;\n\ntypedef struct VkCommandBufferAllocateInfo {\n    VkStructureType         sType;\n    const void*             pNext;\n    VkCommandPool           commandPool;\n    VkCommandBufferLevel    level;\n    uint32_t                commandBufferCount;\n} VkCommandBufferAllocateInfo;\n\ntypedef struct VkCommandBufferInheritanceInfo {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkRenderPass                     renderPass;\n    uint32_t                         subpass;\n    VkFramebuffer                    framebuffer;\n    VkBool32                         occlusionQueryEnable;\n  
  VkQueryControlFlags              queryFlags;\n    VkQueryPipelineStatisticFlags    pipelineStatistics;\n} VkCommandBufferInheritanceInfo;\n\ntypedef struct VkCommandBufferBeginInfo {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkCommandBufferUsageFlags                flags;\n    const VkCommandBufferInheritanceInfo*    pInheritanceInfo;\n} VkCommandBufferBeginInfo;\n\ntypedef struct VkBufferCopy {\n    VkDeviceSize    srcOffset;\n    VkDeviceSize    dstOffset;\n    VkDeviceSize    size;\n} VkBufferCopy;\n\ntypedef struct VkImageSubresourceLayers {\n    VkImageAspectFlags    aspectMask;\n    uint32_t              mipLevel;\n    uint32_t              baseArrayLayer;\n    uint32_t              layerCount;\n} VkImageSubresourceLayers;\n\ntypedef struct VkBufferImageCopy {\n    VkDeviceSize                bufferOffset;\n    uint32_t                    bufferRowLength;\n    uint32_t                    bufferImageHeight;\n    VkImageSubresourceLayers    imageSubresource;\n    VkOffset3D                  imageOffset;\n    VkExtent3D                  imageExtent;\n} VkBufferImageCopy;\n\ntypedef union VkClearColorValue {\n    float       float32[4];\n    int32_t     int32[4];\n    uint32_t    uint32[4];\n} VkClearColorValue;\n\ntypedef struct VkClearDepthStencilValue {\n    float       depth;\n    uint32_t    stencil;\n} VkClearDepthStencilValue;\n\ntypedef union VkClearValue {\n    VkClearColorValue           color;\n    VkClearDepthStencilValue    depthStencil;\n} VkClearValue;\n\ntypedef struct VkClearAttachment {\n    VkImageAspectFlags    aspectMask;\n    uint32_t              colorAttachment;\n    VkClearValue          clearValue;\n} VkClearAttachment;\n\ntypedef struct VkClearRect {\n    VkRect2D    rect;\n    uint32_t    baseArrayLayer;\n    uint32_t    layerCount;\n} VkClearRect;\n\ntypedef struct VkImageBlit {\n    VkImageSubresourceLayers    srcSubresource;\n    VkOffset3D                  
srcOffsets[2];\n    VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffsets[2];\n} VkImageBlit;\n\ntypedef struct VkImageCopy {\n    VkImageSubresourceLayers    srcSubresource;\n    VkOffset3D                  srcOffset;\n    VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffset;\n    VkExtent3D                  extent;\n} VkImageCopy;\n\ntypedef struct VkImageResolve {\n    VkImageSubresourceLayers    srcSubresource;\n    VkOffset3D                  srcOffset;\n    VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffset;\n    VkExtent3D                  extent;\n} VkImageResolve;\n\ntypedef struct VkRenderPassBeginInfo {\n    VkStructureType        sType;\n    const void*            pNext;\n    VkRenderPass           renderPass;\n    VkFramebuffer          framebuffer;\n    VkRect2D               renderArea;\n    uint32_t               clearValueCount;\n    const VkClearValue*    pClearValues;\n} VkRenderPassBeginInfo;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);\ntypedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* 
pImageFormatProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);\ntypedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);\ntypedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);\ntypedef VkResult 
(VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);\ntypedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);\ntypedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);\ntypedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);\ntypedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);\ntypedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);\ntypedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags 
usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);\ntypedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);\ntypedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);\ntypedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);\ntypedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);\ntypedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);\ntypedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);\ntypedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const 
VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);\ntypedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);\ntypedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);\ntypedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);\ntypedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);\ntypedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR 
*PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);\ntypedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);\ntypedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);\ntypedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);\ntypedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);\ntypedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* 
pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);\ntypedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);\ntypedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);\ntypedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);\ntypedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);\ntypedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR 
*PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);\ntypedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);\ntypedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);\ntypedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);\ntypedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);\ntypedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);\ntypedef void (VKAPI_PTR 
*PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);\ntypedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);\ntypedef void (VKAPI_PTR 
*PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);\ntypedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);\ntypedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);\ntypedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);\ntypedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);\ntypedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);\ntypedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const 
VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);\ntypedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);\ntypedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);\ntypedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);\ntypedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool 
queryPool, uint32_t firstQuery, uint32_t queryCount);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);\ntypedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents);\ntypedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer);\ntypedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(\n    const VkInstanceCreateInfo*                 pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkInstance*                                 pInstance);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyInstance(\n    VkInstance                                  instance,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(\n    VkInstance                                  instance,\n    uint32_t*                                   pPhysicalDeviceCount,\n    VkPhysicalDevice*                           pPhysicalDevices);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceFeatures* 
                  pFeatures);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkFormatProperties*                         pFormatProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkImageType                                 type,\n    VkImageTiling                               tiling,\n    VkImageUsageFlags                           usage,\n    VkImageCreateFlags                          flags,\n    VkImageFormatProperties*                    pImageFormatProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceProperties*                 pProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pQueueFamilyPropertyCount,\n    VkQueueFamilyProperties*                    pQueueFamilyProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceMemoryProperties*           pMemoryProperties);\n\nVKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(\n    VkInstance                                  instance,\n    const char*                                 pName);\n\nVKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(\n    VkDevice                                    device,\n    const char*                                 pName);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(\n    VkPhysicalDevice                            physicalDevice,\n    const VkDeviceCreateInfo*                   
pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDevice*                                   pDevice);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDevice(\n    VkDevice                                    device,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(\n    const char*                                 pLayerName,\n    uint32_t*                                   pPropertyCount,\n    VkExtensionProperties*                      pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(\n    VkPhysicalDevice                            physicalDevice,\n    const char*                                 pLayerName,\n    uint32_t*                                   pPropertyCount,\n    VkExtensionProperties*                      pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(\n    uint32_t*                                   pPropertyCount,\n    VkLayerProperties*                          pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkLayerProperties*                          pProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(\n    VkDevice                                    device,\n    uint32_t                                    queueFamilyIndex,\n    uint32_t                                    queueIndex,\n    VkQueue*                                    pQueue);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(\n    VkQueue                                     queue,\n    uint32_t                                    submitCount,\n    const VkSubmitInfo*                         pSubmits,\n    VkFence                                     fence);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(\n    VkQueue                
                     queue);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(\n    VkDevice                                    device);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(\n    VkDevice                                    device,\n    const VkMemoryAllocateInfo*                 pAllocateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDeviceMemory*                             pMemory);\n\nVKAPI_ATTR void VKAPI_CALL vkFreeMemory(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory,\n    VkDeviceSize                                offset,\n    VkDeviceSize                                size,\n    VkMemoryMapFlags                            flags,\n    void**                                      ppData);\n\nVKAPI_ATTR void VKAPI_CALL vkUnmapMemory(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(\n    VkDevice                                    device,\n    uint32_t                                    memoryRangeCount,\n    const VkMappedMemoryRange*                  pMemoryRanges);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(\n    VkDevice                                    device,\n    uint32_t                                    memoryRangeCount,\n    const VkMappedMemoryRange*                  pMemoryRanges);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory,\n    VkDeviceSize*                               pCommittedMemoryInBytes);\n\nVKAPI_ATTR VkResult VKAPI_CALL 
vkBindBufferMemory(\n    VkDevice                                    device,\n    VkBuffer                                    buffer,\n    VkDeviceMemory                              memory,\n    VkDeviceSize                                memoryOffset);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(\n    VkDevice                                    device,\n    VkImage                                     image,\n    VkDeviceMemory                              memory,\n    VkDeviceSize                                memoryOffset);\n\nVKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(\n    VkDevice                                    device,\n    VkBuffer                                    buffer,\n    VkMemoryRequirements*                       pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(\n    VkDevice                                    device,\n    VkImage                                     image,\n    VkMemoryRequirements*                       pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(\n    VkDevice                                    device,\n    VkImage                                     image,\n    uint32_t*                                   pSparseMemoryRequirementCount,\n    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkImageType                                 type,\n    VkSampleCountFlagBits                       samples,\n    VkImageUsageFlags                           usage,\n    VkImageTiling                               tiling,\n    uint32_t*                                   pPropertyCount,\n    VkSparseImageFormatProperties*              pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(\n    VkQueue             
                        queue,\n    uint32_t                                    bindInfoCount,\n    const VkBindSparseInfo*                     pBindInfo,\n    VkFence                                     fence);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(\n    VkDevice                                    device,\n    const VkFenceCreateInfo*                    pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkFence*                                    pFence);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyFence(\n    VkDevice                                    device,\n    VkFence                                     fence,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkResetFences(\n    VkDevice                                    device,\n    uint32_t                                    fenceCount,\n    const VkFence*                              pFences);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(\n    VkDevice                                    device,\n    VkFence                                     fence);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(\n    VkDevice                                    device,\n    uint32_t                                    fenceCount,\n    const VkFence*                              pFences,\n    VkBool32                                    waitAll,\n    uint64_t                                    timeout);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(\n    VkDevice                                    device,\n    const VkSemaphoreCreateInfo*                pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSemaphore*                                pSemaphore);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(\n    VkDevice                                    device,\n    VkSemaphore                                 semaphore,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR 
VkResult VKAPI_CALL vkCreateEvent(\n    VkDevice                                    device,\n    const VkEventCreateInfo*                    pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkEvent*                                    pEvent);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyEvent(\n    VkDevice                                    device,\n    VkEvent                                     event,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(\n    VkDevice                                    device,\n    VkEvent                                     event);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(\n    VkDevice                                    device,\n    VkEvent                                     event);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(\n    VkDevice                                    device,\n    VkEvent                                     event);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(\n    VkDevice                                    device,\n    const VkQueryPoolCreateInfo*                pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkQueryPool*                                pQueryPool);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(\n    VkDevice                                    device,\n    VkQueryPool                                 queryPool,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(\n    VkDevice                                    device,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery,\n    uint32_t                                    queryCount,\n    size_t                                      dataSize,\n    void*                                       pData,\n    VkDeviceSize                                stride,\n    VkQueryResultFlags   
                       flags);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(\n    VkDevice                                    device,\n    const VkBufferCreateInfo*                   pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkBuffer*                                   pBuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(\n    VkDevice                                    device,\n    VkBuffer                                    buffer,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(\n    VkDevice                                    device,\n    const VkBufferViewCreateInfo*               pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkBufferView*                               pView);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(\n    VkDevice                                    device,\n    VkBufferView                                bufferView,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(\n    VkDevice                                    device,\n    const VkImageCreateInfo*                    pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkImage*                                    pImage);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyImage(\n    VkDevice                                    device,\n    VkImage                                     image,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(\n    VkDevice                                    device,\n    VkImage                                     image,\n    const VkImageSubresource*                   pSubresource,\n    VkSubresourceLayout*                        pLayout);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(\n    VkDevice                                    device,\n    const 
VkImageViewCreateInfo*                pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkImageView*                                pView);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyImageView(\n    VkDevice                                    device,\n    VkImageView                                 imageView,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(\n    VkDevice                                    device,\n    const VkShaderModuleCreateInfo*             pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkShaderModule*                             pShaderModule);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(\n    VkDevice                                    device,\n    VkShaderModule                              shaderModule,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(\n    VkDevice                                    device,\n    const VkPipelineCacheCreateInfo*            pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipelineCache*                            pPipelineCache);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(\n    VkDevice                                    device,\n    VkPipelineCache                             pipelineCache,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(\n    VkDevice                                    device,\n    VkPipelineCache                             pipelineCache,\n    size_t*                                     pDataSize,\n    void*                                       pData);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(\n    VkDevice                                    device,\n    VkPipelineCache                             dstCache,\n    uint32_t                                    
srcCacheCount,\n    const VkPipelineCache*                      pSrcCaches);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(\n    VkDevice                                    device,\n    VkPipelineCache                             pipelineCache,\n    uint32_t                                    createInfoCount,\n    const VkGraphicsPipelineCreateInfo*         pCreateInfos,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipeline*                                 pPipelines);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(\n    VkDevice                                    device,\n    VkPipelineCache                             pipelineCache,\n    uint32_t                                    createInfoCount,\n    const VkComputePipelineCreateInfo*          pCreateInfos,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipeline*                                 pPipelines);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(\n    VkDevice                                    device,\n    const VkPipelineLayoutCreateInfo*           pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipelineLayout*                           pPipelineLayout);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(\n    VkDevice                                    device,\n    VkPipelineLayout                            pipelineLayout,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(\n    VkDevice                                    device,\n    const VkSamplerCreateInfo*                  pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSampler*                                  
pSampler);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroySampler(\n    VkDevice                                    device,\n    VkSampler                                   sampler,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(\n    VkDevice                                    device,\n    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDescriptorSetLayout*                      pSetLayout);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(\n    VkDevice                                    device,\n    VkDescriptorSetLayout                       descriptorSetLayout,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(\n    VkDevice                                    device,\n    const VkDescriptorPoolCreateInfo*           pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDescriptorPool*                           pDescriptorPool);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(\n    VkDevice                                    device,\n    VkDescriptorPool                            descriptorPool,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(\n    VkDevice                                    device,\n    VkDescriptorPool                            descriptorPool,\n    VkDescriptorPoolResetFlags                  flags);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(\n    VkDevice                                    device,\n    const VkDescriptorSetAllocateInfo*          pAllocateInfo,\n    VkDescriptorSet*                            pDescriptorSets);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(\n    VkDevice                                    device,\n    VkDescriptorPool                            descriptorPool,\n   
 uint32_t                                    descriptorSetCount,\n    const VkDescriptorSet*                      pDescriptorSets);\n\nVKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(\n    VkDevice                                    device,\n    uint32_t                                    descriptorWriteCount,\n    const VkWriteDescriptorSet*                 pDescriptorWrites,\n    uint32_t                                    descriptorCopyCount,\n    const VkCopyDescriptorSet*                  pDescriptorCopies);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(\n    VkDevice                                    device,\n    const VkFramebufferCreateInfo*              pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkFramebuffer*                              pFramebuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(\n    VkDevice                                    device,\n    VkFramebuffer                               framebuffer,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(\n    VkDevice                                    device,\n    const VkRenderPassCreateInfo*               pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkRenderPass*                               pRenderPass);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(\n    VkDevice                                    device,\n    VkRenderPass                                renderPass,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(\n    VkDevice                                    device,\n    VkRenderPass                                renderPass,\n    VkExtent2D*                                 pGranularity);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(\n    VkDevice                                    device,\n    const VkCommandPoolCreateInfo*              pCreateInfo,\n    
const VkAllocationCallbacks*                pAllocator,\n    VkCommandPool*                              pCommandPool);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(\n    VkDevice                                    device,\n    VkCommandPool                               commandPool,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(\n    VkDevice                                    device,\n    VkCommandPool                               commandPool,\n    VkCommandPoolResetFlags                     flags);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(\n    VkDevice                                    device,\n    const VkCommandBufferAllocateInfo*          pAllocateInfo,\n    VkCommandBuffer*                            pCommandBuffers);\n\nVKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(\n    VkDevice                                    device,\n    VkCommandPool                               commandPool,\n    uint32_t                                    commandBufferCount,\n    const VkCommandBuffer*                      pCommandBuffers);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(\n    VkCommandBuffer                             commandBuffer,\n    const VkCommandBufferBeginInfo*             pBeginInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(\n    VkCommandBuffer                             commandBuffer);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(\n    VkCommandBuffer                             commandBuffer,\n    VkCommandBufferResetFlags                   flags);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineBindPoint                         pipelineBindPoint,\n    VkPipeline                                  pipeline);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                  
                  firstViewport,\n    uint32_t                                    viewportCount,\n    const VkViewport*                           pViewports);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstScissor,\n    uint32_t                                    scissorCount,\n    const VkRect2D*                             pScissors);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(\n    VkCommandBuffer                             commandBuffer,\n    float                                       lineWidth);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(\n    VkCommandBuffer                             commandBuffer,\n    float                                       depthBiasConstantFactor,\n    float                                       depthBiasClamp,\n    float                                       depthBiasSlopeFactor);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(\n    VkCommandBuffer                             commandBuffer,\n    const float                                 blendConstants[4]);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(\n    VkCommandBuffer                             commandBuffer,\n    float                                       minDepthBounds,\n    float                                       maxDepthBounds);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(\n    VkCommandBuffer                             commandBuffer,\n    VkStencilFaceFlags                          faceMask,\n    uint32_t                                    compareMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(\n    VkCommandBuffer                             commandBuffer,\n    VkStencilFaceFlags                          faceMask,\n    uint32_t                                    writeMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(\n    VkCommandBuffer                             commandBuffer,\n    
VkStencilFaceFlags                          faceMask,\n    uint32_t                                    reference);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineBindPoint                         pipelineBindPoint,\n    VkPipelineLayout                            layout,\n    uint32_t                                    firstSet,\n    uint32_t                                    descriptorSetCount,\n    const VkDescriptorSet*                      pDescriptorSets,\n    uint32_t                                    dynamicOffsetCount,\n    const uint32_t*                             pDynamicOffsets);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkIndexType                                 indexType);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstBinding,\n    uint32_t                                    bindingCount,\n    const VkBuffer*                             pBuffers,\n    const VkDeviceSize*                         pOffsets);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDraw(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    vertexCount,\n    uint32_t                                    instanceCount,\n    uint32_t                                    firstVertex,\n    uint32_t                                    firstInstance);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    indexCount,\n    uint32_t                                    instanceCount,\n    uint32_t                                    
firstIndex,\n    int32_t                                     vertexOffset,\n    uint32_t                                    firstInstance);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    uint32_t                                    drawCount,\n    uint32_t                                    stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    uint32_t                                    drawCount,\n    uint32_t                                    stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDispatch(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    groupCountX,\n    uint32_t                                    groupCountY,\n    uint32_t                                    groupCountZ);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    srcBuffer,\n    VkBuffer                                    dstBuffer,\n    uint32_t                                    regionCount,\n    const VkBufferCopy*                         pRegions);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     srcImage,\n    VkImageLayout                               srcImageLayout,\n    VkImage                                    
 dstImage,\n    VkImageLayout                               dstImageLayout,\n    uint32_t                                    regionCount,\n    const VkImageCopy*                          pRegions);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     srcImage,\n    VkImageLayout                               srcImageLayout,\n    VkImage                                     dstImage,\n    VkImageLayout                               dstImageLayout,\n    uint32_t                                    regionCount,\n    const VkImageBlit*                          pRegions,\n    VkFilter                                    filter);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    srcBuffer,\n    VkImage                                     dstImage,\n    VkImageLayout                               dstImageLayout,\n    uint32_t                                    regionCount,\n    const VkBufferImageCopy*                    pRegions);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     srcImage,\n    VkImageLayout                               srcImageLayout,\n    VkBuffer                                    dstBuffer,\n    uint32_t                                    regionCount,\n    const VkBufferImageCopy*                    pRegions);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    dstBuffer,\n    VkDeviceSize                                dstOffset,\n    VkDeviceSize                                dataSize,\n    const void*                                 pData);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(\n    VkCommandBuffer 
                            commandBuffer,\n    VkBuffer                                    dstBuffer,\n    VkDeviceSize                                dstOffset,\n    VkDeviceSize                                size,\n    uint32_t                                    data);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     image,\n    VkImageLayout                               imageLayout,\n    const VkClearColorValue*                    pColor,\n    uint32_t                                    rangeCount,\n    const VkImageSubresourceRange*              pRanges);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     image,\n    VkImageLayout                               imageLayout,\n    const VkClearDepthStencilValue*             pDepthStencil,\n    uint32_t                                    rangeCount,\n    const VkImageSubresourceRange*              pRanges);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    attachmentCount,\n    const VkClearAttachment*                    pAttachments,\n    uint32_t                                    rectCount,\n    const VkClearRect*                          pRects);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(\n    VkCommandBuffer                             commandBuffer,\n    VkImage                                     srcImage,\n    VkImageLayout                               srcImageLayout,\n    VkImage                                     dstImage,\n    VkImageLayout                               dstImageLayout,\n    uint32_t                                    regionCount,\n    const VkImageResolve*                       pRegions);\n\nVKAPI_ATTR void VKAPI_CALL 
vkCmdSetEvent(\n    VkCommandBuffer                             commandBuffer,\n    VkEvent                                     event,\n    VkPipelineStageFlags                        stageMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(\n    VkCommandBuffer                             commandBuffer,\n    VkEvent                                     event,\n    VkPipelineStageFlags                        stageMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    eventCount,\n    const VkEvent*                              pEvents,\n    VkPipelineStageFlags                        srcStageMask,\n    VkPipelineStageFlags                        dstStageMask,\n    uint32_t                                    memoryBarrierCount,\n    const VkMemoryBarrier*                      pMemoryBarriers,\n    uint32_t                                    bufferMemoryBarrierCount,\n    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,\n    uint32_t                                    imageMemoryBarrierCount,\n    const VkImageMemoryBarrier*                 pImageMemoryBarriers);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlags                        srcStageMask,\n    VkPipelineStageFlags                        dstStageMask,\n    VkDependencyFlags                           dependencyFlags,\n    uint32_t                                    memoryBarrierCount,\n    const VkMemoryBarrier*                      pMemoryBarriers,\n    uint32_t                                    bufferMemoryBarrierCount,\n    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,\n    uint32_t                                    imageMemoryBarrierCount,\n    const VkImageMemoryBarrier*                 pImageMemoryBarriers);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(\n    
VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query,\n    VkQueryControlFlags                         flags);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(\n    VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(\n    VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery,\n    uint32_t                                    queryCount);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlagBits                     pipelineStage,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(\n    VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery,\n    uint32_t                                    queryCount,\n    VkBuffer                                    dstBuffer,\n    VkDeviceSize                                dstOffset,\n    VkDeviceSize                                stride,\n    VkQueryResultFlags                          flags);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineLayout                            layout,\n    VkShaderStageFlags                          stageFlags,\n    uint32_t                                    offset,\n    uint32_t                                    size,\n    const void*                                 
pValues);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(\n    VkCommandBuffer                             commandBuffer,\n    const VkRenderPassBeginInfo*                pRenderPassBegin,\n    VkSubpassContents                           contents);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(\n    VkCommandBuffer                             commandBuffer,\n    VkSubpassContents                           contents);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(\n    VkCommandBuffer                             commandBuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    commandBufferCount,\n    const VkCommandBuffer*                      pCommandBuffers);\n#endif\n\n\n#define VK_VERSION_1_1 1\n// Vulkan 1.1 version number\n#define VK_API_VERSION_1_1 VK_MAKE_API_VERSION(0, 1, 1, 0)// Patch version should always be set to 0\n\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)\n#define VK_MAX_DEVICE_GROUP_SIZE          32U\n#define VK_LUID_SIZE                      8U\n#define VK_QUEUE_FAMILY_EXTERNAL          (~1U)\n\ntypedef enum VkPointClippingBehavior {\n    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,\n    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,\n    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,\n    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,\n    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF\n} VkPointClippingBehavior;\n\ntypedef enum VkTessellationDomainOrigin {\n    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,\n    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,\n    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,\n    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = 
VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,\n    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF\n} VkTessellationDomainOrigin;\n\ntypedef enum VkSamplerYcbcrModelConversion {\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,\n    VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerYcbcrModelConversion;\n\ntypedef enum VkSamplerYcbcrRange {\n    VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,\n    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,\n    VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,\n    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,\n    VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerYcbcrRange;\n\ntypedef enum VkChromaLocation {\n    VK_CHROMA_LOCATION_COSITED_EVEN = 0,\n    VK_CHROMA_LOCATION_MIDPOINT = 1,\n    VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN,\n    VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT,\n    VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF\n} VkChromaLocation;\n\ntypedef enum VkDescriptorUpdateTemplateType {\n    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0,\n    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1,\n    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = 
VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,\n    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkDescriptorUpdateTemplateType;\n\ntypedef enum VkSubgroupFeatureFlagBits {\n    VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,\n    VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,\n    VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,\n    VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,\n    VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,\n    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,\n    VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,\n    VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,\n    VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100,\n    VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSubgroupFeatureFlagBits;\ntypedef VkFlags VkSubgroupFeatureFlags;\n\ntypedef enum VkPeerMemoryFeatureFlagBits {\n    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001,\n    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002,\n    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004,\n    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008,\n    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT,\n    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,\n    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT,\n    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT,\n    VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkPeerMemoryFeatureFlagBits;\ntypedef VkFlags VkPeerMemoryFeatureFlags;\n\ntypedef enum VkMemoryAllocateFlagBits {\n    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001,\n    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002,\n    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004,\n    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT,\n    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT,\n    
VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,\n    VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkMemoryAllocateFlagBits;\ntypedef VkFlags VkMemoryAllocateFlags;\ntypedef VkFlags VkCommandPoolTrimFlags;\ntypedef VkFlags VkDescriptorUpdateTemplateCreateFlags;\n\ntypedef enum VkExternalMemoryHandleTypeFlagBits {\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA = 0x00000800,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_RDMA_ADDRESS_BIT_NV = 0x00001000,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT,\n    
VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalMemoryHandleTypeFlagBits;\ntypedef VkFlags VkExternalMemoryHandleTypeFlags;\n\ntypedef enum VkExternalMemoryFeatureFlagBits {\n    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001,\n    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002,\n    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004,\n    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT,\n    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT,\n    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,\n    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalMemoryFeatureFlagBits;\ntypedef VkFlags VkExternalMemoryFeatureFlags;\n\ntypedef enum VkExternalFenceHandleTypeFlagBits {\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,\n    VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalFenceHandleTypeFlagBits;\ntypedef VkFlags VkExternalFenceHandleTypeFlags;\n\ntypedef enum VkExternalFenceFeatureFlagBits {\n    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001,\n    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002,\n    
VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT,\n    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT,\n    VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalFenceFeatureFlagBits;\ntypedef VkFlags VkExternalFenceFeatureFlags;\n\ntypedef enum VkFenceImportFlagBits {\n    VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001,\n    VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT,\n    VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkFenceImportFlagBits;\ntypedef VkFlags VkFenceImportFlags;\n\ntypedef enum VkSemaphoreImportFlagBits {\n    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001,\n    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,\n    VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSemaphoreImportFlagBits;\ntypedef VkFlags VkSemaphoreImportFlags;\n\ntypedef enum VkExternalSemaphoreHandleTypeFlagBits {\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA = 0x00000080,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,\n    
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,\n    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalSemaphoreHandleTypeFlagBits;\ntypedef VkFlags VkExternalSemaphoreHandleTypeFlags;\n\ntypedef enum VkExternalSemaphoreFeatureFlagBits {\n    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001,\n    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002,\n    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT,\n    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT,\n    VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkExternalSemaphoreFeatureFlagBits;\ntypedef VkFlags VkExternalSemaphoreFeatureFlags;\ntypedef struct VkPhysicalDeviceSubgroupProperties {\n    VkStructureType           sType;\n    void*                     pNext;\n    uint32_t                  subgroupSize;\n    VkShaderStageFlags        supportedStages;\n    VkSubgroupFeatureFlags    supportedOperations;\n    VkBool32                  quadOperationsInAllStages;\n} VkPhysicalDeviceSubgroupProperties;\n\ntypedef struct VkBindBufferMemoryInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBuffer           buffer;\n    VkDeviceMemory     memory;\n    VkDeviceSize       memoryOffset;\n} VkBindBufferMemoryInfo;\n\ntypedef struct VkBindImageMemoryInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n    VkDeviceMemory     memory;\n    VkDeviceSize       memoryOffset;\n} VkBindImageMemoryInfo;\n\ntypedef struct VkPhysicalDevice16BitStorageFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           storageBuffer16BitAccess;\n    VkBool32           uniformAndStorageBuffer16BitAccess;\n    VkBool32           storagePushConstant16;\n    VkBool32           storageInputOutput16;\n} 
VkPhysicalDevice16BitStorageFeatures;\n\ntypedef struct VkMemoryDedicatedRequirements {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           prefersDedicatedAllocation;\n    VkBool32           requiresDedicatedAllocation;\n} VkMemoryDedicatedRequirements;\n\ntypedef struct VkMemoryDedicatedAllocateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n    VkBuffer           buffer;\n} VkMemoryDedicatedAllocateInfo;\n\ntypedef struct VkMemoryAllocateFlagsInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkMemoryAllocateFlags    flags;\n    uint32_t                 deviceMask;\n} VkMemoryAllocateFlagsInfo;\n\ntypedef struct VkDeviceGroupRenderPassBeginInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           deviceMask;\n    uint32_t           deviceRenderAreaCount;\n    const VkRect2D*    pDeviceRenderAreas;\n} VkDeviceGroupRenderPassBeginInfo;\n\ntypedef struct VkDeviceGroupCommandBufferBeginInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           deviceMask;\n} VkDeviceGroupCommandBufferBeginInfo;\n\ntypedef struct VkDeviceGroupSubmitInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           waitSemaphoreCount;\n    const uint32_t*    pWaitSemaphoreDeviceIndices;\n    uint32_t           commandBufferCount;\n    const uint32_t*    pCommandBufferDeviceMasks;\n    uint32_t           signalSemaphoreCount;\n    const uint32_t*    pSignalSemaphoreDeviceIndices;\n} VkDeviceGroupSubmitInfo;\n\ntypedef struct VkDeviceGroupBindSparseInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           resourceDeviceIndex;\n    uint32_t           memoryDeviceIndex;\n} VkDeviceGroupBindSparseInfo;\n\ntypedef struct VkBindBufferMemoryDeviceGroupInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           
deviceIndexCount;\n    const uint32_t*    pDeviceIndices;\n} VkBindBufferMemoryDeviceGroupInfo;\n\ntypedef struct VkBindImageMemoryDeviceGroupInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           deviceIndexCount;\n    const uint32_t*    pDeviceIndices;\n    uint32_t           splitInstanceBindRegionCount;\n    const VkRect2D*    pSplitInstanceBindRegions;\n} VkBindImageMemoryDeviceGroupInfo;\n\ntypedef struct VkPhysicalDeviceGroupProperties {\n    VkStructureType     sType;\n    void*               pNext;\n    uint32_t            physicalDeviceCount;\n    VkPhysicalDevice    physicalDevices[VK_MAX_DEVICE_GROUP_SIZE];\n    VkBool32            subsetAllocation;\n} VkPhysicalDeviceGroupProperties;\n\ntypedef struct VkDeviceGroupDeviceCreateInfo {\n    VkStructureType            sType;\n    const void*                pNext;\n    uint32_t                   physicalDeviceCount;\n    const VkPhysicalDevice*    pPhysicalDevices;\n} VkDeviceGroupDeviceCreateInfo;\n\ntypedef struct VkBufferMemoryRequirementsInfo2 {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBuffer           buffer;\n} VkBufferMemoryRequirementsInfo2;\n\ntypedef struct VkImageMemoryRequirementsInfo2 {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n} VkImageMemoryRequirementsInfo2;\n\ntypedef struct VkImageSparseMemoryRequirementsInfo2 {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n} VkImageSparseMemoryRequirementsInfo2;\n\ntypedef struct VkMemoryRequirements2 {\n    VkStructureType         sType;\n    void*                   pNext;\n    VkMemoryRequirements    memoryRequirements;\n} VkMemoryRequirements2;\n\ntypedef struct VkSparseImageMemoryRequirements2 {\n    VkStructureType                    sType;\n    void*                              pNext;\n    VkSparseImageMemoryRequirements    memoryRequirements;\n} 
VkSparseImageMemoryRequirements2;\n\ntypedef struct VkPhysicalDeviceFeatures2 {\n    VkStructureType             sType;\n    void*                       pNext;\n    VkPhysicalDeviceFeatures    features;\n} VkPhysicalDeviceFeatures2;\n\ntypedef struct VkPhysicalDeviceProperties2 {\n    VkStructureType               sType;\n    void*                         pNext;\n    VkPhysicalDeviceProperties    properties;\n} VkPhysicalDeviceProperties2;\n\ntypedef struct VkFormatProperties2 {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkFormatProperties    formatProperties;\n} VkFormatProperties2;\n\ntypedef struct VkImageFormatProperties2 {\n    VkStructureType            sType;\n    void*                      pNext;\n    VkImageFormatProperties    imageFormatProperties;\n} VkImageFormatProperties2;\n\ntypedef struct VkPhysicalDeviceImageFormatInfo2 {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkFormat              format;\n    VkImageType           type;\n    VkImageTiling         tiling;\n    VkImageUsageFlags     usage;\n    VkImageCreateFlags    flags;\n} VkPhysicalDeviceImageFormatInfo2;\n\ntypedef struct VkQueueFamilyProperties2 {\n    VkStructureType            sType;\n    void*                      pNext;\n    VkQueueFamilyProperties    queueFamilyProperties;\n} VkQueueFamilyProperties2;\n\ntypedef struct VkPhysicalDeviceMemoryProperties2 {\n    VkStructureType                     sType;\n    void*                               pNext;\n    VkPhysicalDeviceMemoryProperties    memoryProperties;\n} VkPhysicalDeviceMemoryProperties2;\n\ntypedef struct VkSparseImageFormatProperties2 {\n    VkStructureType                  sType;\n    void*                            pNext;\n    VkSparseImageFormatProperties    properties;\n} VkSparseImageFormatProperties2;\n\ntypedef struct VkPhysicalDeviceSparseImageFormatInfo2 {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkFormat                 
format;\n    VkImageType              type;\n    VkSampleCountFlagBits    samples;\n    VkImageUsageFlags        usage;\n    VkImageTiling            tiling;\n} VkPhysicalDeviceSparseImageFormatInfo2;\n\ntypedef struct VkPhysicalDevicePointClippingProperties {\n    VkStructureType            sType;\n    void*                      pNext;\n    VkPointClippingBehavior    pointClippingBehavior;\n} VkPhysicalDevicePointClippingProperties;\n\ntypedef struct VkInputAttachmentAspectReference {\n    uint32_t              subpass;\n    uint32_t              inputAttachmentIndex;\n    VkImageAspectFlags    aspectMask;\n} VkInputAttachmentAspectReference;\n\ntypedef struct VkRenderPassInputAttachmentAspectCreateInfo {\n    VkStructureType                            sType;\n    const void*                                pNext;\n    uint32_t                                   aspectReferenceCount;\n    const VkInputAttachmentAspectReference*    pAspectReferences;\n} VkRenderPassInputAttachmentAspectCreateInfo;\n\ntypedef struct VkImageViewUsageCreateInfo {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkImageUsageFlags    usage;\n} VkImageViewUsageCreateInfo;\n\ntypedef struct VkPipelineTessellationDomainOriginStateCreateInfo {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkTessellationDomainOrigin    domainOrigin;\n} VkPipelineTessellationDomainOriginStateCreateInfo;\n\ntypedef struct VkRenderPassMultiviewCreateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           subpassCount;\n    const uint32_t*    pViewMasks;\n    uint32_t           dependencyCount;\n    const int32_t*     pViewOffsets;\n    uint32_t           correlationMaskCount;\n    const uint32_t*    pCorrelationMasks;\n} VkRenderPassMultiviewCreateInfo;\n\ntypedef struct VkPhysicalDeviceMultiviewFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           multiview;\n    
VkBool32           multiviewGeometryShader;\n    VkBool32           multiviewTessellationShader;\n} VkPhysicalDeviceMultiviewFeatures;\n\ntypedef struct VkPhysicalDeviceMultiviewProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxMultiviewViewCount;\n    uint32_t           maxMultiviewInstanceIndex;\n} VkPhysicalDeviceMultiviewProperties;\n\ntypedef struct VkPhysicalDeviceVariablePointersFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           variablePointersStorageBuffer;\n    VkBool32           variablePointers;\n} VkPhysicalDeviceVariablePointersFeatures;\n\ntypedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures;\n\ntypedef struct VkPhysicalDeviceProtectedMemoryFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           protectedMemory;\n} VkPhysicalDeviceProtectedMemoryFeatures;\n\ntypedef struct VkPhysicalDeviceProtectedMemoryProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           protectedNoFault;\n} VkPhysicalDeviceProtectedMemoryProperties;\n\ntypedef struct VkDeviceQueueInfo2 {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkDeviceQueueCreateFlags    flags;\n    uint32_t                    queueFamilyIndex;\n    uint32_t                    queueIndex;\n} VkDeviceQueueInfo2;\n\ntypedef struct VkProtectedSubmitInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           protectedSubmit;\n} VkProtectedSubmitInfo;\n\ntypedef struct VkSamplerYcbcrConversionCreateInfo {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkFormat                         format;\n    VkSamplerYcbcrModelConversion    ycbcrModel;\n    VkSamplerYcbcrRange              ycbcrRange;\n    VkComponentMapping               components;\n    VkChromaLocation                 
xChromaOffset;\n    VkChromaLocation                 yChromaOffset;\n    VkFilter                         chromaFilter;\n    VkBool32                         forceExplicitReconstruction;\n} VkSamplerYcbcrConversionCreateInfo;\n\ntypedef struct VkSamplerYcbcrConversionInfo {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkSamplerYcbcrConversion    conversion;\n} VkSamplerYcbcrConversionInfo;\n\ntypedef struct VkBindImagePlaneMemoryInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImageAspectFlagBits    planeAspect;\n} VkBindImagePlaneMemoryInfo;\n\ntypedef struct VkImagePlaneMemoryRequirementsInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImageAspectFlagBits    planeAspect;\n} VkImagePlaneMemoryRequirementsInfo;\n\ntypedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           samplerYcbcrConversion;\n} VkPhysicalDeviceSamplerYcbcrConversionFeatures;\n\ntypedef struct VkSamplerYcbcrConversionImageFormatProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           combinedImageSamplerDescriptorCount;\n} VkSamplerYcbcrConversionImageFormatProperties;\n\ntypedef struct VkDescriptorUpdateTemplateEntry {\n    uint32_t            dstBinding;\n    uint32_t            dstArrayElement;\n    uint32_t            descriptorCount;\n    VkDescriptorType    descriptorType;\n    size_t              offset;\n    size_t              stride;\n} VkDescriptorUpdateTemplateEntry;\n\ntypedef struct VkDescriptorUpdateTemplateCreateInfo {\n    VkStructureType                           sType;\n    const void*                               pNext;\n    VkDescriptorUpdateTemplateCreateFlags     flags;\n    uint32_t                                  descriptorUpdateEntryCount;\n    const VkDescriptorUpdateTemplateEntry*    pDescriptorUpdateEntries;\n    
VkDescriptorUpdateTemplateType            templateType;\n    VkDescriptorSetLayout                     descriptorSetLayout;\n    VkPipelineBindPoint                       pipelineBindPoint;\n    VkPipelineLayout                          pipelineLayout;\n    uint32_t                                  set;\n} VkDescriptorUpdateTemplateCreateInfo;\n\ntypedef struct VkExternalMemoryProperties {\n    VkExternalMemoryFeatureFlags       externalMemoryFeatures;\n    VkExternalMemoryHandleTypeFlags    exportFromImportedHandleTypes;\n    VkExternalMemoryHandleTypeFlags    compatibleHandleTypes;\n} VkExternalMemoryProperties;\n\ntypedef struct VkPhysicalDeviceExternalImageFormatInfo {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n} VkPhysicalDeviceExternalImageFormatInfo;\n\ntypedef struct VkExternalImageFormatProperties {\n    VkStructureType               sType;\n    void*                         pNext;\n    VkExternalMemoryProperties    externalMemoryProperties;\n} VkExternalImageFormatProperties;\n\ntypedef struct VkPhysicalDeviceExternalBufferInfo {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkBufferCreateFlags                   flags;\n    VkBufferUsageFlags                    usage;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n} VkPhysicalDeviceExternalBufferInfo;\n\ntypedef struct VkExternalBufferProperties {\n    VkStructureType               sType;\n    void*                         pNext;\n    VkExternalMemoryProperties    externalMemoryProperties;\n} VkExternalBufferProperties;\n\ntypedef struct VkPhysicalDeviceIDProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    uint8_t            deviceUUID[VK_UUID_SIZE];\n    uint8_t            driverUUID[VK_UUID_SIZE];\n    uint8_t            deviceLUID[VK_LUID_SIZE];\n    uint32_t           deviceNodeMask;\n    
VkBool32           deviceLUIDValid;\n} VkPhysicalDeviceIDProperties;\n\ntypedef struct VkExternalMemoryImageCreateInfo {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkExternalMemoryHandleTypeFlags    handleTypes;\n} VkExternalMemoryImageCreateInfo;\n\ntypedef struct VkExternalMemoryBufferCreateInfo {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkExternalMemoryHandleTypeFlags    handleTypes;\n} VkExternalMemoryBufferCreateInfo;\n\ntypedef struct VkExportMemoryAllocateInfo {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkExternalMemoryHandleTypeFlags    handleTypes;\n} VkExportMemoryAllocateInfo;\n\ntypedef struct VkPhysicalDeviceExternalFenceInfo {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkExternalFenceHandleTypeFlagBits    handleType;\n} VkPhysicalDeviceExternalFenceInfo;\n\ntypedef struct VkExternalFenceProperties {\n    VkStructureType                   sType;\n    void*                             pNext;\n    VkExternalFenceHandleTypeFlags    exportFromImportedHandleTypes;\n    VkExternalFenceHandleTypeFlags    compatibleHandleTypes;\n    VkExternalFenceFeatureFlags       externalFenceFeatures;\n} VkExternalFenceProperties;\n\ntypedef struct VkExportFenceCreateInfo {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkExternalFenceHandleTypeFlags    handleTypes;\n} VkExportFenceCreateInfo;\n\ntypedef struct VkExportSemaphoreCreateInfo {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExternalSemaphoreHandleTypeFlags    handleTypes;\n} VkExportSemaphoreCreateInfo;\n\ntypedef struct VkPhysicalDeviceExternalSemaphoreInfo {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    
VkExternalSemaphoreHandleTypeFlagBits    handleType;\n} VkPhysicalDeviceExternalSemaphoreInfo;\n\ntypedef struct VkExternalSemaphoreProperties {\n    VkStructureType                       sType;\n    void*                                 pNext;\n    VkExternalSemaphoreHandleTypeFlags    exportFromImportedHandleTypes;\n    VkExternalSemaphoreHandleTypeFlags    compatibleHandleTypes;\n    VkExternalSemaphoreFeatureFlags       externalSemaphoreFeatures;\n} VkExternalSemaphoreProperties;\n\ntypedef struct VkPhysicalDeviceMaintenance3Properties {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxPerSetDescriptors;\n    VkDeviceSize       maxMemoryAllocationSize;\n} VkPhysicalDeviceMaintenance3Properties;\n\ntypedef struct VkDescriptorSetLayoutSupport {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           supported;\n} VkDescriptorSetLayoutSupport;\n\ntypedef struct VkPhysicalDeviceShaderDrawParametersFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderDrawParameters;\n} VkPhysicalDeviceShaderDrawParametersFeatures;\n\ntypedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer 
commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);\ntypedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);\ntypedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);\ntypedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice 
physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(\n    uint32_t*                                   pApiVersion);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(\n    VkDevice                                    device,\n    uint32_t                                    bindInfoCount,\n    const VkBindBufferMemoryInfo*               pBindInfos);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(\n    VkDevice                                    device,\n    uint32_t                                    bindInfoCount,\n    const VkBindImageMemoryInfo*                pBindInfos);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(\n    VkDevice                                    device,\n    uint32_t                                    heapIndex,\n    uint32_t                                    localDeviceIndex,\n    uint32_t                                    remoteDeviceIndex,\n    VkPeerMemoryFeatureFlags*                   pPeerMemoryFeatures);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    deviceMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    baseGroupX,\n    uint32_t                                    baseGroupY,\n    uint32_t                                    baseGroupZ,\n    uint32_t                                    groupCountX,\n    uint32_t                                    groupCountY,\n    uint32_t                                    
groupCountZ);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(\n    VkInstance                                  instance,\n    uint32_t*                                   pPhysicalDeviceGroupCount,\n    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(\n    VkDevice                                    device,\n    const VkImageMemoryRequirementsInfo2*       pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(\n    VkDevice                                    device,\n    const VkBufferMemoryRequirementsInfo2*      pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(\n    VkDevice                                    device,\n    const VkImageSparseMemoryRequirementsInfo2* pInfo,\n    uint32_t*                                   pSparseMemoryRequirementCount,\n    VkSparseImageMemoryRequirements2*           pSparseMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceFeatures2*                  pFeatures);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceProperties2*                pProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkFormatProperties2*                        pFormatProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceImageFormatInfo2*     pImageFormatInfo,\n    
VkImageFormatProperties2*                   pImageFormatProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pQueueFamilyPropertyCount,\n    VkQueueFamilyProperties2*                   pQueueFamilyProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceMemoryProperties2*          pMemoryProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,\n    uint32_t*                                   pPropertyCount,\n    VkSparseImageFormatProperties2*             pProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(\n    VkDevice                                    device,\n    VkCommandPool                               commandPool,\n    VkCommandPoolTrimFlags                      flags);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(\n    VkDevice                                    device,\n    const VkDeviceQueueInfo2*                   pQueueInfo,\n    VkQueue*                                    pQueue);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(\n    VkDevice                                    device,\n    const VkSamplerYcbcrConversionCreateInfo*   pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSamplerYcbcrConversion*                   pYcbcrConversion);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(\n    VkDevice                                    device,\n    VkSamplerYcbcrConversion                    ycbcrConversion,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(\n    VkDevice            
                        device,\n    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDescriptorUpdateTemplate*                 pDescriptorUpdateTemplate);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(\n    VkDevice                                    device,\n    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(\n    VkDevice                                    device,\n    VkDescriptorSet                             descriptorSet,\n    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,\n    const void*                                 pData);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalBufferInfo*   pExternalBufferInfo,\n    VkExternalBufferProperties*                 pExternalBufferProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,\n    VkExternalFenceProperties*                  pExternalFenceProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,\n    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(\n    VkDevice                                    device,\n    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,\n    VkDescriptorSetLayoutSupport*               pSupport);\n#endif\n\n\n#define VK_VERSION_1_2 1\n// Vulkan 1.2 version number\n#define 
VK_API_VERSION_1_2 VK_MAKE_API_VERSION(0, 1, 2, 0)// Patch version should always be set to 0\n\n#define VK_MAX_DRIVER_NAME_SIZE           256U\n#define VK_MAX_DRIVER_INFO_SIZE           256U\n\ntypedef enum VkDriverId {\n    VK_DRIVER_ID_AMD_PROPRIETARY = 1,\n    VK_DRIVER_ID_AMD_OPEN_SOURCE = 2,\n    VK_DRIVER_ID_MESA_RADV = 3,\n    VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4,\n    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5,\n    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6,\n    VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7,\n    VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8,\n    VK_DRIVER_ID_ARM_PROPRIETARY = 9,\n    VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10,\n    VK_DRIVER_ID_GGP_PROPRIETARY = 11,\n    VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12,\n    VK_DRIVER_ID_MESA_LLVMPIPE = 13,\n    VK_DRIVER_ID_MOLTENVK = 14,\n    VK_DRIVER_ID_COREAVI_PROPRIETARY = 15,\n    VK_DRIVER_ID_JUICE_PROPRIETARY = 16,\n    VK_DRIVER_ID_VERISILICON_PROPRIETARY = 17,\n    VK_DRIVER_ID_MESA_TURNIP = 18,\n    VK_DRIVER_ID_MESA_V3DV = 19,\n    VK_DRIVER_ID_MESA_PANVK = 20,\n    VK_DRIVER_ID_SAMSUNG_PROPRIETARY = 21,\n    VK_DRIVER_ID_MESA_VENUS = 22,\n    VK_DRIVER_ID_MESA_DOZEN = 23,\n    VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY,\n    VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE,\n    VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV,\n    VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY,\n    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,\n    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA,\n    VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY,\n    VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY,\n    VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY,\n    VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER,\n    VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY,\n    
VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY,\n    VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF\n} VkDriverId;\n\ntypedef enum VkShaderFloatControlsIndependence {\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE,\n    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF\n} VkShaderFloatControlsIndependence;\n\ntypedef enum VkSamplerReductionMode {\n    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0,\n    VK_SAMPLER_REDUCTION_MODE_MIN = 1,\n    VK_SAMPLER_REDUCTION_MODE_MAX = 2,\n    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE,\n    VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN,\n    VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX,\n    VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF\n} VkSamplerReductionMode;\n\ntypedef enum VkSemaphoreType {\n    VK_SEMAPHORE_TYPE_BINARY = 0,\n    VK_SEMAPHORE_TYPE_TIMELINE = 1,\n    VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY,\n    VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE,\n    VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF\n} VkSemaphoreType;\n\ntypedef enum VkResolveModeFlagBits {\n    VK_RESOLVE_MODE_NONE = 0,\n    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001,\n    VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002,\n    VK_RESOLVE_MODE_MIN_BIT = 0x00000004,\n    VK_RESOLVE_MODE_MAX_BIT = 0x00000008,\n    VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE,\n    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT,\n    VK_RESOLVE_MODE_AVERAGE_BIT_KHR = 
VK_RESOLVE_MODE_AVERAGE_BIT,\n    VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT,\n    VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT,\n    VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkResolveModeFlagBits;\ntypedef VkFlags VkResolveModeFlags;\n\ntypedef enum VkDescriptorBindingFlagBits {\n    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001,\n    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002,\n    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004,\n    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008,\n    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,\n    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT,\n    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT,\n    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT,\n    VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkDescriptorBindingFlagBits;\ntypedef VkFlags VkDescriptorBindingFlags;\n\ntypedef enum VkSemaphoreWaitFlagBits {\n    VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001,\n    VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT,\n    VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF\n} VkSemaphoreWaitFlagBits;\ntypedef VkFlags VkSemaphoreWaitFlags;\ntypedef struct VkPhysicalDeviceVulkan11Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           storageBuffer16BitAccess;\n    VkBool32           uniformAndStorageBuffer16BitAccess;\n    VkBool32           storagePushConstant16;\n    VkBool32           storageInputOutput16;\n    VkBool32           multiview;\n    VkBool32           multiviewGeometryShader;\n    VkBool32           multiviewTessellationShader;\n    VkBool32           variablePointersStorageBuffer;\n    VkBool32           variablePointers;\n    VkBool32           
protectedMemory;\n    VkBool32           samplerYcbcrConversion;\n    VkBool32           shaderDrawParameters;\n} VkPhysicalDeviceVulkan11Features;\n\ntypedef struct VkPhysicalDeviceVulkan11Properties {\n    VkStructureType            sType;\n    void*                      pNext;\n    uint8_t                    deviceUUID[VK_UUID_SIZE];\n    uint8_t                    driverUUID[VK_UUID_SIZE];\n    uint8_t                    deviceLUID[VK_LUID_SIZE];\n    uint32_t                   deviceNodeMask;\n    VkBool32                   deviceLUIDValid;\n    uint32_t                   subgroupSize;\n    VkShaderStageFlags         subgroupSupportedStages;\n    VkSubgroupFeatureFlags     subgroupSupportedOperations;\n    VkBool32                   subgroupQuadOperationsInAllStages;\n    VkPointClippingBehavior    pointClippingBehavior;\n    uint32_t                   maxMultiviewViewCount;\n    uint32_t                   maxMultiviewInstanceIndex;\n    VkBool32                   protectedNoFault;\n    uint32_t                   maxPerSetDescriptors;\n    VkDeviceSize               maxMemoryAllocationSize;\n} VkPhysicalDeviceVulkan11Properties;\n\ntypedef struct VkPhysicalDeviceVulkan12Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           samplerMirrorClampToEdge;\n    VkBool32           drawIndirectCount;\n    VkBool32           storageBuffer8BitAccess;\n    VkBool32           uniformAndStorageBuffer8BitAccess;\n    VkBool32           storagePushConstant8;\n    VkBool32           shaderBufferInt64Atomics;\n    VkBool32           shaderSharedInt64Atomics;\n    VkBool32           shaderFloat16;\n    VkBool32           shaderInt8;\n    VkBool32           descriptorIndexing;\n    VkBool32           shaderInputAttachmentArrayDynamicIndexing;\n    VkBool32           shaderUniformTexelBufferArrayDynamicIndexing;\n    VkBool32           shaderStorageTexelBufferArrayDynamicIndexing;\n    VkBool32           
shaderUniformBufferArrayNonUniformIndexing;\n    VkBool32           shaderSampledImageArrayNonUniformIndexing;\n    VkBool32           shaderStorageBufferArrayNonUniformIndexing;\n    VkBool32           shaderStorageImageArrayNonUniformIndexing;\n    VkBool32           shaderInputAttachmentArrayNonUniformIndexing;\n    VkBool32           shaderUniformTexelBufferArrayNonUniformIndexing;\n    VkBool32           shaderStorageTexelBufferArrayNonUniformIndexing;\n    VkBool32           descriptorBindingUniformBufferUpdateAfterBind;\n    VkBool32           descriptorBindingSampledImageUpdateAfterBind;\n    VkBool32           descriptorBindingStorageImageUpdateAfterBind;\n    VkBool32           descriptorBindingStorageBufferUpdateAfterBind;\n    VkBool32           descriptorBindingUniformTexelBufferUpdateAfterBind;\n    VkBool32           descriptorBindingStorageTexelBufferUpdateAfterBind;\n    VkBool32           descriptorBindingUpdateUnusedWhilePending;\n    VkBool32           descriptorBindingPartiallyBound;\n    VkBool32           descriptorBindingVariableDescriptorCount;\n    VkBool32           runtimeDescriptorArray;\n    VkBool32           samplerFilterMinmax;\n    VkBool32           scalarBlockLayout;\n    VkBool32           imagelessFramebuffer;\n    VkBool32           uniformBufferStandardLayout;\n    VkBool32           shaderSubgroupExtendedTypes;\n    VkBool32           separateDepthStencilLayouts;\n    VkBool32           hostQueryReset;\n    VkBool32           timelineSemaphore;\n    VkBool32           bufferDeviceAddress;\n    VkBool32           bufferDeviceAddressCaptureReplay;\n    VkBool32           bufferDeviceAddressMultiDevice;\n    VkBool32           vulkanMemoryModel;\n    VkBool32           vulkanMemoryModelDeviceScope;\n    VkBool32           vulkanMemoryModelAvailabilityVisibilityChains;\n    VkBool32           shaderOutputViewportIndex;\n    VkBool32           shaderOutputLayer;\n    VkBool32           subgroupBroadcastDynamicId;\n} 
VkPhysicalDeviceVulkan12Features;\n\ntypedef struct VkConformanceVersion {\n    uint8_t    major;\n    uint8_t    minor;\n    uint8_t    subminor;\n    uint8_t    patch;\n} VkConformanceVersion;\n\ntypedef struct VkPhysicalDeviceVulkan12Properties {\n    VkStructureType                      sType;\n    void*                                pNext;\n    VkDriverId                           driverID;\n    char                                 driverName[VK_MAX_DRIVER_NAME_SIZE];\n    char                                 driverInfo[VK_MAX_DRIVER_INFO_SIZE];\n    VkConformanceVersion                 conformanceVersion;\n    VkShaderFloatControlsIndependence    denormBehaviorIndependence;\n    VkShaderFloatControlsIndependence    roundingModeIndependence;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat16;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat32;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat64;\n    VkBool32                             shaderDenormPreserveFloat16;\n    VkBool32                             shaderDenormPreserveFloat32;\n    VkBool32                             shaderDenormPreserveFloat64;\n    VkBool32                             shaderDenormFlushToZeroFloat16;\n    VkBool32                             shaderDenormFlushToZeroFloat32;\n    VkBool32                             shaderDenormFlushToZeroFloat64;\n    VkBool32                             shaderRoundingModeRTEFloat16;\n    VkBool32                             shaderRoundingModeRTEFloat32;\n    VkBool32                             shaderRoundingModeRTEFloat64;\n    VkBool32                             shaderRoundingModeRTZFloat16;\n    VkBool32                             shaderRoundingModeRTZFloat32;\n    VkBool32                             shaderRoundingModeRTZFloat64;\n    uint32_t                             maxUpdateAfterBindDescriptorsInAllPools;\n    VkBool32                             
shaderUniformBufferArrayNonUniformIndexingNative;\n    VkBool32                             shaderSampledImageArrayNonUniformIndexingNative;\n    VkBool32                             shaderStorageBufferArrayNonUniformIndexingNative;\n    VkBool32                             shaderStorageImageArrayNonUniformIndexingNative;\n    VkBool32                             shaderInputAttachmentArrayNonUniformIndexingNative;\n    VkBool32                             robustBufferAccessUpdateAfterBind;\n    VkBool32                             quadDivergentImplicitLod;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindSamplers;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindUniformBuffers;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindStorageBuffers;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindSampledImages;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindStorageImages;\n    uint32_t                             maxPerStageDescriptorUpdateAfterBindInputAttachments;\n    uint32_t                             maxPerStageUpdateAfterBindResources;\n    uint32_t                             maxDescriptorSetUpdateAfterBindSamplers;\n    uint32_t                             maxDescriptorSetUpdateAfterBindUniformBuffers;\n    uint32_t                             maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;\n    uint32_t                             maxDescriptorSetUpdateAfterBindStorageBuffers;\n    uint32_t                             maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;\n    uint32_t                             maxDescriptorSetUpdateAfterBindSampledImages;\n    uint32_t                             maxDescriptorSetUpdateAfterBindStorageImages;\n    uint32_t                             maxDescriptorSetUpdateAfterBindInputAttachments;\n    VkResolveModeFlags                   supportedDepthResolveModes;\n    
VkResolveModeFlags                   supportedStencilResolveModes;\n    VkBool32                             independentResolveNone;\n    VkBool32                             independentResolve;\n    VkBool32                             filterMinmaxSingleComponentFormats;\n    VkBool32                             filterMinmaxImageComponentMapping;\n    uint64_t                             maxTimelineSemaphoreValueDifference;\n    VkSampleCountFlags                   framebufferIntegerColorSampleCounts;\n} VkPhysicalDeviceVulkan12Properties;\n\ntypedef struct VkImageFormatListCreateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           viewFormatCount;\n    const VkFormat*    pViewFormats;\n} VkImageFormatListCreateInfo;\n\ntypedef struct VkAttachmentDescription2 {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkAttachmentDescriptionFlags    flags;\n    VkFormat                        format;\n    VkSampleCountFlagBits           samples;\n    VkAttachmentLoadOp              loadOp;\n    VkAttachmentStoreOp             storeOp;\n    VkAttachmentLoadOp              stencilLoadOp;\n    VkAttachmentStoreOp             stencilStoreOp;\n    VkImageLayout                   initialLayout;\n    VkImageLayout                   finalLayout;\n} VkAttachmentDescription2;\n\ntypedef struct VkAttachmentReference2 {\n    VkStructureType       sType;\n    const void*           pNext;\n    uint32_t              attachment;\n    VkImageLayout         layout;\n    VkImageAspectFlags    aspectMask;\n} VkAttachmentReference2;\n\ntypedef struct VkSubpassDescription2 {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkSubpassDescriptionFlags        flags;\n    VkPipelineBindPoint              pipelineBindPoint;\n    uint32_t                         viewMask;\n    uint32_t                         inputAttachmentCount;\n    const VkAttachmentReference2*    
pInputAttachments;\n    uint32_t                         colorAttachmentCount;\n    const VkAttachmentReference2*    pColorAttachments;\n    const VkAttachmentReference2*    pResolveAttachments;\n    const VkAttachmentReference2*    pDepthStencilAttachment;\n    uint32_t                         preserveAttachmentCount;\n    const uint32_t*                  pPreserveAttachments;\n} VkSubpassDescription2;\n\ntypedef struct VkSubpassDependency2 {\n    VkStructureType         sType;\n    const void*             pNext;\n    uint32_t                srcSubpass;\n    uint32_t                dstSubpass;\n    VkPipelineStageFlags    srcStageMask;\n    VkPipelineStageFlags    dstStageMask;\n    VkAccessFlags           srcAccessMask;\n    VkAccessFlags           dstAccessMask;\n    VkDependencyFlags       dependencyFlags;\n    int32_t                 viewOffset;\n} VkSubpassDependency2;\n\ntypedef struct VkRenderPassCreateInfo2 {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkRenderPassCreateFlags            flags;\n    uint32_t                           attachmentCount;\n    const VkAttachmentDescription2*    pAttachments;\n    uint32_t                           subpassCount;\n    const VkSubpassDescription2*       pSubpasses;\n    uint32_t                           dependencyCount;\n    const VkSubpassDependency2*        pDependencies;\n    uint32_t                           correlatedViewMaskCount;\n    const uint32_t*                    pCorrelatedViewMasks;\n} VkRenderPassCreateInfo2;\n\ntypedef struct VkSubpassBeginInfo {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkSubpassContents    contents;\n} VkSubpassBeginInfo;\n\ntypedef struct VkSubpassEndInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n} VkSubpassEndInfo;\n\ntypedef struct VkPhysicalDevice8BitStorageFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           
storageBuffer8BitAccess;\n    VkBool32           uniformAndStorageBuffer8BitAccess;\n    VkBool32           storagePushConstant8;\n} VkPhysicalDevice8BitStorageFeatures;\n\ntypedef struct VkPhysicalDeviceDriverProperties {\n    VkStructureType         sType;\n    void*                   pNext;\n    VkDriverId              driverID;\n    char                    driverName[VK_MAX_DRIVER_NAME_SIZE];\n    char                    driverInfo[VK_MAX_DRIVER_INFO_SIZE];\n    VkConformanceVersion    conformanceVersion;\n} VkPhysicalDeviceDriverProperties;\n\ntypedef struct VkPhysicalDeviceShaderAtomicInt64Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderBufferInt64Atomics;\n    VkBool32           shaderSharedInt64Atomics;\n} VkPhysicalDeviceShaderAtomicInt64Features;\n\ntypedef struct VkPhysicalDeviceShaderFloat16Int8Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderFloat16;\n    VkBool32           shaderInt8;\n} VkPhysicalDeviceShaderFloat16Int8Features;\n\ntypedef struct VkPhysicalDeviceFloatControlsProperties {\n    VkStructureType                      sType;\n    void*                                pNext;\n    VkShaderFloatControlsIndependence    denormBehaviorIndependence;\n    VkShaderFloatControlsIndependence    roundingModeIndependence;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat16;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat32;\n    VkBool32                             shaderSignedZeroInfNanPreserveFloat64;\n    VkBool32                             shaderDenormPreserveFloat16;\n    VkBool32                             shaderDenormPreserveFloat32;\n    VkBool32                             shaderDenormPreserveFloat64;\n    VkBool32                             shaderDenormFlushToZeroFloat16;\n    VkBool32                             shaderDenormFlushToZeroFloat32;\n    VkBool32                    
         shaderDenormFlushToZeroFloat64;
    VkBool32                             shaderRoundingModeRTEFloat16;
    VkBool32                             shaderRoundingModeRTEFloat32;
    VkBool32                             shaderRoundingModeRTEFloat64;
    VkBool32                             shaderRoundingModeRTZFloat16;
    VkBool32                             shaderRoundingModeRTZFloat32;
    VkBool32                             shaderRoundingModeRTZFloat64;
} VkPhysicalDeviceFloatControlsProperties;

// NOTE(review): this section appears to be a machine-generated Vulkan API header
// (declarations only, no function bodies); prefer regenerating from the Vulkan
// registry over hand-editing — confirm against the project's generator setup.

// Descriptor indexing: per-binding flag specification plus the feature and
// limit query structs (names ending in ...Features / ...Properties are
// filled in by the implementation via their sType/pNext chains).
typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo {
    VkStructureType                    sType;
    const void*                        pNext;
    uint32_t                           bindingCount;
    const VkDescriptorBindingFlags*    pBindingFlags;
} VkDescriptorSetLayoutBindingFlagsCreateInfo;

typedef struct VkPhysicalDeviceDescriptorIndexingFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           shaderInputAttachmentArrayDynamicIndexing;
    VkBool32           shaderUniformTexelBufferArrayDynamicIndexing;
    VkBool32           shaderStorageTexelBufferArrayDynamicIndexing;
    VkBool32           shaderUniformBufferArrayNonUniformIndexing;
    VkBool32           shaderSampledImageArrayNonUniformIndexing;
    VkBool32           shaderStorageBufferArrayNonUniformIndexing;
    VkBool32           shaderStorageImageArrayNonUniformIndexing;
    VkBool32           shaderInputAttachmentArrayNonUniformIndexing;
    VkBool32           shaderUniformTexelBufferArrayNonUniformIndexing;
    VkBool32           shaderStorageTexelBufferArrayNonUniformIndexing;
    VkBool32           descriptorBindingUniformBufferUpdateAfterBind;
    VkBool32           descriptorBindingSampledImageUpdateAfterBind;
    VkBool32           descriptorBindingStorageImageUpdateAfterBind;
    VkBool32           descriptorBindingStorageBufferUpdateAfterBind;
    VkBool32           descriptorBindingUniformTexelBufferUpdateAfterBind;
    VkBool32           descriptorBindingStorageTexelBufferUpdateAfterBind;
    VkBool32           descriptorBindingUpdateUnusedWhilePending;
    VkBool32           descriptorBindingPartiallyBound;
    VkBool32           descriptorBindingVariableDescriptorCount;
    VkBool32           runtimeDescriptorArray;
} VkPhysicalDeviceDescriptorIndexingFeatures;

typedef struct VkPhysicalDeviceDescriptorIndexingProperties {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxUpdateAfterBindDescriptorsInAllPools;
    VkBool32           shaderUniformBufferArrayNonUniformIndexingNative;
    VkBool32           shaderSampledImageArrayNonUniformIndexingNative;
    VkBool32           shaderStorageBufferArrayNonUniformIndexingNative;
    VkBool32           shaderStorageImageArrayNonUniformIndexingNative;
    VkBool32           shaderInputAttachmentArrayNonUniformIndexingNative;
    VkBool32           robustBufferAccessUpdateAfterBind;
    VkBool32           quadDivergentImplicitLod;
    uint32_t           maxPerStageDescriptorUpdateAfterBindSamplers;
    uint32_t           maxPerStageDescriptorUpdateAfterBindUniformBuffers;
    uint32_t           maxPerStageDescriptorUpdateAfterBindStorageBuffers;
    uint32_t           maxPerStageDescriptorUpdateAfterBindSampledImages;
    uint32_t           maxPerStageDescriptorUpdateAfterBindStorageImages;
    uint32_t           maxPerStageDescriptorUpdateAfterBindInputAttachments;
    uint32_t           maxPerStageUpdateAfterBindResources;
    uint32_t           maxDescriptorSetUpdateAfterBindSamplers;
    uint32_t           maxDescriptorSetUpdateAfterBindUniformBuffers;
    uint32_t           maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
    uint32_t           maxDescriptorSetUpdateAfterBindStorageBuffers;
    uint32_t           maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
    uint32_t           maxDescriptorSetUpdateAfterBindSampledImages;
    uint32_t           maxDescriptorSetUpdateAfterBindStorageImages;
    uint32_t           maxDescriptorSetUpdateAfterBindInputAttachments;
} VkPhysicalDeviceDescriptorIndexingProperties;

typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo {
    VkStructureType    sType;
    const void*        pNext;
    uint32_t           descriptorSetCount;
    const uint32_t*    pDescriptorCounts;
} VkDescriptorSetVariableDescriptorCountAllocateInfo;

typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxVariableDescriptorCount;
} VkDescriptorSetVariableDescriptorCountLayoutSupport;

// Depth/stencil resolve configuration for subpasses and the matching
// device property query.
typedef struct VkSubpassDescriptionDepthStencilResolve {
    VkStructureType                  sType;
    const void*                      pNext;
    VkResolveModeFlagBits            depthResolveMode;
    VkResolveModeFlagBits            stencilResolveMode;
    const VkAttachmentReference2*    pDepthStencilResolveAttachment;
} VkSubpassDescriptionDepthStencilResolve;

typedef struct VkPhysicalDeviceDepthStencilResolveProperties {
    VkStructureType       sType;
    void*                 pNext;
    VkResolveModeFlags    supportedDepthResolveModes;
    VkResolveModeFlags    supportedStencilResolveModes;
    VkBool32              independentResolveNone;
    VkBool32              independentResolve;
} VkPhysicalDeviceDepthStencilResolveProperties;

typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           scalarBlockLayout;
} VkPhysicalDeviceScalarBlockLayoutFeatures;

typedef struct VkImageStencilUsageCreateInfo {
    VkStructureType      sType;
    const void*          pNext;
    VkImageUsageFlags    stencilUsage;
} VkImageStencilUsageCreateInfo;

typedef struct VkSamplerReductionModeCreateInfo {
    VkStructureType           sType;
    const void*               pNext;
    VkSamplerReductionMode    reductionMode;
} VkSamplerReductionModeCreateInfo;

typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           filterMinmaxSingleComponentFormats;
    VkBool32           filterMinmaxImageComponentMapping;
} VkPhysicalDeviceSamplerFilterMinmaxProperties;

typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           vulkanMemoryModel;
    VkBool32           vulkanMemoryModelDeviceScope;
    VkBool32           vulkanMemoryModelAvailabilityVisibilityChains;
} VkPhysicalDeviceVulkanMemoryModelFeatures;

// Imageless framebuffers: attachment metadata supplied at framebuffer
// creation, actual image views bound at render-pass begin.
typedef struct VkPhysicalDeviceImagelessFramebufferFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           imagelessFramebuffer;
} VkPhysicalDeviceImagelessFramebufferFeatures;

typedef struct VkFramebufferAttachmentImageInfo {
    VkStructureType       sType;
    const void*           pNext;
    VkImageCreateFlags    flags;
    VkImageUsageFlags     usage;
    uint32_t              width;
    uint32_t              height;
    uint32_t              layerCount;
    uint32_t              viewFormatCount;
    const VkFormat*       pViewFormats;
} VkFramebufferAttachmentImageInfo;

typedef struct VkFramebufferAttachmentsCreateInfo {
    VkStructureType                            sType;
    const void*                                pNext;
    uint32_t                                   attachmentImageInfoCount;
    const VkFramebufferAttachmentImageInfo*    pAttachmentImageInfos;
} VkFramebufferAttachmentsCreateInfo;

typedef struct VkRenderPassAttachmentBeginInfo {
    VkStructureType       sType;
    const void*           pNext;
    uint32_t              attachmentCount;
    const VkImageView*    pAttachments;
} VkRenderPassAttachmentBeginInfo;

typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           uniformBufferStandardLayout;
} VkPhysicalDeviceUniformBufferStandardLayoutFeatures;

typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           shaderSubgroupExtendedTypes;
} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures;

typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           separateDepthStencilLayouts;
} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures;

typedef struct VkAttachmentReferenceStencilLayout {
    VkStructureType    sType;
    void*              pNext;
    VkImageLayout      stencilLayout;
} VkAttachmentReferenceStencilLayout;

typedef struct VkAttachmentDescriptionStencilLayout {
    VkStructureType    sType;
    void*              pNext;
    VkImageLayout      stencilInitialLayout;
    VkImageLayout      stencilFinalLayout;
} VkAttachmentDescriptionStencilLayout;

typedef struct VkPhysicalDeviceHostQueryResetFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           hostQueryReset;
} VkPhysicalDeviceHostQueryResetFeatures;

// Timeline semaphores: feature/limit queries, semaphore type selection,
// per-submit counter values, and wait/signal parameter structs.
typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           timelineSemaphore;
} VkPhysicalDeviceTimelineSemaphoreFeatures;

typedef struct VkPhysicalDeviceTimelineSemaphoreProperties {
    VkStructureType    sType;
    void*              pNext;
    uint64_t           maxTimelineSemaphoreValueDifference;
} VkPhysicalDeviceTimelineSemaphoreProperties;

typedef struct VkSemaphoreTypeCreateInfo {
    VkStructureType    sType;
    const void*        pNext;
    VkSemaphoreType    semaphoreType;
    uint64_t           initialValue;
} VkSemaphoreTypeCreateInfo;

typedef struct VkTimelineSemaphoreSubmitInfo {
    VkStructureType    sType;
    const void*        pNext;
    uint32_t           waitSemaphoreValueCount;
    const uint64_t*    pWaitSemaphoreValues;
    uint32_t           signalSemaphoreValueCount;
    const uint64_t*    pSignalSemaphoreValues;
} VkTimelineSemaphoreSubmitInfo;

typedef struct VkSemaphoreWaitInfo {
    VkStructureType         sType;
    const void*             pNext;
    VkSemaphoreWaitFlags    flags;
    uint32_t                semaphoreCount;
    const VkSemaphore*      pSemaphores;
    const uint64_t*         pValues;
} VkSemaphoreWaitInfo;

typedef struct VkSemaphoreSignalInfo {
    VkStructureType    sType;
    const void*        pNext;
    VkSemaphore        semaphore;
    uint64_t           value;
} VkSemaphoreSignalInfo;

// Buffer device address and opaque capture-address parameter structs.
typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           bufferDeviceAddress;
    VkBool32           bufferDeviceAddressCaptureReplay;
    VkBool32           bufferDeviceAddressMultiDevice;
} VkPhysicalDeviceBufferDeviceAddressFeatures;

typedef struct VkBufferDeviceAddressInfo {
    VkStructureType    sType;
    const void*        pNext;
    VkBuffer           buffer;
} VkBufferDeviceAddressInfo;

typedef struct VkBufferOpaqueCaptureAddressCreateInfo {
    VkStructureType    sType;
    const void*        pNext;
    uint64_t           opaqueCaptureAddress;
} VkBufferOpaqueCaptureAddressCreateInfo;

typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo {
    VkStructureType    sType;
    const void*        pNext;
    uint64_t           opaqueCaptureAddress;
} VkMemoryOpaqueCaptureAddressAllocateInfo;

typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo {
    VkStructureType    sType;
    const void*        pNext;
    VkDeviceMemory     memory;
} VkDeviceMemoryOpaqueCaptureAddressInfo;

// Function-pointer typedefs matching the prototypes declared below.
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo*      pRenderPassBegin, const VkSubpassBeginInfo*      pSubpassBeginInfo);
typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo*      pSubpassBeginInfo, const VkSubpassEndInfo*        pSubpassEndInfo);
typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo*        pSubpassEndInfo);
typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);
typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);
typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);

// Direct-link prototypes; compiled out when VK_NO_PROTOTYPES is defined.
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    buffer,
    VkDeviceSize                                offset,
    VkBuffer                                    countBuffer,
    VkDeviceSize                                countBufferOffset,
    uint32_t                                    maxDrawCount,
    uint32_t                                    stride);

VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    buffer,
    VkDeviceSize                                offset,
    VkBuffer                                    countBuffer,
    VkDeviceSize                                countBufferOffset,
    uint32_t                                    maxDrawCount,
    uint32_t                                    stride);

VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2(
    VkDevice                                    device,
    const VkRenderPassCreateInfo2*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkRenderPass*                               pRenderPass);

VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    const VkSubpassBeginInfo*                   pSubpassBeginInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2(
    VkCommandBuffer                             commandBuffer,
    const VkSubpassBeginInfo*                   pSubpassBeginInfo,
    const VkSubpassEndInfo*                     pSubpassEndInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2(
    VkCommandBuffer                             commandBuffer,
    const VkSubpassEndInfo*                     pSubpassEndInfo);

VKAPI_ATTR void VKAPI_CALL vkResetQueryPool(
    VkDevice                                    device,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount);

VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue(
    VkDevice                                    device,
    VkSemaphore                                 semaphore,
    uint64_t*                                   pValue);

VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores(
    VkDevice                                    device,
    const VkSemaphoreWaitInfo*                  pWaitInfo,
    uint64_t                                    timeout);

VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore(
    VkDevice                                    device,
    const VkSemaphoreSignalInfo*                pSignalInfo);

VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress(
    VkDevice                                    device,
    const VkBufferDeviceAddressInfo*            pInfo);

VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkBufferDeviceAddressInfo*            pInfo);

VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
#endif


#define VK_VERSION_1_3 1
// Vulkan 1.3 version number
#define VK_API_VERSION_1_3 VK_MAKE_API_VERSION(0, 1, 3, 0)// Patch version should always be set to 0

typedef uint64_t VkFlags64;
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot)

typedef enum VkPipelineCreationFeedbackFlagBits {
    VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT = 0x00000001,
    VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT = 0x00000002,
    VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT = 0x00000004,
    VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
    VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT,
    VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT,
    VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineCreationFeedbackFlagBits;
typedef VkFlags VkPipelineCreationFeedbackFlags;

typedef enum VkToolPurposeFlagBits {
    VK_TOOL_PURPOSE_VALIDATION_BIT = 0x00000001,
    VK_TOOL_PURPOSE_PROFILING_BIT = 0x00000002,
    VK_TOOL_PURPOSE_TRACING_BIT = 0x00000004,
    VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT = 0x00000008,
    VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT = 0x00000010,
    VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020,
    VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040,
    VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = VK_TOOL_PURPOSE_VALIDATION_BIT,
    VK_TOOL_PURPOSE_PROFILING_BIT_EXT = VK_TOOL_PURPOSE_PROFILING_BIT,
    VK_TOOL_PURPOSE_TRACING_BIT_EXT = VK_TOOL_PURPOSE_TRACING_BIT,
    VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT,
    VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT,
    VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkToolPurposeFlagBits;
typedef VkFlags VkToolPurposeFlags;
typedef VkFlags VkPrivateDataSlotCreateFlags;
typedef VkFlags64 VkPipelineStageFlags2;

// NOTE(review): the 64-bit "2" flag families below use VkFlags64 typedefs
// with static const values (not enums) — several values exceed 32 bits
// (e.g. 0x100000000ULL), which a C enum cannot portably represent.
// Flag bits for VkPipelineStageFlagBits2
typedef VkFlags64 VkPipelineStageFlagBits2;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE = 0ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT = 0x00000001ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT = 0x00000002ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT = 0x00000004ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT = 0x00000008ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT = 0x00000040ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT = 0x00000080ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT = 0x00000100ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT = 0x00000200ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT = 0x00000800ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT = 0x00001000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT = 0x00001000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT = 0x00002000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT = 0x00004000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT = 0x00008000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT = 0x00010000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT = 0x100000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT = 0x200000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT = 0x400000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT = 0x800000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT = 0x1000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT = 0x2000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT = 0x4000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL;
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL;
#endif
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI = 0x10000000000ULL;
static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR = 0x10000000ULL;

typedef VkFlags64 VkAccessFlags2;

// Flag bits for VkAccessFlagBits2
typedef VkFlags64 VkAccessFlagBits2;
static const VkAccessFlagBits2 VK_ACCESS_2_NONE = 0ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT = 0x00000001ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT = 0x00000002ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT = 0x00000008ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT = 0x00000010ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT = 0x00000020ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT = 0x00000040ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT = 0x00000080ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT = 0x00000800ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT = 0x00001000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT = 0x00002000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT = 0x00004000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT = 0x00008000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT = 0x00010000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT = 0x100000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT = 0x200000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT = 0x400000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL;
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL;
#endif
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI = 0x8000000000ULL;
static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR = 0x10000000000ULL;


typedef enum VkSubmitFlagBits {
    VK_SUBMIT_PROTECTED_BIT = 0x00000001,
    VK_SUBMIT_PROTECTED_BIT_KHR = VK_SUBMIT_PROTECTED_BIT,
    VK_SUBMIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkSubmitFlagBits;
typedef VkFlags VkSubmitFlags;

typedef enum VkRenderingFlagBits {
    VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT = 0x00000001,
    VK_RENDERING_SUSPENDING_BIT = 0x00000002,
    VK_RENDERING_RESUMING_BIT = 0x00000004,
    VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT,
    VK_RENDERING_SUSPENDING_BIT_KHR = VK_RENDERING_SUSPENDING_BIT,
    VK_RENDERING_RESUMING_BIT_KHR = VK_RENDERING_RESUMING_BIT,
    VK_RENDERING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkRenderingFlagBits;
typedef VkFlags VkRenderingFlags;
typedef VkFlags64 VkFormatFeatureFlags2;

// Flag bits for VkFormatFeatureFlagBits2
typedef VkFlags64 VkFormatFeatureFlagBits2;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT = 0x00000001ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT = 0x00000002ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 0x00000002ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000010ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT = 0x00000040ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT = 0x00000080ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT = 0x00000400ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT = 0x00000800ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 0x00000800ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT = 0x00004000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT = 0x00008000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT = 0x00400000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT = 0x00800000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT = 0x80000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT = 0x100000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT = 0x200000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL;
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000ULL;
#endif
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000ULL;
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000ULL;
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000ULL;
#endif
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV = 0x4000000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM = 0x400000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM = 0x800000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLOCK_MATCHING_BIT_QCOM = 0x1000000000ULL;
static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BOX_FILTER_SAMPLED_BIT_QCOM = 0x2000000000ULL;

// Vulkan 1.3 aggregate feature/property structs.
typedef struct VkPhysicalDeviceVulkan13Features {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           robustImageAccess;
    VkBool32           inlineUniformBlock;
    VkBool32           descriptorBindingInlineUniformBlockUpdateAfterBind;
    VkBool32           pipelineCreationCacheControl;
    VkBool32           privateData;
    VkBool32           shaderDemoteToHelperInvocation;
    VkBool32           shaderTerminateInvocation;
    VkBool32           subgroupSizeControl;
    VkBool32           computeFullSubgroups;
    VkBool32           synchronization2;
    VkBool32           textureCompressionASTC_HDR;
    VkBool32           shaderZeroInitializeWorkgroupMemory;
    VkBool32           dynamicRendering;
    VkBool32           shaderIntegerDotProduct;
    VkBool32           maintenance4;
} VkPhysicalDeviceVulkan13Features;

typedef struct VkPhysicalDeviceVulkan13Properties {
    VkStructureType       sType;
    void*                 pNext;
    uint32_t              minSubgroupSize;
    uint32_t              maxSubgroupSize;
    uint32_t              maxComputeWorkgroupSubgroups;
    VkShaderStageFlags    requiredSubgroupSizeStages;
    uint32_t              maxInlineUniformBlockSize;
    uint32_t              maxPerStageDescriptorInlineUniformBlocks;
    
uint32_t              maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;\n    uint32_t              maxDescriptorSetInlineUniformBlocks;\n    uint32_t              maxDescriptorSetUpdateAfterBindInlineUniformBlocks;\n    uint32_t              maxInlineUniformTotalSize;\n    VkBool32              integerDotProduct8BitUnsignedAccelerated;\n    VkBool32              integerDotProduct8BitSignedAccelerated;\n    VkBool32              integerDotProduct8BitMixedSignednessAccelerated;\n    VkBool32              integerDotProduct4x8BitPackedUnsignedAccelerated;\n    VkBool32              integerDotProduct4x8BitPackedSignedAccelerated;\n    VkBool32              integerDotProduct4x8BitPackedMixedSignednessAccelerated;\n    VkBool32              integerDotProduct16BitUnsignedAccelerated;\n    VkBool32              integerDotProduct16BitSignedAccelerated;\n    VkBool32              integerDotProduct16BitMixedSignednessAccelerated;\n    VkBool32              integerDotProduct32BitUnsignedAccelerated;\n    VkBool32              integerDotProduct32BitSignedAccelerated;\n    VkBool32              integerDotProduct32BitMixedSignednessAccelerated;\n    VkBool32              integerDotProduct64BitUnsignedAccelerated;\n    VkBool32              integerDotProduct64BitSignedAccelerated;\n    VkBool32              integerDotProduct64BitMixedSignednessAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating8BitUnsignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating8BitSignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated;\n    VkBool32              
integerDotProductAccumulatingSaturating16BitUnsignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating16BitSignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating32BitUnsignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating32BitSignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating64BitUnsignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating64BitSignedAccelerated;\n    VkBool32              integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated;\n    VkDeviceSize          storageTexelBufferOffsetAlignmentBytes;\n    VkBool32              storageTexelBufferOffsetSingleTexelAlignment;\n    VkDeviceSize          uniformTexelBufferOffsetAlignmentBytes;\n    VkBool32              uniformTexelBufferOffsetSingleTexelAlignment;\n    VkDeviceSize          maxBufferSize;\n} VkPhysicalDeviceVulkan13Properties;\n\ntypedef struct VkPipelineCreationFeedback {\n    VkPipelineCreationFeedbackFlags    flags;\n    uint64_t                           duration;\n} VkPipelineCreationFeedback;\n\ntypedef struct VkPipelineCreationFeedbackCreateInfo {\n    VkStructureType                sType;\n    const void*                    pNext;\n    VkPipelineCreationFeedback*    pPipelineCreationFeedback;\n    uint32_t                       pipelineStageCreationFeedbackCount;\n    VkPipelineCreationFeedback*    pPipelineStageCreationFeedbacks;\n} VkPipelineCreationFeedbackCreateInfo;\n\ntypedef struct VkPhysicalDeviceShaderTerminateInvocationFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderTerminateInvocation;\n} VkPhysicalDeviceShaderTerminateInvocationFeatures;\n\ntypedef struct 
VkPhysicalDeviceToolProperties {\n    VkStructureType       sType;\n    void*                 pNext;\n    char                  name[VK_MAX_EXTENSION_NAME_SIZE];\n    char                  version[VK_MAX_EXTENSION_NAME_SIZE];\n    VkToolPurposeFlags    purposes;\n    char                  description[VK_MAX_DESCRIPTION_SIZE];\n    char                  layer[VK_MAX_EXTENSION_NAME_SIZE];\n} VkPhysicalDeviceToolProperties;\n\ntypedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderDemoteToHelperInvocation;\n} VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures;\n\ntypedef struct VkPhysicalDevicePrivateDataFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           privateData;\n} VkPhysicalDevicePrivateDataFeatures;\n\ntypedef struct VkDevicePrivateDataCreateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           privateDataSlotRequestCount;\n} VkDevicePrivateDataCreateInfo;\n\ntypedef struct VkPrivateDataSlotCreateInfo {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkPrivateDataSlotCreateFlags    flags;\n} VkPrivateDataSlotCreateInfo;\n\ntypedef struct VkPhysicalDevicePipelineCreationCacheControlFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pipelineCreationCacheControl;\n} VkPhysicalDevicePipelineCreationCacheControlFeatures;\n\ntypedef struct VkMemoryBarrier2 {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkPipelineStageFlags2    srcStageMask;\n    VkAccessFlags2           srcAccessMask;\n    VkPipelineStageFlags2    dstStageMask;\n    VkAccessFlags2           dstAccessMask;\n} VkMemoryBarrier2;\n\ntypedef struct VkBufferMemoryBarrier2 {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkPipelineStageFlags2    
srcStageMask;\n    VkAccessFlags2           srcAccessMask;\n    VkPipelineStageFlags2    dstStageMask;\n    VkAccessFlags2           dstAccessMask;\n    uint32_t                 srcQueueFamilyIndex;\n    uint32_t                 dstQueueFamilyIndex;\n    VkBuffer                 buffer;\n    VkDeviceSize             offset;\n    VkDeviceSize             size;\n} VkBufferMemoryBarrier2;\n\ntypedef struct VkImageMemoryBarrier2 {\n    VkStructureType            sType;\n    const void*                pNext;\n    VkPipelineStageFlags2      srcStageMask;\n    VkAccessFlags2             srcAccessMask;\n    VkPipelineStageFlags2      dstStageMask;\n    VkAccessFlags2             dstAccessMask;\n    VkImageLayout              oldLayout;\n    VkImageLayout              newLayout;\n    uint32_t                   srcQueueFamilyIndex;\n    uint32_t                   dstQueueFamilyIndex;\n    VkImage                    image;\n    VkImageSubresourceRange    subresourceRange;\n} VkImageMemoryBarrier2;\n\ntypedef struct VkDependencyInfo {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkDependencyFlags                dependencyFlags;\n    uint32_t                         memoryBarrierCount;\n    const VkMemoryBarrier2*          pMemoryBarriers;\n    uint32_t                         bufferMemoryBarrierCount;\n    const VkBufferMemoryBarrier2*    pBufferMemoryBarriers;\n    uint32_t                         imageMemoryBarrierCount;\n    const VkImageMemoryBarrier2*     pImageMemoryBarriers;\n} VkDependencyInfo;\n\ntypedef struct VkSemaphoreSubmitInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkSemaphore              semaphore;\n    uint64_t                 value;\n    VkPipelineStageFlags2    stageMask;\n    uint32_t                 deviceIndex;\n} VkSemaphoreSubmitInfo;\n\ntypedef struct VkCommandBufferSubmitInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkCommandBuffer  
  commandBuffer;\n    uint32_t           deviceMask;\n} VkCommandBufferSubmitInfo;\n\ntypedef struct VkSubmitInfo2 {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    VkSubmitFlags                       flags;\n    uint32_t                            waitSemaphoreInfoCount;\n    const VkSemaphoreSubmitInfo*        pWaitSemaphoreInfos;\n    uint32_t                            commandBufferInfoCount;\n    const VkCommandBufferSubmitInfo*    pCommandBufferInfos;\n    uint32_t                            signalSemaphoreInfoCount;\n    const VkSemaphoreSubmitInfo*        pSignalSemaphoreInfos;\n} VkSubmitInfo2;\n\ntypedef struct VkPhysicalDeviceSynchronization2Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           synchronization2;\n} VkPhysicalDeviceSynchronization2Features;\n\ntypedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderZeroInitializeWorkgroupMemory;\n} VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures;\n\ntypedef struct VkPhysicalDeviceImageRobustnessFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           robustImageAccess;\n} VkPhysicalDeviceImageRobustnessFeatures;\n\ntypedef struct VkBufferCopy2 {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceSize       srcOffset;\n    VkDeviceSize       dstOffset;\n    VkDeviceSize       size;\n} VkBufferCopy2;\n\ntypedef struct VkCopyBufferInfo2 {\n    VkStructureType         sType;\n    const void*             pNext;\n    VkBuffer                srcBuffer;\n    VkBuffer                dstBuffer;\n    uint32_t                regionCount;\n    const VkBufferCopy2*    pRegions;\n} VkCopyBufferInfo2;\n\ntypedef struct VkImageCopy2 {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkImageSubresourceLayers    
srcSubresource;\n    VkOffset3D                  srcOffset;\n    VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffset;\n    VkExtent3D                  extent;\n} VkImageCopy2;\n\ntypedef struct VkCopyImageInfo2 {\n    VkStructureType        sType;\n    const void*            pNext;\n    VkImage                srcImage;\n    VkImageLayout          srcImageLayout;\n    VkImage                dstImage;\n    VkImageLayout          dstImageLayout;\n    uint32_t               regionCount;\n    const VkImageCopy2*    pRegions;\n} VkCopyImageInfo2;\n\ntypedef struct VkBufferImageCopy2 {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkDeviceSize                bufferOffset;\n    uint32_t                    bufferRowLength;\n    uint32_t                    bufferImageHeight;\n    VkImageSubresourceLayers    imageSubresource;\n    VkOffset3D                  imageOffset;\n    VkExtent3D                  imageExtent;\n} VkBufferImageCopy2;\n\ntypedef struct VkCopyBufferToImageInfo2 {\n    VkStructureType              sType;\n    const void*                  pNext;\n    VkBuffer                     srcBuffer;\n    VkImage                      dstImage;\n    VkImageLayout                dstImageLayout;\n    uint32_t                     regionCount;\n    const VkBufferImageCopy2*    pRegions;\n} VkCopyBufferToImageInfo2;\n\ntypedef struct VkCopyImageToBufferInfo2 {\n    VkStructureType              sType;\n    const void*                  pNext;\n    VkImage                      srcImage;\n    VkImageLayout                srcImageLayout;\n    VkBuffer                     dstBuffer;\n    uint32_t                     regionCount;\n    const VkBufferImageCopy2*    pRegions;\n} VkCopyImageToBufferInfo2;\n\ntypedef struct VkImageBlit2 {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkImageSubresourceLayers    srcSubresource;\n    VkOffset3D                  srcOffsets[2];\n 
   VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffsets[2];\n} VkImageBlit2;\n\ntypedef struct VkBlitImageInfo2 {\n    VkStructureType        sType;\n    const void*            pNext;\n    VkImage                srcImage;\n    VkImageLayout          srcImageLayout;\n    VkImage                dstImage;\n    VkImageLayout          dstImageLayout;\n    uint32_t               regionCount;\n    const VkImageBlit2*    pRegions;\n    VkFilter               filter;\n} VkBlitImageInfo2;\n\ntypedef struct VkImageResolve2 {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkImageSubresourceLayers    srcSubresource;\n    VkOffset3D                  srcOffset;\n    VkImageSubresourceLayers    dstSubresource;\n    VkOffset3D                  dstOffset;\n    VkExtent3D                  extent;\n} VkImageResolve2;\n\ntypedef struct VkResolveImageInfo2 {\n    VkStructureType           sType;\n    const void*               pNext;\n    VkImage                   srcImage;\n    VkImageLayout             srcImageLayout;\n    VkImage                   dstImage;\n    VkImageLayout             dstImageLayout;\n    uint32_t                  regionCount;\n    const VkImageResolve2*    pRegions;\n} VkResolveImageInfo2;\n\ntypedef struct VkPhysicalDeviceSubgroupSizeControlFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           subgroupSizeControl;\n    VkBool32           computeFullSubgroups;\n} VkPhysicalDeviceSubgroupSizeControlFeatures;\n\ntypedef struct VkPhysicalDeviceSubgroupSizeControlProperties {\n    VkStructureType       sType;\n    void*                 pNext;\n    uint32_t              minSubgroupSize;\n    uint32_t              maxSubgroupSize;\n    uint32_t              maxComputeWorkgroupSubgroups;\n    VkShaderStageFlags    requiredSubgroupSizeStages;\n} VkPhysicalDeviceSubgroupSizeControlProperties;\n\ntypedef struct 
VkPipelineShaderStageRequiredSubgroupSizeCreateInfo {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           requiredSubgroupSize;\n} VkPipelineShaderStageRequiredSubgroupSizeCreateInfo;\n\ntypedef struct VkPhysicalDeviceInlineUniformBlockFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           inlineUniformBlock;\n    VkBool32           descriptorBindingInlineUniformBlockUpdateAfterBind;\n} VkPhysicalDeviceInlineUniformBlockFeatures;\n\ntypedef struct VkPhysicalDeviceInlineUniformBlockProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxInlineUniformBlockSize;\n    uint32_t           maxPerStageDescriptorInlineUniformBlocks;\n    uint32_t           maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;\n    uint32_t           maxDescriptorSetInlineUniformBlocks;\n    uint32_t           maxDescriptorSetUpdateAfterBindInlineUniformBlocks;\n} VkPhysicalDeviceInlineUniformBlockProperties;\n\ntypedef struct VkWriteDescriptorSetInlineUniformBlock {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           dataSize;\n    const void*        pData;\n} VkWriteDescriptorSetInlineUniformBlock;\n\ntypedef struct VkDescriptorPoolInlineUniformBlockCreateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           maxInlineUniformBlockBindings;\n} VkDescriptorPoolInlineUniformBlockCreateInfo;\n\ntypedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           textureCompressionASTC_HDR;\n} VkPhysicalDeviceTextureCompressionASTCHDRFeatures;\n\ntypedef struct VkRenderingAttachmentInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImageView              imageView;\n    VkImageLayout            imageLayout;\n    VkResolveModeFlagBits    resolveMode;\n    VkImageView              
resolveImageView;\n    VkImageLayout            resolveImageLayout;\n    VkAttachmentLoadOp       loadOp;\n    VkAttachmentStoreOp      storeOp;\n    VkClearValue             clearValue;\n} VkRenderingAttachmentInfo;\n\ntypedef struct VkRenderingInfo {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    VkRenderingFlags                    flags;\n    VkRect2D                            renderArea;\n    uint32_t                            layerCount;\n    uint32_t                            viewMask;\n    uint32_t                            colorAttachmentCount;\n    const VkRenderingAttachmentInfo*    pColorAttachments;\n    const VkRenderingAttachmentInfo*    pDepthAttachment;\n    const VkRenderingAttachmentInfo*    pStencilAttachment;\n} VkRenderingInfo;\n\ntypedef struct VkPipelineRenderingCreateInfo {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           viewMask;\n    uint32_t           colorAttachmentCount;\n    const VkFormat*    pColorAttachmentFormats;\n    VkFormat           depthAttachmentFormat;\n    VkFormat           stencilAttachmentFormat;\n} VkPipelineRenderingCreateInfo;\n\ntypedef struct VkPhysicalDeviceDynamicRenderingFeatures {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           dynamicRendering;\n} VkPhysicalDeviceDynamicRenderingFeatures;\n\ntypedef struct VkCommandBufferInheritanceRenderingInfo {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkRenderingFlags         flags;\n    uint32_t                 viewMask;\n    uint32_t                 colorAttachmentCount;\n    const VkFormat*          pColorAttachmentFormats;\n    VkFormat                 depthAttachmentFormat;\n    VkFormat                 stencilAttachmentFormat;\n    VkSampleCountFlagBits    rasterizationSamples;\n} VkCommandBufferInheritanceRenderingInfo;\n\ntypedef struct VkPhysicalDeviceShaderIntegerDotProductFeatures {\n    
VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderIntegerDotProduct;\n} VkPhysicalDeviceShaderIntegerDotProductFeatures;\n\ntypedef struct VkPhysicalDeviceShaderIntegerDotProductProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           integerDotProduct8BitUnsignedAccelerated;\n    VkBool32           integerDotProduct8BitSignedAccelerated;\n    VkBool32           integerDotProduct8BitMixedSignednessAccelerated;\n    VkBool32           integerDotProduct4x8BitPackedUnsignedAccelerated;\n    VkBool32           integerDotProduct4x8BitPackedSignedAccelerated;\n    VkBool32           integerDotProduct4x8BitPackedMixedSignednessAccelerated;\n    VkBool32           integerDotProduct16BitUnsignedAccelerated;\n    VkBool32           integerDotProduct16BitSignedAccelerated;\n    VkBool32           integerDotProduct16BitMixedSignednessAccelerated;\n    VkBool32           integerDotProduct32BitUnsignedAccelerated;\n    VkBool32           integerDotProduct32BitSignedAccelerated;\n    VkBool32           integerDotProduct32BitMixedSignednessAccelerated;\n    VkBool32           integerDotProduct64BitUnsignedAccelerated;\n    VkBool32           integerDotProduct64BitSignedAccelerated;\n    VkBool32           integerDotProduct64BitMixedSignednessAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating8BitUnsignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating8BitSignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating16BitUnsignedAccelerated;\n    
VkBool32           integerDotProductAccumulatingSaturating16BitSignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating32BitUnsignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating32BitSignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating64BitUnsignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating64BitSignedAccelerated;\n    VkBool32           integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated;\n} VkPhysicalDeviceShaderIntegerDotProductProperties;\n\ntypedef struct VkPhysicalDeviceTexelBufferAlignmentProperties {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceSize       storageTexelBufferOffsetAlignmentBytes;\n    VkBool32           storageTexelBufferOffsetSingleTexelAlignment;\n    VkDeviceSize       uniformTexelBufferOffsetAlignmentBytes;\n    VkBool32           uniformTexelBufferOffsetSingleTexelAlignment;\n} VkPhysicalDeviceTexelBufferAlignmentProperties;\n\ntypedef struct VkFormatProperties3 {\n    VkStructureType          sType;\n    void*                    pNext;\n    VkFormatFeatureFlags2    linearTilingFeatures;\n    VkFormatFeatureFlags2    optimalTilingFeatures;\n    VkFormatFeatureFlags2    bufferFeatures;\n} VkFormatProperties3;\n\ntypedef struct VkPhysicalDeviceMaintenance4Features {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           maintenance4;\n} VkPhysicalDeviceMaintenance4Features;\n\ntypedef struct VkPhysicalDeviceMaintenance4Properties {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceSize       maxBufferSize;\n} VkPhysicalDeviceMaintenance4Properties;\n\ntypedef struct VkDeviceBufferMemoryRequirements {\n    VkStructureType             
 sType;\n    const void*                  pNext;\n    const VkBufferCreateInfo*    pCreateInfo;\n} VkDeviceBufferMemoryRequirements;\n\ntypedef struct VkDeviceImageMemoryRequirements {\n    VkStructureType             sType;\n    const void*                 pNext;\n    const VkImageCreateInfo*    pCreateInfo;\n    VkImageAspectFlagBits       planeAspect;\n} VkDeviceImageMemoryRequirements;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolProperties)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlot)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);\ntypedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlot)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkSetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);\ntypedef void (VKAPI_PTR *PFN_vkGetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetEvent2)(VkCommandBuffer                   commandBuffer, VkEvent                                             event, const VkDependencyInfo*                             pDependencyInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdResetEvent2)(VkCommandBuffer                   commandBuffer, VkEvent                                             event, VkPipelineStageFlags2               stageMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2)(VkCommandBuffer                   commandBuffer, uint32_t                                            eventCount, const VkEvent*                     pEvents, const VkDependencyInfo*            pDependencyInfos);\ntypedef void (VKAPI_PTR 
*PFN_vkCmdPipelineBarrier2)(VkCommandBuffer                   commandBuffer, const VkDependencyInfo*                             pDependencyInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2)(VkCommandBuffer                   commandBuffer, VkPipelineStageFlags2               stage, VkQueryPool                                         queryPool, uint32_t                                            query);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2)(VkQueue                           queue, uint32_t                            submitCount, const VkSubmitInfo2*              pSubmits, VkFence           fence);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImage2)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdBlitImage2)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdResolveImage2)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginRendering)(VkCommandBuffer                   commandBuffer, const VkRenderingInfo*                              pRenderingInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndRendering)(VkCommandBuffer                   commandBuffer);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetCullMode)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetFrontFace)(VkCommandBuffer commandBuffer, VkFrontFace frontFace);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopology)(VkCommandBuffer commandBuffer, VkPrimitiveTopology 
primitiveTopology);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCount)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCount)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnable)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOp)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnable)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilOp)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnable)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnable)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirements)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* 
pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolProperties(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pToolCount,\n    VkPhysicalDeviceToolProperties*             pToolProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlot(\n    VkDevice                                    device,\n    const VkPrivateDataSlotCreateInfo*          pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPrivateDataSlot*                          pPrivateDataSlot);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlot(\n    VkDevice                                    device,\n    VkPrivateDataSlot                           privateDataSlot,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateData(\n    VkDevice                                    device,\n    VkObjectType                                objectType,\n    uint64_t                                    objectHandle,\n    VkPrivateDataSlot                           privateDataSlot,\n    uint64_t                                    data);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPrivateData(\n    VkDevice                                    device,\n    VkObjectType                                objectType,\n    uint64_t                                    objectHandle,\n    VkPrivateDataSlot                           privateDataSlot,\n    uint64_t*                                   pData);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2(\n    VkCommandBuffer                             commandBuffer,\n    VkEvent                                     
event,\n    const VkDependencyInfo*                     pDependencyInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2(\n    VkCommandBuffer                             commandBuffer,\n    VkEvent                                     event,\n    VkPipelineStageFlags2                       stageMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    eventCount,\n    const VkEvent*                              pEvents,\n    const VkDependencyInfo*                     pDependencyInfos);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2(\n    VkCommandBuffer                             commandBuffer,\n    const VkDependencyInfo*                     pDependencyInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlags2                       stage,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2(\n    VkQueue                                     queue,\n    uint32_t                                    submitCount,\n    const VkSubmitInfo2*                        pSubmits,\n    VkFence                                     fence);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyBufferInfo2*                    pCopyBufferInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyImageInfo2*                     pCopyImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyBufferToImageInfo2*             pCopyBufferToImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2(\n    VkCommandBuffer                 
            commandBuffer,\n    const VkCopyImageToBufferInfo2*             pCopyImageToBufferInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2(\n    VkCommandBuffer                             commandBuffer,\n    const VkBlitImageInfo2*                     pBlitImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2(\n    VkCommandBuffer                             commandBuffer,\n    const VkResolveImageInfo2*                  pResolveImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginRendering(\n    VkCommandBuffer                             commandBuffer,\n    const VkRenderingInfo*                      pRenderingInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndRendering(\n    VkCommandBuffer                             commandBuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetCullMode(\n    VkCommandBuffer                             commandBuffer,\n    VkCullModeFlags                             cullMode);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFace(\n    VkCommandBuffer                             commandBuffer,\n    VkFrontFace                                 frontFace);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopology(\n    VkCommandBuffer                             commandBuffer,\n    VkPrimitiveTopology                         primitiveTopology);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCount(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    viewportCount,\n    const VkViewport*                           pViewports);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCount(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    scissorCount,\n    const VkRect2D*                             pScissors);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstBinding,\n    uint32_t                      
              bindingCount,\n    const VkBuffer*                             pBuffers,\n    const VkDeviceSize*                         pOffsets,\n    const VkDeviceSize*                         pSizes,\n    const VkDeviceSize*                         pStrides);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthWriteEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOp(\n    VkCommandBuffer                             commandBuffer,\n    VkCompareOp                                 depthCompareOp);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthBoundsTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    stencilTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOp(\n    VkCommandBuffer                             commandBuffer,\n    VkStencilFaceFlags                          faceMask,\n    VkStencilOp                                 failOp,\n    VkStencilOp                                 passOp,\n    VkStencilOp                                 depthFailOp,\n    VkCompareOp                                 compareOp);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    rasterizerDiscardEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    
depthBiasEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnable(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    primitiveRestartEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirements(\n    VkDevice                                    device,\n    const VkDeviceBufferMemoryRequirements*     pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirements(\n    VkDevice                                    device,\n    const VkDeviceImageMemoryRequirements*      pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirements(\n    VkDevice                                    device,\n    const VkDeviceImageMemoryRequirements*      pInfo,\n    uint32_t*                                   pSparseMemoryRequirementCount,\n    VkSparseImageMemoryRequirements2*           pSparseMemoryRequirements);\n#endif\n\n\n#define VK_KHR_surface 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)\n#define VK_KHR_SURFACE_SPEC_VERSION       25\n#define VK_KHR_SURFACE_EXTENSION_NAME     \"VK_KHR_surface\"\n\ntypedef enum VkPresentModeKHR {\n    VK_PRESENT_MODE_IMMEDIATE_KHR = 0,\n    VK_PRESENT_MODE_MAILBOX_KHR = 1,\n    VK_PRESENT_MODE_FIFO_KHR = 2,\n    VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3,\n    VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000,\n    VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001,\n    VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkPresentModeKHR;\n\ntypedef enum VkColorSpaceKHR {\n    VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,\n    VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,\n    VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,\n    VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003,\n    VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,\n    VK_COLOR_SPACE_BT709_LINEAR_EXT = 
1000104005,\n    VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,\n    VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,\n    VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,\n    VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,\n    VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,\n    VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,\n    VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,\n    VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,\n    VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014,\n    VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000,\n    VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,\n    VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT,\n    VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkColorSpaceKHR;\n\ntypedef enum VkSurfaceTransformFlagBitsKHR {\n    VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,\n    VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,\n    VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,\n    VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,\n    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,\n    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,\n    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,\n    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,\n    VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,\n    VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkSurfaceTransformFlagBitsKHR;\n\ntypedef enum VkCompositeAlphaFlagBitsKHR {\n    VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,\n    VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002,\n    VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004,\n    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008,\n    VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkCompositeAlphaFlagBitsKHR;\ntypedef VkFlags VkCompositeAlphaFlagsKHR;\ntypedef VkFlags VkSurfaceTransformFlagsKHR;\ntypedef struct VkSurfaceCapabilitiesKHR {\n    
uint32_t                         minImageCount;\n    uint32_t                         maxImageCount;\n    VkExtent2D                       currentExtent;\n    VkExtent2D                       minImageExtent;\n    VkExtent2D                       maxImageExtent;\n    uint32_t                         maxImageArrayLayers;\n    VkSurfaceTransformFlagsKHR       supportedTransforms;\n    VkSurfaceTransformFlagBitsKHR    currentTransform;\n    VkCompositeAlphaFlagsKHR         supportedCompositeAlpha;\n    VkImageUsageFlags                supportedUsageFlags;\n} VkSurfaceCapabilitiesKHR;\n\ntypedef struct VkSurfaceFormatKHR {\n    VkFormat           format;\n    VkColorSpaceKHR    colorSpace;\n} VkSurfaceFormatKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(\n    VkInstance                                  instance,\n    VkSurfaceKHR                                surface,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(\n    VkPhysicalDevice                            physicalDevice,\n  
  uint32_t                                    queueFamilyIndex,\n    VkSurfaceKHR                                surface,\n    VkBool32*                                   pSupported);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkSurfaceKHR                                surface,\n    VkSurfaceCapabilitiesKHR*                   pSurfaceCapabilities);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkSurfaceKHR                                surface,\n    uint32_t*                                   pSurfaceFormatCount,\n    VkSurfaceFormatKHR*                         pSurfaceFormats);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkSurfaceKHR                                surface,\n    uint32_t*                                   pPresentModeCount,\n    VkPresentModeKHR*                           pPresentModes);\n#endif\n\n\n#define VK_KHR_swapchain 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR)\n#define VK_KHR_SWAPCHAIN_SPEC_VERSION     70\n#define VK_KHR_SWAPCHAIN_EXTENSION_NAME   \"VK_KHR_swapchain\"\n\ntypedef enum VkSwapchainCreateFlagBitsKHR {\n    VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001,\n    VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002,\n    VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004,\n    VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkSwapchainCreateFlagBitsKHR;\ntypedef VkFlags VkSwapchainCreateFlagsKHR;\n\ntypedef enum VkDeviceGroupPresentModeFlagBitsKHR {\n    VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001,\n    VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002,\n    VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004,\n    
VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008,\n    VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkDeviceGroupPresentModeFlagBitsKHR;\ntypedef VkFlags VkDeviceGroupPresentModeFlagsKHR;\ntypedef struct VkSwapchainCreateInfoKHR {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkSwapchainCreateFlagsKHR        flags;\n    VkSurfaceKHR                     surface;\n    uint32_t                         minImageCount;\n    VkFormat                         imageFormat;\n    VkColorSpaceKHR                  imageColorSpace;\n    VkExtent2D                       imageExtent;\n    uint32_t                         imageArrayLayers;\n    VkImageUsageFlags                imageUsage;\n    VkSharingMode                    imageSharingMode;\n    uint32_t                         queueFamilyIndexCount;\n    const uint32_t*                  pQueueFamilyIndices;\n    VkSurfaceTransformFlagBitsKHR    preTransform;\n    VkCompositeAlphaFlagBitsKHR      compositeAlpha;\n    VkPresentModeKHR                 presentMode;\n    VkBool32                         clipped;\n    VkSwapchainKHR                   oldSwapchain;\n} VkSwapchainCreateInfoKHR;\n\ntypedef struct VkPresentInfoKHR {\n    VkStructureType          sType;\n    const void*              pNext;\n    uint32_t                 waitSemaphoreCount;\n    const VkSemaphore*       pWaitSemaphores;\n    uint32_t                 swapchainCount;\n    const VkSwapchainKHR*    pSwapchains;\n    const uint32_t*          pImageIndices;\n    VkResult*                pResults;\n} VkPresentInfoKHR;\n\ntypedef struct VkImageSwapchainCreateInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkSwapchainKHR     swapchain;\n} VkImageSwapchainCreateInfoKHR;\n\ntypedef struct VkBindImageMemorySwapchainInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkSwapchainKHR     swapchain;\n    uint32_t           
imageIndex;\n} VkBindImageMemorySwapchainInfoKHR;\n\ntypedef struct VkAcquireNextImageInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkSwapchainKHR     swapchain;\n    uint64_t           timeout;\n    VkSemaphore        semaphore;\n    VkFence            fence;\n    uint32_t           deviceMask;\n} VkAcquireNextImageInfoKHR;\n\ntypedef struct VkDeviceGroupPresentCapabilitiesKHR {\n    VkStructureType                     sType;\n    void*                               pNext;\n    uint32_t                            presentMask[VK_MAX_DEVICE_GROUP_SIZE];\n    VkDeviceGroupPresentModeFlagsKHR    modes;\n} VkDeviceGroupPresentCapabilitiesKHR;\n\ntypedef struct VkDeviceGroupPresentInfoKHR {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    uint32_t                               swapchainCount;\n    const uint32_t*                        pDeviceMasks;\n    VkDeviceGroupPresentModeFlagBitsKHR    mode;\n} VkDeviceGroupPresentInfoKHR;\n\ntypedef struct VkDeviceGroupSwapchainCreateInfoKHR {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    VkDeviceGroupPresentModeFlagsKHR    modes;\n} VkDeviceGroupSwapchainCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);\ntypedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages);\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue 
queue, const VkPresentInfoKHR* pPresentInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects);\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(\n    VkDevice                                    device,\n    const VkSwapchainCreateInfoKHR*             pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSwapchainKHR*                             pSwapchain);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    uint32_t*                                   pSwapchainImageCount,\n    VkImage*                                    pSwapchainImages);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    uint64_t                                    timeout,\n    VkSemaphore                                 semaphore,\n    VkFence                                     fence,\n    uint32_t*                                   pImageIndex);\n\nVKAPI_ATTR VkResult VKAPI_CALL 
vkQueuePresentKHR(\n    VkQueue                                     queue,\n    const VkPresentInfoKHR*                     pPresentInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR(\n    VkDevice                                    device,\n    VkDeviceGroupPresentCapabilitiesKHR*        pDeviceGroupPresentCapabilities);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(\n    VkDevice                                    device,\n    VkSurfaceKHR                                surface,\n    VkDeviceGroupPresentModeFlagsKHR*           pModes);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkSurfaceKHR                                surface,\n    uint32_t*                                   pRectCount,\n    VkRect2D*                                   pRects);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR(\n    VkDevice                                    device,\n    const VkAcquireNextImageInfoKHR*            pAcquireInfo,\n    uint32_t*                                   pImageIndex);\n#endif\n\n\n#define VK_KHR_display 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)\n#define VK_KHR_DISPLAY_SPEC_VERSION       23\n#define VK_KHR_DISPLAY_EXTENSION_NAME     \"VK_KHR_display\"\ntypedef VkFlags VkDisplayModeCreateFlagsKHR;\n\ntypedef enum VkDisplayPlaneAlphaFlagBitsKHR {\n    VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001,\n    VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002,\n    VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004,\n    VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008,\n    VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkDisplayPlaneAlphaFlagBitsKHR;\ntypedef VkFlags VkDisplayPlaneAlphaFlagsKHR;\ntypedef VkFlags VkDisplaySurfaceCreateFlagsKHR;\ntypedef struct VkDisplayModeParametersKHR {\n    
VkExtent2D    visibleRegion;\n    uint32_t      refreshRate;\n} VkDisplayModeParametersKHR;\n\ntypedef struct VkDisplayModeCreateInfoKHR {\n    VkStructureType                sType;\n    const void*                    pNext;\n    VkDisplayModeCreateFlagsKHR    flags;\n    VkDisplayModeParametersKHR     parameters;\n} VkDisplayModeCreateInfoKHR;\n\ntypedef struct VkDisplayModePropertiesKHR {\n    VkDisplayModeKHR              displayMode;\n    VkDisplayModeParametersKHR    parameters;\n} VkDisplayModePropertiesKHR;\n\ntypedef struct VkDisplayPlaneCapabilitiesKHR {\n    VkDisplayPlaneAlphaFlagsKHR    supportedAlpha;\n    VkOffset2D                     minSrcPosition;\n    VkOffset2D                     maxSrcPosition;\n    VkExtent2D                     minSrcExtent;\n    VkExtent2D                     maxSrcExtent;\n    VkOffset2D                     minDstPosition;\n    VkOffset2D                     maxDstPosition;\n    VkExtent2D                     minDstExtent;\n    VkExtent2D                     maxDstExtent;\n} VkDisplayPlaneCapabilitiesKHR;\n\ntypedef struct VkDisplayPlanePropertiesKHR {\n    VkDisplayKHR    currentDisplay;\n    uint32_t        currentStackIndex;\n} VkDisplayPlanePropertiesKHR;\n\ntypedef struct VkDisplayPropertiesKHR {\n    VkDisplayKHR                  display;\n    const char*                   displayName;\n    VkExtent2D                    physicalDimensions;\n    VkExtent2D                    physicalResolution;\n    VkSurfaceTransformFlagsKHR    supportedTransforms;\n    VkBool32                      planeReorderPossible;\n    VkBool32                      persistentContent;\n} VkDisplayPropertiesKHR;\n\ntypedef struct VkDisplaySurfaceCreateInfoKHR {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkDisplaySurfaceCreateFlagsKHR    flags;\n    VkDisplayModeKHR                  displayMode;\n    uint32_t                          planeIndex;\n    uint32_t                          
planeStackIndex;\n    VkSurfaceTransformFlagBitsKHR     transform;\n    float                             globalAlpha;\n    VkDisplayPlaneAlphaFlagBitsKHR    alphaMode;\n    VkExtent2D                        imageExtent;\n} VkDisplaySurfaceCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayPropertiesKHR*                     pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR(\n    VkPhysicalDevice      
                      physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayPlanePropertiesKHR*                pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t                                    planeIndex,\n    uint32_t*                                   pDisplayCount,\n    VkDisplayKHR*                               pDisplays);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayKHR                                display,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayModePropertiesKHR*                 pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayKHR                                display,\n    const VkDisplayModeCreateInfoKHR*           pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDisplayModeKHR*                           pMode);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayModeKHR                            mode,\n    uint32_t                                    planeIndex,\n    VkDisplayPlaneCapabilitiesKHR*              pCapabilities);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(\n    VkInstance                                  instance,\n    const VkDisplaySurfaceCreateInfoKHR*        pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n#endif\n\n\n#define VK_KHR_display_swapchain 1\n#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10\n#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME \"VK_KHR_display_swapchain\"\ntypedef struct 
VkDisplayPresentInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkRect2D           srcRect;\n    VkRect2D           dstRect;\n    VkBool32           persistent;\n} VkDisplayPresentInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(\n    VkDevice                                    device,\n    uint32_t                                    swapchainCount,\n    const VkSwapchainCreateInfoKHR*             pCreateInfos,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSwapchainKHR*                             pSwapchains);\n#endif\n\n\n#define VK_KHR_sampler_mirror_clamp_to_edge 1\n#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3\n#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME \"VK_KHR_sampler_mirror_clamp_to_edge\"\n\n\n#define VK_KHR_dynamic_rendering 1\n#define VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION 1\n#define VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME \"VK_KHR_dynamic_rendering\"\ntypedef VkRenderingFlags VkRenderingFlagsKHR;\n\ntypedef VkRenderingFlagBits VkRenderingFlagBitsKHR;\n\ntypedef VkRenderingInfo VkRenderingInfoKHR;\n\ntypedef VkRenderingAttachmentInfo VkRenderingAttachmentInfoKHR;\n\ntypedef VkPipelineRenderingCreateInfo VkPipelineRenderingCreateInfoKHR;\n\ntypedef VkPhysicalDeviceDynamicRenderingFeatures VkPhysicalDeviceDynamicRenderingFeaturesKHR;\n\ntypedef VkCommandBufferInheritanceRenderingInfo VkCommandBufferInheritanceRenderingInfoKHR;\n\ntypedef struct VkRenderingFragmentShadingRateAttachmentInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImageView        imageView;\n    VkImageLayout      imageLayout;\n    VkExtent2D         shadingRateAttachmentTexelSize;\n} 
VkRenderingFragmentShadingRateAttachmentInfoKHR;\n\ntypedef struct VkRenderingFragmentDensityMapAttachmentInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImageView        imageView;\n    VkImageLayout      imageLayout;\n} VkRenderingFragmentDensityMapAttachmentInfoEXT;\n\ntypedef struct VkAttachmentSampleCountInfoAMD {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    uint32_t                        colorAttachmentCount;\n    const VkSampleCountFlagBits*    pColorAttachmentSamples;\n    VkSampleCountFlagBits           depthStencilAttachmentSamples;\n} VkAttachmentSampleCountInfoAMD;\n\ntypedef VkAttachmentSampleCountInfoAMD VkAttachmentSampleCountInfoNV;\n\ntypedef struct VkMultiviewPerViewAttributesInfoNVX {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           perViewAttributes;\n    VkBool32           perViewAttributesPositionXOnly;\n} VkMultiviewPerViewAttributesInfoNVX;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginRenderingKHR)(VkCommandBuffer                   commandBuffer, const VkRenderingInfo*                              pRenderingInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndRenderingKHR)(VkCommandBuffer                   commandBuffer);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderingKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkRenderingInfo*                      pRenderingInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndRenderingKHR(\n    VkCommandBuffer                             commandBuffer);\n#endif\n\n\n#define VK_KHR_multiview 1\n#define VK_KHR_MULTIVIEW_SPEC_VERSION     1\n#define VK_KHR_MULTIVIEW_EXTENSION_NAME   \"VK_KHR_multiview\"\ntypedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR;\n\ntypedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR;\n\ntypedef VkPhysicalDeviceMultiviewProperties 
VkPhysicalDeviceMultiviewPropertiesKHR;\n\n\n\n#define VK_KHR_get_physical_device_properties2 1\n#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2\n#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME \"VK_KHR_get_physical_device_properties2\"\ntypedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR;\n\ntypedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR;\n\ntypedef VkFormatProperties2 VkFormatProperties2KHR;\n\ntypedef VkImageFormatProperties2 VkImageFormatProperties2KHR;\n\ntypedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR;\n\ntypedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR;\n\ntypedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR;\n\ntypedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR;\n\ntypedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);\ntypedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceFeatures2*                  pFeatures);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceProperties2*                pProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkFormatProperties2*                        pFormatProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceImageFormatInfo2*     pImageFormatInfo,\n    VkImageFormatProperties2*                   pImageFormatProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pQueueFamilyPropertyCount,\n    VkQueueFamilyProperties2*                   pQueueFamilyProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkPhysicalDeviceMemoryProperties2*          pMemoryProperties);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,\n    uint32_t*                                   pPropertyCount,\n    VkSparseImageFormatProperties2*             
pProperties);\n#endif\n\n\n#define VK_KHR_device_group 1\n#define VK_KHR_DEVICE_GROUP_SPEC_VERSION  4\n#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME \"VK_KHR_device_group\"\ntypedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR;\n\ntypedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR;\n\ntypedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR;\n\ntypedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR;\n\ntypedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR;\n\ntypedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR;\n\ntypedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR;\n\ntypedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR;\n\ntypedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR;\n\ntypedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR;\n\ntypedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR(\n    VkDevice                                    device,\n    uint32_t                                    heapIndex,\n    uint32_t                                    localDeviceIndex,\n    uint32_t                                    remoteDeviceIndex,\n    VkPeerMemoryFeatureFlags*                   pPeerMemoryFeatures);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR(\n    VkCommandBuffer                  
           commandBuffer,\n    uint32_t                                    deviceMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    baseGroupX,\n    uint32_t                                    baseGroupY,\n    uint32_t                                    baseGroupZ,\n    uint32_t                                    groupCountX,\n    uint32_t                                    groupCountY,\n    uint32_t                                    groupCountZ);\n#endif\n\n\n#define VK_KHR_shader_draw_parameters 1\n#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1\n#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME \"VK_KHR_shader_draw_parameters\"\n\n\n#define VK_KHR_maintenance1 1\n#define VK_KHR_MAINTENANCE_1_SPEC_VERSION 2\n#define VK_KHR_MAINTENANCE_1_EXTENSION_NAME \"VK_KHR_maintenance1\"\n#define VK_KHR_MAINTENANCE1_SPEC_VERSION  VK_KHR_MAINTENANCE_1_SPEC_VERSION\n#define VK_KHR_MAINTENANCE1_EXTENSION_NAME VK_KHR_MAINTENANCE_1_EXTENSION_NAME\ntypedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR(\n    VkDevice                                    device,\n    VkCommandPool                               commandPool,\n    VkCommandPoolTrimFlags                      flags);\n#endif\n\n\n#define VK_KHR_device_group_creation 1\n#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1\n#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME \"VK_KHR_device_group_creation\"\n#define VK_MAX_DEVICE_GROUP_SIZE_KHR      VK_MAX_DEVICE_GROUP_SIZE\ntypedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR;\n\ntypedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR 
*PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR(\n    VkInstance                                  instance,\n    uint32_t*                                   pPhysicalDeviceGroupCount,\n    VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties);\n#endif\n\n\n#define VK_KHR_external_memory_capabilities 1\n#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME \"VK_KHR_external_memory_capabilities\"\n#define VK_LUID_SIZE_KHR                  VK_LUID_SIZE\ntypedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR;\n\ntypedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR;\n\ntypedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR;\n\ntypedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR;\n\ntypedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR;\n\ntypedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR;\n\ntypedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR;\n\ntypedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR;\n\ntypedef VkExternalBufferProperties VkExternalBufferPropertiesKHR;\n\ntypedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalBufferInfo*   
pExternalBufferInfo,\n    VkExternalBufferProperties*                 pExternalBufferProperties);\n#endif\n\n\n#define VK_KHR_external_memory 1\n#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME \"VK_KHR_external_memory\"\n#define VK_QUEUE_FAMILY_EXTERNAL_KHR      VK_QUEUE_FAMILY_EXTERNAL\ntypedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR;\n\ntypedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR;\n\ntypedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR;\n\n\n\n#define VK_KHR_external_memory_fd 1\n#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME \"VK_KHR_external_memory_fd\"\ntypedef struct VkImportMemoryFdInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n    int                                   fd;\n} VkImportMemoryFdInfoKHR;\n\ntypedef struct VkMemoryFdPropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           memoryTypeBits;\n} VkMemoryFdPropertiesKHR;\n\ntypedef struct VkMemoryGetFdInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkDeviceMemory                        memory;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n} VkMemoryGetFdInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR(\n    VkDevice                                    device,\n    const VkMemoryGetFdInfoKHR*                 pGetFdInfo,\n    int*                                        
pFd);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR(\n    VkDevice                                    device,\n    VkExternalMemoryHandleTypeFlagBits          handleType,\n    int                                         fd,\n    VkMemoryFdPropertiesKHR*                    pMemoryFdProperties);\n#endif\n\n\n#define VK_KHR_external_semaphore_capabilities 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME \"VK_KHR_external_semaphore_capabilities\"\ntypedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR;\n\ntypedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR;\n\ntypedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR;\n\ntypedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR;\n\ntypedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR;\n\ntypedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,\n    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties);\n#endif\n\n\n#define VK_KHR_external_semaphore 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME \"VK_KHR_external_semaphore\"\ntypedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR;\n\ntypedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR;\n\ntypedef VkExportSemaphoreCreateInfo 
VkExportSemaphoreCreateInfoKHR;\n\n\n\n#define VK_KHR_external_semaphore_fd 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME \"VK_KHR_external_semaphore_fd\"\ntypedef struct VkImportSemaphoreFdInfoKHR {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkSemaphore                              semaphore;\n    VkSemaphoreImportFlags                   flags;\n    VkExternalSemaphoreHandleTypeFlagBits    handleType;\n    int                                      fd;\n} VkImportSemaphoreFdInfoKHR;\n\ntypedef struct VkSemaphoreGetFdInfoKHR {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkSemaphore                              semaphore;\n    VkExternalSemaphoreHandleTypeFlagBits    handleType;\n} VkSemaphoreGetFdInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR(\n    VkDevice                                    device,\n    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR(\n    VkDevice                                    device,\n    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,\n    int*                                        pFd);\n#endif\n\n\n#define VK_KHR_push_descriptor 1\n#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2\n#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME \"VK_KHR_push_descriptor\"\ntypedef struct VkPhysicalDevicePushDescriptorPropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxPushDescriptors;\n} 
VkPhysicalDevicePushDescriptorPropertiesKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites);\ntypedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineBindPoint                         pipelineBindPoint,\n    VkPipelineLayout                            layout,\n    uint32_t                                    set,\n    uint32_t                                    descriptorWriteCount,\n    const VkWriteDescriptorSet*                 pDescriptorWrites);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR(\n    VkCommandBuffer                             commandBuffer,\n    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,\n    VkPipelineLayout                            layout,\n    uint32_t                                    set,\n    const void*                                 pData);\n#endif\n\n\n#define VK_KHR_shader_float16_int8 1\n#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1\n#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME \"VK_KHR_shader_float16_int8\"\ntypedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR;\n\ntypedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR;\n\n\n\n#define VK_KHR_16bit_storage 1\n#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1\n#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME \"VK_KHR_16bit_storage\"\ntypedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR;\n\n\n\n#define 
VK_KHR_incremental_present 1\n#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 2\n#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME \"VK_KHR_incremental_present\"\ntypedef struct VkRectLayerKHR {\n    VkOffset2D    offset;\n    VkExtent2D    extent;\n    uint32_t      layer;\n} VkRectLayerKHR;\n\ntypedef struct VkPresentRegionKHR {\n    uint32_t                 rectangleCount;\n    const VkRectLayerKHR*    pRectangles;\n} VkPresentRegionKHR;\n\ntypedef struct VkPresentRegionsKHR {\n    VkStructureType              sType;\n    const void*                  pNext;\n    uint32_t                     swapchainCount;\n    const VkPresentRegionKHR*    pRegions;\n} VkPresentRegionsKHR;\n\n\n\n#define VK_KHR_descriptor_update_template 1\ntypedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR;\n\n#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1\n#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME \"VK_KHR_descriptor_update_template\"\ntypedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR;\n\ntypedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR;\n\ntypedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR;\n\ntypedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL 
vkCreateDescriptorUpdateTemplateKHR(\n    VkDevice                                    device,\n    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDescriptorUpdateTemplate*                 pDescriptorUpdateTemplate);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR(\n    VkDevice                                    device,\n    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR(\n    VkDevice                                    device,\n    VkDescriptorSet                             descriptorSet,\n    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,\n    const void*                                 pData);\n#endif\n\n\n#define VK_KHR_imageless_framebuffer 1\n#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1\n#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME \"VK_KHR_imageless_framebuffer\"\ntypedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR;\n\ntypedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR;\n\ntypedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR;\n\ntypedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR;\n\n\n\n#define VK_KHR_create_renderpass2 1\n#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1\n#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME \"VK_KHR_create_renderpass2\"\ntypedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR;\n\ntypedef VkAttachmentDescription2 VkAttachmentDescription2KHR;\n\ntypedef VkAttachmentReference2 VkAttachmentReference2KHR;\n\ntypedef VkSubpassDescription2 VkSubpassDescription2KHR;\n\ntypedef VkSubpassDependency2 VkSubpassDependency2KHR;\n\ntypedef VkSubpassBeginInfo VkSubpassBeginInfoKHR;\n\ntypedef VkSubpassEndInfo 
VkSubpassEndInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo*      pRenderPassBegin, const VkSubpassBeginInfo*      pSubpassBeginInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo*      pSubpassBeginInfo, const VkSubpassEndInfo*        pSubpassEndInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo*        pSubpassEndInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR(\n    VkDevice                                    device,\n    const VkRenderPassCreateInfo2*              pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkRenderPass*                               pRenderPass);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkRenderPassBeginInfo*                pRenderPassBegin,\n    const VkSubpassBeginInfo*                   pSubpassBeginInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkSubpassBeginInfo*                   pSubpassBeginInfo,\n    const VkSubpassEndInfo*                     pSubpassEndInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkSubpassEndInfo*                     pSubpassEndInfo);\n#endif\n\n\n#define VK_KHR_shared_presentable_image 1\n#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1\n#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME \"VK_KHR_shared_presentable_image\"\ntypedef struct VkSharedPresentSurfaceCapabilitiesKHR {\n    
VkStructureType      sType;\n    void*                pNext;\n    VkImageUsageFlags    sharedPresentSupportedUsageFlags;\n} VkSharedPresentSurfaceCapabilitiesKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain);\n#endif\n\n\n#define VK_KHR_external_fence_capabilities 1\n#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME \"VK_KHR_external_fence_capabilities\"\ntypedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR;\n\ntypedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR;\n\ntypedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR;\n\ntypedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR;\n\ntypedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR;\n\ntypedef VkExternalFenceProperties VkExternalFencePropertiesKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,\n    VkExternalFenceProperties*                  pExternalFenceProperties);\n#endif\n\n\n#define VK_KHR_external_fence 1\n#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME \"VK_KHR_external_fence\"\ntypedef VkFenceImportFlags VkFenceImportFlagsKHR;\n\ntypedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR;\n\ntypedef VkExportFenceCreateInfo 
VkExportFenceCreateInfoKHR;\n\n\n\n#define VK_KHR_external_fence_fd 1\n#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME \"VK_KHR_external_fence_fd\"\ntypedef struct VkImportFenceFdInfoKHR {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkFence                              fence;\n    VkFenceImportFlags                   flags;\n    VkExternalFenceHandleTypeFlagBits    handleType;\n    int                                  fd;\n} VkImportFenceFdInfoKHR;\n\ntypedef struct VkFenceGetFdInfoKHR {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkFence                              fence;\n    VkExternalFenceHandleTypeFlagBits    handleType;\n} VkFenceGetFdInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR(\n    VkDevice                                    device,\n    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR(\n    VkDevice                                    device,\n    const VkFenceGetFdInfoKHR*                  pGetFdInfo,\n    int*                                        pFd);\n#endif\n\n\n#define VK_KHR_performance_query 1\n#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1\n#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME \"VK_KHR_performance_query\"\n\ntypedef enum VkPerformanceCounterUnitKHR {\n    VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0,\n    VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1,\n    VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2,\n    VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3,\n    VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4,\n    
VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5,\n    VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6,\n    VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7,\n    VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8,\n    VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9,\n    VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10,\n    VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkPerformanceCounterUnitKHR;\n\ntypedef enum VkPerformanceCounterScopeKHR {\n    VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0,\n    VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1,\n    VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2,\n    VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR,\n    VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR,\n    VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR,\n    VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkPerformanceCounterScopeKHR;\n\ntypedef enum VkPerformanceCounterStorageKHR {\n    VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0,\n    VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1,\n    VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2,\n    VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3,\n    VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4,\n    VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5,\n    VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkPerformanceCounterStorageKHR;\n\ntypedef enum VkPerformanceCounterDescriptionFlagBitsKHR {\n    VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001,\n    VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002,\n    VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR,\n    VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR,\n    VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} 
VkPerformanceCounterDescriptionFlagBitsKHR;\ntypedef VkFlags VkPerformanceCounterDescriptionFlagsKHR;\n\ntypedef enum VkAcquireProfilingLockFlagBitsKHR {\n    VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkAcquireProfilingLockFlagBitsKHR;\ntypedef VkFlags VkAcquireProfilingLockFlagsKHR;\ntypedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           performanceCounterQueryPools;\n    VkBool32           performanceCounterMultipleQueryPools;\n} VkPhysicalDevicePerformanceQueryFeaturesKHR;\n\ntypedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           allowCommandBufferQueryCopies;\n} VkPhysicalDevicePerformanceQueryPropertiesKHR;\n\ntypedef struct VkPerformanceCounterKHR {\n    VkStructureType                   sType;\n    void*                             pNext;\n    VkPerformanceCounterUnitKHR       unit;\n    VkPerformanceCounterScopeKHR      scope;\n    VkPerformanceCounterStorageKHR    storage;\n    uint8_t                           uuid[VK_UUID_SIZE];\n} VkPerformanceCounterKHR;\n\ntypedef struct VkPerformanceCounterDescriptionKHR {\n    VkStructureType                            sType;\n    void*                                      pNext;\n    VkPerformanceCounterDescriptionFlagsKHR    flags;\n    char                                       name[VK_MAX_DESCRIPTION_SIZE];\n    char                                       category[VK_MAX_DESCRIPTION_SIZE];\n    char                                       description[VK_MAX_DESCRIPTION_SIZE];\n} VkPerformanceCounterDescriptionKHR;\n\ntypedef struct VkQueryPoolPerformanceCreateInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           queueFamilyIndex;\n    uint32_t           counterIndexCount;\n    const uint32_t*    pCounterIndices;\n} VkQueryPoolPerformanceCreateInfoKHR;\n\ntypedef union 
VkPerformanceCounterResultKHR {\n    int32_t     int32;\n    int64_t     int64;\n    uint32_t    uint32;\n    uint64_t    uint64;\n    float       float32;\n    double      float64;\n} VkPerformanceCounterResultKHR;\n\ntypedef struct VkAcquireProfilingLockInfoKHR {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkAcquireProfilingLockFlagsKHR    flags;\n    uint64_t                          timeout;\n} VkAcquireProfilingLockInfoKHR;\n\ntypedef struct VkPerformanceQuerySubmitInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           counterPassIndex;\n} VkPerformanceQuerySubmitInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses);\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo);\ntypedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t                                    queueFamilyIndex,\n    uint32_t*                                   pCounterCount,\n    VkPerformanceCounterKHR*                    pCounters,\n    VkPerformanceCounterDescriptionKHR*         pCounterDescriptions);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    const 
VkQueryPoolPerformanceCreateInfoKHR*  pPerformanceQueryCreateInfo,\n    uint32_t*                                   pNumPasses);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR(\n    VkDevice                                    device,\n    const VkAcquireProfilingLockInfoKHR*        pInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR(\n    VkDevice                                    device);\n#endif\n\n\n#define VK_KHR_maintenance2 1\n#define VK_KHR_MAINTENANCE_2_SPEC_VERSION 1\n#define VK_KHR_MAINTENANCE_2_EXTENSION_NAME \"VK_KHR_maintenance2\"\n#define VK_KHR_MAINTENANCE2_SPEC_VERSION  VK_KHR_MAINTENANCE_2_SPEC_VERSION\n#define VK_KHR_MAINTENANCE2_EXTENSION_NAME VK_KHR_MAINTENANCE_2_EXTENSION_NAME\ntypedef VkPointClippingBehavior VkPointClippingBehaviorKHR;\n\ntypedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR;\n\ntypedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR;\n\ntypedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR;\n\ntypedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR;\n\ntypedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR;\n\ntypedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR;\n\n\n\n#define VK_KHR_get_surface_capabilities2 1\n#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1\n#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME \"VK_KHR_get_surface_capabilities2\"\ntypedef struct VkPhysicalDeviceSurfaceInfo2KHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkSurfaceKHR       surface;\n} VkPhysicalDeviceSurfaceInfo2KHR;\n\ntypedef struct VkSurfaceCapabilities2KHR {\n    VkStructureType             sType;\n    void*                       pNext;\n    VkSurfaceCapabilitiesKHR    surfaceCapabilities;\n} VkSurfaceCapabilities2KHR;\n\ntypedef struct VkSurfaceFormat2KHR {\n    VkStructureType       sType;\n    
void*                 pNext;\n    VkSurfaceFormatKHR    surfaceFormat;\n} VkSurfaceFormat2KHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,\n    VkSurfaceCapabilities2KHR*                  pSurfaceCapabilities);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,\n    uint32_t*                                   pSurfaceFormatCount,\n    VkSurfaceFormat2KHR*                        pSurfaceFormats);\n#endif\n\n\n#define VK_KHR_variable_pointers 1\n#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1\n#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME \"VK_KHR_variable_pointers\"\ntypedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR;\n\ntypedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR;\n\n\n\n#define VK_KHR_get_display_properties2 1\n#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1\n#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME \"VK_KHR_get_display_properties2\"\ntypedef struct VkDisplayProperties2KHR {\n    VkStructureType           sType;\n    void*                     pNext;\n    VkDisplayPropertiesKHR    displayProperties;\n} VkDisplayProperties2KHR;\n\ntypedef struct VkDisplayPlaneProperties2KHR {\n    
VkStructureType                sType;\n    void*                          pNext;\n    VkDisplayPlanePropertiesKHR    displayPlaneProperties;\n} VkDisplayPlaneProperties2KHR;\n\ntypedef struct VkDisplayModeProperties2KHR {\n    VkStructureType               sType;\n    void*                         pNext;\n    VkDisplayModePropertiesKHR    displayModeProperties;\n} VkDisplayModeProperties2KHR;\n\ntypedef struct VkDisplayPlaneInfo2KHR {\n    VkStructureType     sType;\n    const void*         pNext;\n    VkDisplayModeKHR    mode;\n    uint32_t            planeIndex;\n} VkDisplayPlaneInfo2KHR;\n\ntypedef struct VkDisplayPlaneCapabilities2KHR {\n    VkStructureType                  sType;\n    void*                            pNext;\n    VkDisplayPlaneCapabilitiesKHR    capabilities;\n} VkDisplayPlaneCapabilities2KHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayProperties2KHR*                    pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR(\n    VkPhysicalDevice                            
physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayPlaneProperties2KHR*               pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayKHR                                display,\n    uint32_t*                                   pPropertyCount,\n    VkDisplayModeProperties2KHR*                pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR(\n    VkPhysicalDevice                            physicalDevice,\n    const VkDisplayPlaneInfo2KHR*               pDisplayPlaneInfo,\n    VkDisplayPlaneCapabilities2KHR*             pCapabilities);\n#endif\n\n\n#define VK_KHR_dedicated_allocation 1\n#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3\n#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME \"VK_KHR_dedicated_allocation\"\ntypedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR;\n\ntypedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR;\n\n\n\n#define VK_KHR_storage_buffer_storage_class 1\n#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1\n#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME \"VK_KHR_storage_buffer_storage_class\"\n\n\n#define VK_KHR_relaxed_block_layout 1\n#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1\n#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME \"VK_KHR_relaxed_block_layout\"\n\n\n#define VK_KHR_get_memory_requirements2 1\n#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1\n#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME \"VK_KHR_get_memory_requirements2\"\ntypedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR;\n\ntypedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR;\n\ntypedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR;\n\ntypedef VkMemoryRequirements2 VkMemoryRequirements2KHR;\n\ntypedef VkSparseImageMemoryRequirements2 
VkSparseImageMemoryRequirements2KHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR(\n    VkDevice                                    device,\n    const VkImageMemoryRequirementsInfo2*       pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR(\n    VkDevice                                    device,\n    const VkBufferMemoryRequirementsInfo2*      pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR(\n    VkDevice                                    device,\n    const VkImageSparseMemoryRequirementsInfo2* pInfo,\n    uint32_t*                                   pSparseMemoryRequirementCount,\n    VkSparseImageMemoryRequirements2*           pSparseMemoryRequirements);\n#endif\n\n\n#define VK_KHR_image_format_list 1\n#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1\n#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME \"VK_KHR_image_format_list\"\ntypedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR;\n\n\n\n#define VK_KHR_sampler_ycbcr_conversion 1\ntypedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR;\n\n#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14\n#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME 
\"VK_KHR_sampler_ycbcr_conversion\"\ntypedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR;\n\ntypedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR;\n\ntypedef VkChromaLocation VkChromaLocationKHR;\n\ntypedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR;\n\ntypedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR;\n\ntypedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR;\n\ntypedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR;\n\ntypedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR;\n\ntypedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);\ntypedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR(\n    VkDevice                                    device,\n    const VkSamplerYcbcrConversionCreateInfo*   pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSamplerYcbcrConversion*                   pYcbcrConversion);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR(\n    VkDevice                                    device,\n    VkSamplerYcbcrConversion                    ycbcrConversion,\n    const VkAllocationCallbacks*                pAllocator);\n#endif\n\n\n#define VK_KHR_bind_memory2 1\n#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1\n#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME \"VK_KHR_bind_memory2\"\ntypedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR;\n\ntypedef VkBindImageMemoryInfo 
VkBindImageMemoryInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR(\n    VkDevice                                    device,\n    uint32_t                                    bindInfoCount,\n    const VkBindBufferMemoryInfo*               pBindInfos);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(\n    VkDevice                                    device,\n    uint32_t                                    bindInfoCount,\n    const VkBindImageMemoryInfo*                pBindInfos);\n#endif\n\n\n#define VK_KHR_maintenance3 1\n#define VK_KHR_MAINTENANCE_3_SPEC_VERSION 1\n#define VK_KHR_MAINTENANCE_3_EXTENSION_NAME \"VK_KHR_maintenance3\"\n#define VK_KHR_MAINTENANCE3_SPEC_VERSION  VK_KHR_MAINTENANCE_3_SPEC_VERSION\n#define VK_KHR_MAINTENANCE3_EXTENSION_NAME VK_KHR_MAINTENANCE_3_EXTENSION_NAME\ntypedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR;\n\ntypedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR(\n    VkDevice                                    device,\n    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,\n    VkDescriptorSetLayoutSupport*               pSupport);\n#endif\n\n\n#define VK_KHR_draw_indirect_count 1\n#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1\n#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME \"VK_KHR_draw_indirect_count\"\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer 
commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkBuffer                                    countBuffer,\n    VkDeviceSize                                countBufferOffset,\n    uint32_t                                    maxDrawCount,\n    uint32_t                                    stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkBuffer                                    countBuffer,\n    VkDeviceSize                                countBufferOffset,\n    uint32_t                                    maxDrawCount,\n    uint32_t                                    stride);\n#endif\n\n\n#define VK_KHR_shader_subgroup_extended_types 1\n#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1\n#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME \"VK_KHR_shader_subgroup_extended_types\"\ntypedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR;\n\n\n\n#define VK_KHR_8bit_storage 1\n#define VK_KHR_8BIT_STORAGE_SPEC_VERSION  1\n#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME \"VK_KHR_8bit_storage\"\ntypedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR;\n\n\n\n#define 
VK_KHR_shader_atomic_int64 1\n#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1\n#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME \"VK_KHR_shader_atomic_int64\"\ntypedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR;\n\n\n\n#define VK_KHR_shader_clock 1\n#define VK_KHR_SHADER_CLOCK_SPEC_VERSION  1\n#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME \"VK_KHR_shader_clock\"\ntypedef struct VkPhysicalDeviceShaderClockFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderSubgroupClock;\n    VkBool32           shaderDeviceClock;\n} VkPhysicalDeviceShaderClockFeaturesKHR;\n\n\n\n#define VK_KHR_global_priority 1\n#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR   16U\n#define VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION 1\n#define VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME \"VK_KHR_global_priority\"\n\ntypedef enum VkQueueGlobalPriorityKHR {\n    VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = 128,\n    VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = 256,\n    VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = 512,\n    VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = 1024,\n    VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR,\n    VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR,\n    VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR,\n    VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR,\n    VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkQueueGlobalPriorityKHR;\ntypedef struct VkDeviceQueueGlobalPriorityCreateInfoKHR {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkQueueGlobalPriorityKHR    globalPriority;\n} VkDeviceQueueGlobalPriorityCreateInfoKHR;\n\ntypedef struct VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           globalPriorityQuery;\n} VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR;\n\ntypedef struct 
VkQueueFamilyGlobalPriorityPropertiesKHR {\n    VkStructureType             sType;\n    void*                       pNext;\n    uint32_t                    priorityCount;\n    VkQueueGlobalPriorityKHR    priorities[VK_MAX_GLOBAL_PRIORITY_SIZE_KHR];\n} VkQueueFamilyGlobalPriorityPropertiesKHR;\n\n\n\n#define VK_KHR_driver_properties 1\n#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1\n#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME \"VK_KHR_driver_properties\"\n#define VK_MAX_DRIVER_NAME_SIZE_KHR       VK_MAX_DRIVER_NAME_SIZE\n#define VK_MAX_DRIVER_INFO_SIZE_KHR       VK_MAX_DRIVER_INFO_SIZE\ntypedef VkDriverId VkDriverIdKHR;\n\ntypedef VkConformanceVersion VkConformanceVersionKHR;\n\ntypedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR;\n\n\n\n#define VK_KHR_shader_float_controls 1\n#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4\n#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME \"VK_KHR_shader_float_controls\"\ntypedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR;\n\ntypedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR;\n\n\n\n#define VK_KHR_depth_stencil_resolve 1\n#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1\n#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME \"VK_KHR_depth_stencil_resolve\"\ntypedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR;\n\ntypedef VkResolveModeFlags VkResolveModeFlagsKHR;\n\ntypedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR;\n\ntypedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR;\n\n\n\n#define VK_KHR_swapchain_mutable_format 1\n#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1\n#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME \"VK_KHR_swapchain_mutable_format\"\n\n\n#define VK_KHR_timeline_semaphore 1\n#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2\n#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME 
\"VK_KHR_timeline_semaphore\"\ntypedef VkSemaphoreType VkSemaphoreTypeKHR;\n\ntypedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR;\n\ntypedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR;\n\ntypedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR;\n\ntypedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR;\n\ntypedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR;\n\ntypedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR;\n\ntypedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR;\n\ntypedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);\ntypedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);\ntypedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR(\n    VkDevice                                    device,\n    VkSemaphore                                 semaphore,\n    uint64_t*                                   pValue);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR(\n    VkDevice                                    device,\n    const VkSemaphoreWaitInfo*                  pWaitInfo,\n    uint64_t                                    timeout);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR(\n    VkDevice                                    device,\n    const VkSemaphoreSignalInfo*                pSignalInfo);\n#endif\n\n\n#define VK_KHR_vulkan_memory_model 1\n#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3\n#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME \"VK_KHR_vulkan_memory_model\"\ntypedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR;\n\n\n\n#define 
VK_KHR_shader_terminate_invocation 1\n#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1\n#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME \"VK_KHR_shader_terminate_invocation\"\ntypedef VkPhysicalDeviceShaderTerminateInvocationFeatures VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR;\n\n\n\n#define VK_KHR_fragment_shading_rate 1\n#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 2\n#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME \"VK_KHR_fragment_shading_rate\"\n\ntypedef enum VkFragmentShadingRateCombinerOpKHR {\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0,\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1,\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2,\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3,\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4,\n    VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkFragmentShadingRateCombinerOpKHR;\ntypedef struct VkFragmentShadingRateAttachmentInfoKHR {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    const VkAttachmentReference2*    pFragmentShadingRateAttachment;\n    VkExtent2D                       shadingRateAttachmentTexelSize;\n} VkFragmentShadingRateAttachmentInfoKHR;\n\ntypedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExtent2D                            fragmentSize;\n    VkFragmentShadingRateCombinerOpKHR    combinerOps[2];\n} VkPipelineFragmentShadingRateStateCreateInfoKHR;\n\ntypedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pipelineFragmentShadingRate;\n    VkBool32           primitiveFragmentShadingRate;\n    VkBool32           attachmentFragmentShadingRate;\n} VkPhysicalDeviceFragmentShadingRateFeaturesKHR;\n\ntypedef struct 
VkPhysicalDeviceFragmentShadingRatePropertiesKHR {\n    VkStructureType          sType;\n    void*                    pNext;\n    VkExtent2D               minFragmentShadingRateAttachmentTexelSize;\n    VkExtent2D               maxFragmentShadingRateAttachmentTexelSize;\n    uint32_t                 maxFragmentShadingRateAttachmentTexelSizeAspectRatio;\n    VkBool32                 primitiveFragmentShadingRateWithMultipleViewports;\n    VkBool32                 layeredShadingRateAttachments;\n    VkBool32                 fragmentShadingRateNonTrivialCombinerOps;\n    VkExtent2D               maxFragmentSize;\n    uint32_t                 maxFragmentSizeAspectRatio;\n    uint32_t                 maxFragmentShadingRateCoverageSamples;\n    VkSampleCountFlagBits    maxFragmentShadingRateRasterizationSamples;\n    VkBool32                 fragmentShadingRateWithShaderDepthStencilWrites;\n    VkBool32                 fragmentShadingRateWithSampleMask;\n    VkBool32                 fragmentShadingRateWithShaderSampleMask;\n    VkBool32                 fragmentShadingRateWithConservativeRasterization;\n    VkBool32                 fragmentShadingRateWithFragmentShaderInterlock;\n    VkBool32                 fragmentShadingRateWithCustomSampleLocations;\n    VkBool32                 fragmentShadingRateStrictMultiplyCombiner;\n} VkPhysicalDeviceFragmentShadingRatePropertiesKHR;\n\ntypedef struct VkPhysicalDeviceFragmentShadingRateKHR {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkSampleCountFlags    sampleCounts;\n    VkExtent2D            fragmentSize;\n} VkPhysicalDeviceFragmentShadingRateKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer           commandBuffer, const VkExtent2D*                      
     pFragmentSize, const VkFragmentShadingRateCombinerOpKHR    combinerOps[2]);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pFragmentShadingRateCount,\n    VkPhysicalDeviceFragmentShadingRateKHR*     pFragmentShadingRates);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkExtent2D*                           pFragmentSize,\n    const VkFragmentShadingRateCombinerOpKHR    combinerOps[2]);\n#endif\n\n\n#define VK_KHR_spirv_1_4 1\n#define VK_KHR_SPIRV_1_4_SPEC_VERSION     1\n#define VK_KHR_SPIRV_1_4_EXTENSION_NAME   \"VK_KHR_spirv_1_4\"\n\n\n#define VK_KHR_surface_protected_capabilities 1\n#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1\n#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME \"VK_KHR_surface_protected_capabilities\"\ntypedef struct VkSurfaceProtectedCapabilitiesKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           supportsProtected;\n} VkSurfaceProtectedCapabilitiesKHR;\n\n\n\n#define VK_KHR_separate_depth_stencil_layouts 1\n#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1\n#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME \"VK_KHR_separate_depth_stencil_layouts\"\ntypedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR;\n\ntypedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR;\n\ntypedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR;\n\n\n\n#define VK_KHR_present_wait 1\n#define VK_KHR_PRESENT_WAIT_SPEC_VERSION  1\n#define VK_KHR_PRESENT_WAIT_EXTENSION_NAME \"VK_KHR_present_wait\"\ntypedef struct VkPhysicalDevicePresentWaitFeaturesKHR {\n    VkStructureType    sType;\n    void*              
pNext;\n    VkBool32           presentWait;\n} VkPhysicalDevicePresentWaitFeaturesKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkWaitForPresentKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkWaitForPresentKHR(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    uint64_t                                    presentId,\n    uint64_t                                    timeout);\n#endif\n\n\n#define VK_KHR_uniform_buffer_standard_layout 1\n#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1\n#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME \"VK_KHR_uniform_buffer_standard_layout\"\ntypedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR;\n\n\n\n#define VK_KHR_buffer_device_address 1\n#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1\n#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME \"VK_KHR_buffer_device_address\"\ntypedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR;\n\ntypedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR;\n\ntypedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR;\n\ntypedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR;\n\ntypedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR;\n\ntypedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);\ntypedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);\ntypedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkDeviceAddress VKAPI_CALL 
vkGetBufferDeviceAddressKHR(\n    VkDevice                                    device,\n    const VkBufferDeviceAddressInfo*            pInfo);\n\nVKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR(\n    VkDevice                                    device,\n    const VkBufferDeviceAddressInfo*            pInfo);\n\nVKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR(\n    VkDevice                                    device,\n    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);\n#endif\n\n\n#define VK_KHR_deferred_host_operations 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR)\n#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4\n#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME \"VK_KHR_deferred_host_operations\"\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator);\ntypedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation);\ntypedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR(\n    VkDevice                                    device,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDeferredOperationKHR*                     pDeferredOperation);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      operation,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR 
uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      operation);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      operation);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      operation);\n#endif\n\n\n#define VK_KHR_pipeline_executable_properties 1\n#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1\n#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME \"VK_KHR_pipeline_executable_properties\"\n\ntypedef enum VkPipelineExecutableStatisticFormatKHR {\n    VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0,\n    VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1,\n    VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2,\n    VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3,\n    VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkPipelineExecutableStatisticFormatKHR;\ntypedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pipelineExecutableInfo;\n} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR;\n\ntypedef struct VkPipelineInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkPipeline         pipeline;\n} VkPipelineInfoKHR;\n\ntypedef struct VkPipelineExecutablePropertiesKHR {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkShaderStageFlags    stages;\n    char                  name[VK_MAX_DESCRIPTION_SIZE];\n    char                  description[VK_MAX_DESCRIPTION_SIZE];\n    uint32_t              subgroupSize;\n} VkPipelineExecutablePropertiesKHR;\n\ntypedef struct VkPipelineExecutableInfoKHR {\n    
VkStructureType    sType;\n    const void*        pNext;\n    VkPipeline         pipeline;\n    uint32_t           executableIndex;\n} VkPipelineExecutableInfoKHR;\n\ntypedef union VkPipelineExecutableStatisticValueKHR {\n    VkBool32    b32;\n    int64_t     i64;\n    uint64_t    u64;\n    double      f64;\n} VkPipelineExecutableStatisticValueKHR;\n\ntypedef struct VkPipelineExecutableStatisticKHR {\n    VkStructureType                           sType;\n    void*                                     pNext;\n    char                                      name[VK_MAX_DESCRIPTION_SIZE];\n    char                                      description[VK_MAX_DESCRIPTION_SIZE];\n    VkPipelineExecutableStatisticFormatKHR    format;\n    VkPipelineExecutableStatisticValueKHR     value;\n} VkPipelineExecutableStatisticKHR;\n\ntypedef struct VkPipelineExecutableInternalRepresentationKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    char               name[VK_MAX_DESCRIPTION_SIZE];\n    char               description[VK_MAX_DESCRIPTION_SIZE];\n    VkBool32           isText;\n    size_t             dataSize;\n    void*              pData;\n} VkPipelineExecutableInternalRepresentationKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice                        device, const VkPipelineInfoKHR*        pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice                        device, const VkPipelineExecutableInfoKHR*  pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice                        device, const VkPipelineExecutableInfoKHR*  pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);\n\n#ifndef 
VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR(\n    VkDevice                                    device,\n    const VkPipelineInfoKHR*                    pPipelineInfo,\n    uint32_t*                                   pExecutableCount,\n    VkPipelineExecutablePropertiesKHR*          pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR(\n    VkDevice                                    device,\n    const VkPipelineExecutableInfoKHR*          pExecutableInfo,\n    uint32_t*                                   pStatisticCount,\n    VkPipelineExecutableStatisticKHR*           pStatistics);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR(\n    VkDevice                                    device,\n    const VkPipelineExecutableInfoKHR*          pExecutableInfo,\n    uint32_t*                                   pInternalRepresentationCount,\n    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);\n#endif\n\n\n#define VK_KHR_shader_integer_dot_product 1\n#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION 1\n#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME \"VK_KHR_shader_integer_dot_product\"\ntypedef VkPhysicalDeviceShaderIntegerDotProductFeatures VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR;\n\ntypedef VkPhysicalDeviceShaderIntegerDotProductProperties VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR;\n\n\n\n#define VK_KHR_pipeline_library 1\n#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1\n#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME \"VK_KHR_pipeline_library\"\ntypedef struct VkPipelineLibraryCreateInfoKHR {\n    VkStructureType      sType;\n    const void*          pNext;\n    uint32_t             libraryCount;\n    const VkPipeline*    pLibraries;\n} VkPipelineLibraryCreateInfoKHR;\n\n\n\n#define VK_KHR_shader_non_semantic_info 1\n#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1\n#define 
VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME \"VK_KHR_shader_non_semantic_info\"\n\n\n#define VK_KHR_present_id 1\n#define VK_KHR_PRESENT_ID_SPEC_VERSION    1\n#define VK_KHR_PRESENT_ID_EXTENSION_NAME  \"VK_KHR_present_id\"\ntypedef struct VkPresentIdKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           swapchainCount;\n    const uint64_t*    pPresentIds;\n} VkPresentIdKHR;\n\ntypedef struct VkPhysicalDevicePresentIdFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           presentId;\n} VkPhysicalDevicePresentIdFeaturesKHR;\n\n\n\n#define VK_KHR_synchronization2 1\n#define VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION 1\n#define VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME \"VK_KHR_synchronization2\"\ntypedef VkPipelineStageFlags2 VkPipelineStageFlags2KHR;\n\ntypedef VkPipelineStageFlagBits2 VkPipelineStageFlagBits2KHR;\n\ntypedef VkAccessFlags2 VkAccessFlags2KHR;\n\ntypedef VkAccessFlagBits2 VkAccessFlagBits2KHR;\n\ntypedef VkSubmitFlagBits VkSubmitFlagBitsKHR;\n\ntypedef VkSubmitFlags VkSubmitFlagsKHR;\n\ntypedef VkMemoryBarrier2 VkMemoryBarrier2KHR;\n\ntypedef VkBufferMemoryBarrier2 VkBufferMemoryBarrier2KHR;\n\ntypedef VkImageMemoryBarrier2 VkImageMemoryBarrier2KHR;\n\ntypedef VkDependencyInfo VkDependencyInfoKHR;\n\ntypedef VkSubmitInfo2 VkSubmitInfo2KHR;\n\ntypedef VkSemaphoreSubmitInfo VkSemaphoreSubmitInfoKHR;\n\ntypedef VkCommandBufferSubmitInfo VkCommandBufferSubmitInfoKHR;\n\ntypedef VkPhysicalDeviceSynchronization2Features VkPhysicalDeviceSynchronization2FeaturesKHR;\n\ntypedef struct VkQueueFamilyCheckpointProperties2NV {\n    VkStructureType          sType;\n    void*                    pNext;\n    VkPipelineStageFlags2    checkpointExecutionStageMask;\n} VkQueueFamilyCheckpointProperties2NV;\n\ntypedef struct VkCheckpointData2NV {\n    VkStructureType          sType;\n    void*                    pNext;\n    VkPipelineStageFlags2    stage;\n    void*                    
pCheckpointMarker;\n} VkCheckpointData2NV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer                   commandBuffer, VkEvent                                             event, const VkDependencyInfo*                             pDependencyInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer                   commandBuffer, VkEvent                                             event, VkPipelineStageFlags2               stageMask);\ntypedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer                   commandBuffer, uint32_t                                            eventCount, const VkEvent*                     pEvents, const VkDependencyInfo*            pDependencyInfos);\ntypedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer                   commandBuffer, const VkDependencyInfo*                             pDependencyInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer                   commandBuffer, VkPipelineStageFlags2               stage, VkQueryPool                                         queryPool, uint32_t                                            query);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue                           queue, uint32_t                            submitCount, const VkSubmitInfo2*              pSubmits, VkFence           fence);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer                   commandBuffer, VkPipelineStageFlags2               stage, VkBuffer                                            dstBuffer, VkDeviceSize                                        dstOffset, uint32_t                                            marker);\ntypedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointData2NV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2KHR(\n    VkCommandBuffer                        
     commandBuffer,\n    VkEvent                                     event,\n    const VkDependencyInfo*                     pDependencyInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2KHR(\n    VkCommandBuffer                             commandBuffer,\n    VkEvent                                     event,\n    VkPipelineStageFlags2                       stageMask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2KHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    eventCount,\n    const VkEvent*                              pEvents,\n    const VkDependencyInfo*                     pDependencyInfos);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkDependencyInfo*                     pDependencyInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2KHR(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlags2                       stage,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2KHR(\n    VkQueue                                     queue,\n    uint32_t                                    submitCount,\n    const VkSubmitInfo2*                        pSubmits,\n    VkFence                                     fence);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarker2AMD(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlags2                       stage,\n    VkBuffer                                    dstBuffer,\n    VkDeviceSize                                dstOffset,\n    uint32_t                                    marker);\n\nVKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointData2NV(\n    VkQueue                                     queue,\n    uint32_t*                                   pCheckpointDataCount,\n    
VkCheckpointData2NV*                        pCheckpointData);\n#endif\n\n\n#define VK_KHR_fragment_shader_barycentric 1\n#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1\n#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME \"VK_KHR_fragment_shader_barycentric\"\ntypedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentShaderBarycentric;\n} VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR;\n\ntypedef struct VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           triStripVertexOrderIndependentOfProvokingVertex;\n} VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR;\n\n\n\n#define VK_KHR_shader_subgroup_uniform_control_flow 1\n#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION 1\n#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_EXTENSION_NAME \"VK_KHR_shader_subgroup_uniform_control_flow\"\ntypedef struct VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderSubgroupUniformControlFlow;\n} VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR;\n\n\n\n#define VK_KHR_zero_initialize_workgroup_memory 1\n#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION 1\n#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME \"VK_KHR_zero_initialize_workgroup_memory\"\ntypedef VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR;\n\n\n\n#define VK_KHR_workgroup_memory_explicit_layout 1\n#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION 1\n#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME \"VK_KHR_workgroup_memory_explicit_layout\"\ntypedef struct VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR {\n    VkStructureType    sType;\n    void*           
   pNext;\n    VkBool32           workgroupMemoryExplicitLayout;\n    VkBool32           workgroupMemoryExplicitLayoutScalarBlockLayout;\n    VkBool32           workgroupMemoryExplicitLayout8BitAccess;\n    VkBool32           workgroupMemoryExplicitLayout16BitAccess;\n} VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR;\n\n\n\n#define VK_KHR_copy_commands2 1\n#define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1\n#define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME \"VK_KHR_copy_commands2\"\ntypedef VkCopyBufferInfo2 VkCopyBufferInfo2KHR;\n\ntypedef VkCopyImageInfo2 VkCopyImageInfo2KHR;\n\ntypedef VkCopyBufferToImageInfo2 VkCopyBufferToImageInfo2KHR;\n\ntypedef VkCopyImageToBufferInfo2 VkCopyImageToBufferInfo2KHR;\n\ntypedef VkBlitImageInfo2 VkBlitImageInfo2KHR;\n\ntypedef VkResolveImageInfo2 VkResolveImageInfo2KHR;\n\ntypedef VkBufferCopy2 VkBufferCopy2KHR;\n\ntypedef VkImageCopy2 VkImageCopy2KHR;\n\ntypedef VkImageBlit2 VkImageBlit2KHR;\n\ntypedef VkBufferImageCopy2 VkBufferImageCopy2KHR;\n\ntypedef VkImageResolve2 VkImageResolve2KHR;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const 
VkCopyBufferInfo2*                    pCopyBufferInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyImageInfo2*                     pCopyImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyBufferToImageInfo2*             pCopyBufferToImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyImageToBufferInfo2*             pCopyImageToBufferInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkBlitImageInfo2*                     pBlitImageInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2KHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkResolveImageInfo2*                  pResolveImageInfo);\n#endif\n\n\n#define VK_KHR_format_feature_flags2 1\n#define VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION 1\n#define VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME \"VK_KHR_format_feature_flags2\"\ntypedef VkFormatFeatureFlags2 VkFormatFeatureFlags2KHR;\n\ntypedef VkFormatFeatureFlagBits2 VkFormatFeatureFlagBits2KHR;\n\ntypedef VkFormatProperties3 VkFormatProperties3KHR;\n\n\n\n#define VK_KHR_ray_tracing_maintenance1 1\n#define VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION 1\n#define VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME \"VK_KHR_ray_tracing_maintenance1\"\ntypedef struct VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rayTracingMaintenance1;\n    VkBool32           rayTracingPipelineTraceRaysIndirect2;\n} VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR;\n\ntypedef struct VkTraceRaysIndirectCommand2KHR {\n    VkDeviceAddress    raygenShaderRecordAddress;\n    VkDeviceSize       
raygenShaderRecordSize;\n    VkDeviceAddress    missShaderBindingTableAddress;\n    VkDeviceSize       missShaderBindingTableSize;\n    VkDeviceSize       missShaderBindingTableStride;\n    VkDeviceAddress    hitShaderBindingTableAddress;\n    VkDeviceSize       hitShaderBindingTableSize;\n    VkDeviceSize       hitShaderBindingTableStride;\n    VkDeviceAddress    callableShaderBindingTableAddress;\n    VkDeviceSize       callableShaderBindingTableSize;\n    VkDeviceSize       callableShaderBindingTableStride;\n    uint32_t           width;\n    uint32_t           height;\n    uint32_t           depth;\n} VkTraceRaysIndirectCommand2KHR;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirect2KHR)(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirect2KHR(\n    VkCommandBuffer                             commandBuffer,\n    VkDeviceAddress                             indirectDeviceAddress);\n#endif\n\n\n#define VK_KHR_portability_enumeration 1\n#define VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION 1\n#define VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME \"VK_KHR_portability_enumeration\"\n\n\n#define VK_KHR_maintenance4 1\n#define VK_KHR_MAINTENANCE_4_SPEC_VERSION 2\n#define VK_KHR_MAINTENANCE_4_EXTENSION_NAME \"VK_KHR_maintenance4\"\ntypedef VkPhysicalDeviceMaintenance4Features VkPhysicalDeviceMaintenance4FeaturesKHR;\n\ntypedef VkPhysicalDeviceMaintenance4Properties VkPhysicalDeviceMaintenance4PropertiesKHR;\n\ntypedef VkDeviceBufferMemoryRequirements VkDeviceBufferMemoryRequirementsKHR;\n\ntypedef VkDeviceImageMemoryRequirements VkDeviceImageMemoryRequirementsKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirementsKHR)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, 
VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirementsKHR(\n    VkDevice                                    device,\n    const VkDeviceBufferMemoryRequirements*     pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirementsKHR(\n    VkDevice                                    device,\n    const VkDeviceImageMemoryRequirements*      pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirementsKHR(\n    VkDevice                                    device,\n    const VkDeviceImageMemoryRequirements*      pInfo,\n    uint32_t*                                   pSparseMemoryRequirementCount,\n    VkSparseImageMemoryRequirements2*           pSparseMemoryRequirements);\n#endif\n\n\n#define VK_EXT_debug_report 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)\n#define VK_EXT_DEBUG_REPORT_SPEC_VERSION  10\n#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME \"VK_EXT_debug_report\"\n\ntypedef enum VkDebugReportObjectTypeEXT {\n    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,\n    VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,\n    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,\n    VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,\n    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,\n    VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,\n    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,\n    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,\n    
VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,\n    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,\n    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,\n    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,\n    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,\n    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,\n    VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,\n    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,\n    VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,\n    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,\n    VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_CU_MODULE_NVX_EXT = 1000029000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT = 1000029001,\n    VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT = 1000366000,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,\n    VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,\n    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT 
= VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT,\n    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT,\n    VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDebugReportObjectTypeEXT;\n\ntypedef enum VkDebugReportFlagBitsEXT {\n    VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,\n    VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,\n    VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,\n    VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,\n    VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,\n    VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDebugReportFlagBitsEXT;\ntypedef VkFlags VkDebugReportFlagsEXT;\ntypedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(\n    VkDebugReportFlagsEXT                       flags,\n    VkDebugReportObjectTypeEXT                  objectType,\n    uint64_t                                    object,\n    size_t                                      location,\n    int32_t                                     messageCode,\n    const char*                                 pLayerPrefix,\n    const char*                                 pMessage,\n    void*                                       pUserData);\n\ntypedef struct VkDebugReportCallbackCreateInfoEXT {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkDebugReportFlagsEXT           flags;\n    PFN_vkDebugReportCallbackEXT    pfnCallback;\n    void*                           pUserData;\n} VkDebugReportCallbackCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR 
*PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(\n    VkInstance                                  instance,\n    const VkDebugReportCallbackCreateInfoEXT*   pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDebugReportCallbackEXT*                   pCallback);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(\n    VkInstance                                  instance,\n    VkDebugReportCallbackEXT                    callback,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(\n    VkInstance                                  instance,\n    VkDebugReportFlagsEXT                       flags,\n    VkDebugReportObjectTypeEXT                  objectType,\n    uint64_t                                    object,\n    size_t                                      location,\n    int32_t                                     messageCode,\n    const char*                                 pLayerPrefix,\n    const char*                                 pMessage);\n#endif\n\n\n#define VK_NV_glsl_shader 1\n#define VK_NV_GLSL_SHADER_SPEC_VERSION    1\n#define VK_NV_GLSL_SHADER_EXTENSION_NAME  \"VK_NV_glsl_shader\"\n\n\n#define VK_EXT_depth_range_unrestricted 1\n#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1\n#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME \"VK_EXT_depth_range_unrestricted\"\n\n\n#define VK_IMG_filter_cubic 1\n#define VK_IMG_FILTER_CUBIC_SPEC_VERSION  1\n#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME \"VK_IMG_filter_cubic\"\n\n\n#define VK_AMD_rasterization_order 1\n#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1\n#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME 
\"VK_AMD_rasterization_order\"\n\ntypedef enum VkRasterizationOrderAMD {\n    VK_RASTERIZATION_ORDER_STRICT_AMD = 0,\n    VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,\n    VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF\n} VkRasterizationOrderAMD;\ntypedef struct VkPipelineRasterizationStateRasterizationOrderAMD {\n    VkStructureType            sType;\n    const void*                pNext;\n    VkRasterizationOrderAMD    rasterizationOrder;\n} VkPipelineRasterizationStateRasterizationOrderAMD;\n\n\n\n#define VK_AMD_shader_trinary_minmax 1\n#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1\n#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME \"VK_AMD_shader_trinary_minmax\"\n\n\n#define VK_AMD_shader_explicit_vertex_parameter 1\n#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1\n#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME \"VK_AMD_shader_explicit_vertex_parameter\"\n\n\n#define VK_EXT_debug_marker 1\n#define VK_EXT_DEBUG_MARKER_SPEC_VERSION  4\n#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME \"VK_EXT_debug_marker\"\ntypedef struct VkDebugMarkerObjectNameInfoEXT {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkDebugReportObjectTypeEXT    objectType;\n    uint64_t                      object;\n    const char*                   pObjectName;\n} VkDebugMarkerObjectNameInfoEXT;\n\ntypedef struct VkDebugMarkerObjectTagInfoEXT {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkDebugReportObjectTypeEXT    objectType;\n    uint64_t                      object;\n    uint64_t                      tagName;\n    size_t                        tagSize;\n    const void*                   pTag;\n} VkDebugMarkerObjectTagInfoEXT;\n\ntypedef struct VkDebugMarkerMarkerInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    const char*        pMarkerName;\n    float              color[4];\n} VkDebugMarkerMarkerInfoEXT;\n\ntypedef VkResult (VKAPI_PTR 
*PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer);\ntypedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT(\n    VkDevice                                    device,\n    const VkDebugMarkerObjectTagInfoEXT*        pTagInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT(\n    VkDevice                                    device,\n    const VkDebugMarkerObjectNameInfoEXT*       pNameInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkDebugMarkerMarkerInfoEXT*           pMarkerInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT(\n    VkCommandBuffer                             commandBuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkDebugMarkerMarkerInfoEXT*           pMarkerInfo);\n#endif\n\n\n#define VK_AMD_gcn_shader 1\n#define VK_AMD_GCN_SHADER_SPEC_VERSION    1\n#define VK_AMD_GCN_SHADER_EXTENSION_NAME  \"VK_AMD_gcn_shader\"\n\n\n#define VK_NV_dedicated_allocation 1\n#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1\n#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME \"VK_NV_dedicated_allocation\"\ntypedef struct VkDedicatedAllocationImageCreateInfoNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           dedicatedAllocation;\n} 
VkDedicatedAllocationImageCreateInfoNV;\n\ntypedef struct VkDedicatedAllocationBufferCreateInfoNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           dedicatedAllocation;\n} VkDedicatedAllocationBufferCreateInfoNV;\n\ntypedef struct VkDedicatedAllocationMemoryAllocateInfoNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n    VkBuffer           buffer;\n} VkDedicatedAllocationMemoryAllocateInfoNV;\n\n\n\n#define VK_EXT_transform_feedback 1\n#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1\n#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME \"VK_EXT_transform_feedback\"\ntypedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT;\ntypedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           transformFeedback;\n    VkBool32           geometryStreams;\n} VkPhysicalDeviceTransformFeedbackFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxTransformFeedbackStreams;\n    uint32_t           maxTransformFeedbackBuffers;\n    VkDeviceSize       maxTransformFeedbackBufferSize;\n    uint32_t           maxTransformFeedbackStreamDataSize;\n    uint32_t           maxTransformFeedbackBufferDataSize;\n    uint32_t           maxTransformFeedbackBufferDataStride;\n    VkBool32           transformFeedbackQueries;\n    VkBool32           transformFeedbackStreamsLinesTriangles;\n    VkBool32           transformFeedbackRasterizationStreamSelect;\n    VkBool32           transformFeedbackDraw;\n} VkPhysicalDeviceTransformFeedbackPropertiesEXT;\n\ntypedef struct VkPipelineRasterizationStateStreamCreateInfoEXT {\n    VkStructureType                                     sType;\n    const void*                                         pNext;\n    VkPipelineRasterizationStateStreamCreateFlagsEXT    flags;\n    
uint32_t                                            rasterizationStream;\n} VkPipelineRasterizationStateStreamCreateInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstBinding,\n    uint32_t                                    bindingCount,\n    const VkBuffer*                             pBuffers,\n    const VkDeviceSize*                         pOffsets,\n    const VkDeviceSize*                         pSizes);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                               
     firstCounterBuffer,\n    uint32_t                                    counterBufferCount,\n    const VkBuffer*                             pCounterBuffers,\n    const VkDeviceSize*                         pCounterBufferOffsets);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstCounterBuffer,\n    uint32_t                                    counterBufferCount,\n    const VkBuffer*                             pCounterBuffers,\n    const VkDeviceSize*                         pCounterBufferOffsets);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query,\n    VkQueryControlFlags                         flags,\n    uint32_t                                    index);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    query,\n    uint32_t                                    index);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    instanceCount,\n    uint32_t                                    firstInstance,\n    VkBuffer                                    counterBuffer,\n    VkDeviceSize                                counterBufferOffset,\n    uint32_t                                    counterOffset,\n    uint32_t                                    vertexStride);\n#endif\n\n\n#define VK_NVX_binary_import 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuModuleNVX)\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuFunctionNVX)\n#define VK_NVX_BINARY_IMPORT_SPEC_VERSION 1\n#define 
VK_NVX_BINARY_IMPORT_EXTENSION_NAME \"VK_NVX_binary_import\"\ntypedef struct VkCuModuleCreateInfoNVX {\n    VkStructureType    sType;\n    const void*        pNext;\n    size_t             dataSize;\n    const void*        pData;\n} VkCuModuleCreateInfoNVX;\n\ntypedef struct VkCuFunctionCreateInfoNVX {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkCuModuleNVX      module;\n    const char*        pName;\n} VkCuFunctionCreateInfoNVX;\n\ntypedef struct VkCuLaunchInfoNVX {\n    VkStructureType        sType;\n    const void*            pNext;\n    VkCuFunctionNVX        function;\n    uint32_t               gridDimX;\n    uint32_t               gridDimY;\n    uint32_t               gridDimZ;\n    uint32_t               blockDimX;\n    uint32_t               blockDimY;\n    uint32_t               blockDimZ;\n    uint32_t               sharedMemBytes;\n    size_t                 paramCount;\n    const void* const *    pParams;\n    size_t                 extraCount;\n    const void* const *    pExtras;\n} VkCuLaunchInfoNVX;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateCuModuleNVX)(VkDevice device, const VkCuModuleCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuModuleNVX* pModule);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateCuFunctionNVX)(VkDevice device, const VkCuFunctionCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuFunctionNVX* pFunction);\ntypedef void (VKAPI_PTR *PFN_vkDestroyCuModuleNVX)(VkDevice device, VkCuModuleNVX module, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkDestroyCuFunctionNVX)(VkDevice device, VkCuFunctionNVX function, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkCmdCuLaunchKernelNVX)(VkCommandBuffer commandBuffer, const VkCuLaunchInfoNVX* pLaunchInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateCuModuleNVX(\n    VkDevice                                    device,\n    const VkCuModuleCreateInfoNVX* 
             pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkCuModuleNVX*                              pModule);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateCuFunctionNVX(\n    VkDevice                                    device,\n    const VkCuFunctionCreateInfoNVX*            pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkCuFunctionNVX*                            pFunction);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyCuModuleNVX(\n    VkDevice                                    device,\n    VkCuModuleNVX                               module,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyCuFunctionNVX(\n    VkDevice                                    device,\n    VkCuFunctionNVX                             function,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCuLaunchKernelNVX(\n    VkCommandBuffer                             commandBuffer,\n    const VkCuLaunchInfoNVX*                    pLaunchInfo);\n#endif\n\n\n#define VK_NVX_image_view_handle 1\n#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 2\n#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME \"VK_NVX_image_view_handle\"\ntypedef struct VkImageViewHandleInfoNVX {\n    VkStructureType     sType;\n    const void*         pNext;\n    VkImageView         imageView;\n    VkDescriptorType    descriptorType;\n    VkSampler           sampler;\n} VkImageViewHandleInfoNVX;\n\ntypedef struct VkImageViewAddressPropertiesNVX {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceAddress    deviceAddress;\n    VkDeviceSize       size;\n} VkImageViewAddressPropertiesNVX;\n\ntypedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* 
pProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX(\n    VkDevice                                    device,\n    const VkImageViewHandleInfoNVX*             pInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewAddressNVX(\n    VkDevice                                    device,\n    VkImageView                                 imageView,\n    VkImageViewAddressPropertiesNVX*            pProperties);\n#endif\n\n\n#define VK_AMD_draw_indirect_count 1\n#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2\n#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME \"VK_AMD_draw_indirect_count\"\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkBuffer                                    countBuffer,\n    VkDeviceSize                                countBufferOffset,\n    uint32_t                                    maxDrawCount,\n    uint32_t                                    stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkBuffer                                    countBuffer,\n    VkDeviceSize                                countBufferOffset,\n    uint32_t                  
                  maxDrawCount,\n    uint32_t                                    stride);\n#endif\n\n\n#define VK_AMD_negative_viewport_height 1\n#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1\n#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME \"VK_AMD_negative_viewport_height\"\n\n\n#define VK_AMD_gpu_shader_half_float 1\n#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2\n#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME \"VK_AMD_gpu_shader_half_float\"\n\n\n#define VK_AMD_shader_ballot 1\n#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1\n#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME \"VK_AMD_shader_ballot\"\n\n\n#define VK_AMD_texture_gather_bias_lod 1\n#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1\n#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME \"VK_AMD_texture_gather_bias_lod\"\ntypedef struct VkTextureLODGatherFormatPropertiesAMD {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           supportsTextureGatherLODBiasAMD;\n} VkTextureLODGatherFormatPropertiesAMD;\n\n\n\n#define VK_AMD_shader_info 1\n#define VK_AMD_SHADER_INFO_SPEC_VERSION   1\n#define VK_AMD_SHADER_INFO_EXTENSION_NAME \"VK_AMD_shader_info\"\n\ntypedef enum VkShaderInfoTypeAMD {\n    VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0,\n    VK_SHADER_INFO_TYPE_BINARY_AMD = 1,\n    VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2,\n    VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF\n} VkShaderInfoTypeAMD;\ntypedef struct VkShaderResourceUsageAMD {\n    uint32_t    numUsedVgprs;\n    uint32_t    numUsedSgprs;\n    uint32_t    ldsSizePerLocalWorkGroup;\n    size_t      ldsUsageSizeInBytes;\n    size_t      scratchMemUsageInBytes;\n} VkShaderResourceUsageAMD;\n\ntypedef struct VkShaderStatisticsInfoAMD {\n    VkShaderStageFlags          shaderStageMask;\n    VkShaderResourceUsageAMD    resourceUsage;\n    uint32_t                    numPhysicalVgprs;\n    uint32_t                    numPhysicalSgprs;\n    uint32_t                    numAvailableVgprs;\n    uint32_t      
              numAvailableSgprs;\n    uint32_t                    computeWorkGroupSize[3];\n} VkShaderStatisticsInfoAMD;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    VkShaderStageFlagBits                       shaderStage,\n    VkShaderInfoTypeAMD                         infoType,\n    size_t*                                     pInfoSize,\n    void*                                       pInfo);\n#endif\n\n\n#define VK_AMD_shader_image_load_store_lod 1\n#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1\n#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME \"VK_AMD_shader_image_load_store_lod\"\n\n\n#define VK_NV_corner_sampled_image 1\n#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2\n#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME \"VK_NV_corner_sampled_image\"\ntypedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           cornerSampledImage;\n} VkPhysicalDeviceCornerSampledImageFeaturesNV;\n\n\n\n#define VK_IMG_format_pvrtc 1\n#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION  1\n#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME \"VK_IMG_format_pvrtc\"\n\n\n#define VK_NV_external_memory_capabilities 1\n#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1\n#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME \"VK_NV_external_memory_capabilities\"\n\ntypedef enum VkExternalMemoryHandleTypeFlagBitsNV {\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004,\n    
VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008,\n    VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF\n} VkExternalMemoryHandleTypeFlagBitsNV;\ntypedef VkFlags VkExternalMemoryHandleTypeFlagsNV;\n\ntypedef enum VkExternalMemoryFeatureFlagBitsNV {\n    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001,\n    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002,\n    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004,\n    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF\n} VkExternalMemoryFeatureFlagBitsNV;\ntypedef VkFlags VkExternalMemoryFeatureFlagsNV;\ntypedef struct VkExternalImageFormatPropertiesNV {\n    VkImageFormatProperties              imageFormatProperties;\n    VkExternalMemoryFeatureFlagsNV       externalMemoryFeatures;\n    VkExternalMemoryHandleTypeFlagsNV    exportFromImportedHandleTypes;\n    VkExternalMemoryHandleTypeFlagsNV    compatibleHandleTypes;\n} VkExternalImageFormatPropertiesNV;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV(\n    VkPhysicalDevice                            physicalDevice,\n    VkFormat                                    format,\n    VkImageType                                 type,\n    VkImageTiling                               tiling,\n    VkImageUsageFlags                           usage,\n    VkImageCreateFlags                          flags,\n    VkExternalMemoryHandleTypeFlagsNV           externalHandleType,\n    VkExternalImageFormatPropertiesNV*          pExternalImageFormatProperties);\n#endif\n\n\n#define VK_NV_external_memory 
1\n#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1\n#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME \"VK_NV_external_memory\"\ntypedef struct VkExternalMemoryImageCreateInfoNV {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkExternalMemoryHandleTypeFlagsNV    handleTypes;\n} VkExternalMemoryImageCreateInfoNV;\n\ntypedef struct VkExportMemoryAllocateInfoNV {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkExternalMemoryHandleTypeFlagsNV    handleTypes;\n} VkExportMemoryAllocateInfoNV;\n\n\n\n#define VK_EXT_validation_flags 1\n#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2\n#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME \"VK_EXT_validation_flags\"\n\ntypedef enum VkValidationCheckEXT {\n    VK_VALIDATION_CHECK_ALL_EXT = 0,\n    VK_VALIDATION_CHECK_SHADERS_EXT = 1,\n    VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkValidationCheckEXT;\ntypedef struct VkValidationFlagsEXT {\n    VkStructureType                sType;\n    const void*                    pNext;\n    uint32_t                       disabledValidationCheckCount;\n    const VkValidationCheckEXT*    pDisabledValidationChecks;\n} VkValidationFlagsEXT;\n\n\n\n#define VK_EXT_shader_subgroup_ballot 1\n#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1\n#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME \"VK_EXT_shader_subgroup_ballot\"\n\n\n#define VK_EXT_shader_subgroup_vote 1\n#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1\n#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME \"VK_EXT_shader_subgroup_vote\"\n\n\n#define VK_EXT_texture_compression_astc_hdr 1\n#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1\n#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME \"VK_EXT_texture_compression_astc_hdr\"\ntypedef VkPhysicalDeviceTextureCompressionASTCHDRFeatures VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT;\n\n\n\n#define VK_EXT_astc_decode_mode 1\n#define 
VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1\n#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME \"VK_EXT_astc_decode_mode\"\ntypedef struct VkImageViewASTCDecodeModeEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkFormat           decodeMode;\n} VkImageViewASTCDecodeModeEXT;\n\ntypedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           decodeModeSharedExponent;\n} VkPhysicalDeviceASTCDecodeFeaturesEXT;\n\n\n\n#define VK_EXT_pipeline_robustness 1\n#define VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION 1\n#define VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME \"VK_EXT_pipeline_robustness\"\n\ntypedef enum VkPipelineRobustnessBufferBehaviorEXT {\n    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = 0,\n    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = 1,\n    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = 2,\n    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = 3,\n    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkPipelineRobustnessBufferBehaviorEXT;\n\ntypedef enum VkPipelineRobustnessImageBehaviorEXT {\n    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = 0,\n    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = 1,\n    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = 2,\n    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = 3,\n    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkPipelineRobustnessImageBehaviorEXT;\ntypedef struct VkPhysicalDevicePipelineRobustnessFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pipelineRobustness;\n} VkPhysicalDevicePipelineRobustnessFeaturesEXT;\n\ntypedef struct VkPhysicalDevicePipelineRobustnessPropertiesEXT {\n    VkStructureType                          sType;\n    void*                                    pNext;\n    
VkPipelineRobustnessBufferBehaviorEXT    defaultRobustnessStorageBuffers;\n    VkPipelineRobustnessBufferBehaviorEXT    defaultRobustnessUniformBuffers;\n    VkPipelineRobustnessBufferBehaviorEXT    defaultRobustnessVertexInputs;\n    VkPipelineRobustnessImageBehaviorEXT     defaultRobustnessImages;\n} VkPhysicalDevicePipelineRobustnessPropertiesEXT;\n\ntypedef struct VkPipelineRobustnessCreateInfoEXT {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkPipelineRobustnessBufferBehaviorEXT    storageBuffers;\n    VkPipelineRobustnessBufferBehaviorEXT    uniformBuffers;\n    VkPipelineRobustnessBufferBehaviorEXT    vertexInputs;\n    VkPipelineRobustnessImageBehaviorEXT     images;\n} VkPipelineRobustnessCreateInfoEXT;\n\n\n\n#define VK_EXT_conditional_rendering 1\n#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2\n#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME \"VK_EXT_conditional_rendering\"\n\ntypedef enum VkConditionalRenderingFlagBitsEXT {\n    VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001,\n    VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkConditionalRenderingFlagBitsEXT;\ntypedef VkFlags VkConditionalRenderingFlagsEXT;\ntypedef struct VkConditionalRenderingBeginInfoEXT {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkBuffer                          buffer;\n    VkDeviceSize                      offset;\n    VkConditionalRenderingFlagsEXT    flags;\n} VkConditionalRenderingBeginInfoEXT;\n\ntypedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           conditionalRendering;\n    VkBool32           inheritedConditionalRendering;\n} VkPhysicalDeviceConditionalRenderingFeaturesEXT;\n\ntypedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32    
       conditionalRenderingEnable;\n} VkCommandBufferInheritanceConditionalRenderingInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkConditionalRenderingBeginInfoEXT*   pConditionalRenderingBegin);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT(\n    VkCommandBuffer                             commandBuffer);\n#endif\n\n\n#define VK_NV_clip_space_w_scaling 1\n#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1\n#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME \"VK_NV_clip_space_w_scaling\"\ntypedef struct VkViewportWScalingNV {\n    float    xcoeff;\n    float    ycoeff;\n} VkViewportWScalingNV;\n\ntypedef struct VkPipelineViewportWScalingStateCreateInfoNV {\n    VkStructureType                sType;\n    const void*                    pNext;\n    VkBool32                       viewportWScalingEnable;\n    uint32_t                       viewportCount;\n    const VkViewportWScalingNV*    pViewportWScalings;\n} VkPipelineViewportWScalingStateCreateInfoNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstViewport,\n    uint32_t                                    viewportCount,\n    const VkViewportWScalingNV*                 pViewportWScalings);\n#endif\n\n\n#define VK_EXT_direct_mode_display 1\n#define 
VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1\n#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME \"VK_EXT_direct_mode_display\"\ntypedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayKHR                                display);\n#endif\n\n\n#define VK_EXT_display_surface_counter 1\n#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1\n#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME \"VK_EXT_display_surface_counter\"\n\ntypedef enum VkSurfaceCounterFlagBitsEXT {\n    VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001,\n    VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT,\n    VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkSurfaceCounterFlagBitsEXT;\ntypedef VkFlags VkSurfaceCounterFlagsEXT;\ntypedef struct VkSurfaceCapabilities2EXT {\n    VkStructureType                  sType;\n    void*                            pNext;\n    uint32_t                         minImageCount;\n    uint32_t                         maxImageCount;\n    VkExtent2D                       currentExtent;\n    VkExtent2D                       minImageExtent;\n    VkExtent2D                       maxImageExtent;\n    uint32_t                         maxImageArrayLayers;\n    VkSurfaceTransformFlagsKHR       supportedTransforms;\n    VkSurfaceTransformFlagBitsKHR    currentTransform;\n    VkCompositeAlphaFlagsKHR         supportedCompositeAlpha;\n    VkImageUsageFlags                supportedUsageFlags;\n    VkSurfaceCounterFlagsEXT         supportedSurfaceCounters;\n} VkSurfaceCapabilities2EXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL 
vkGetPhysicalDeviceSurfaceCapabilities2EXT(\n    VkPhysicalDevice                            physicalDevice,\n    VkSurfaceKHR                                surface,\n    VkSurfaceCapabilities2EXT*                  pSurfaceCapabilities);\n#endif\n\n\n#define VK_EXT_display_control 1\n#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1\n#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME \"VK_EXT_display_control\"\n\ntypedef enum VkDisplayPowerStateEXT {\n    VK_DISPLAY_POWER_STATE_OFF_EXT = 0,\n    VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1,\n    VK_DISPLAY_POWER_STATE_ON_EXT = 2,\n    VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDisplayPowerStateEXT;\n\ntypedef enum VkDeviceEventTypeEXT {\n    VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0,\n    VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDeviceEventTypeEXT;\n\ntypedef enum VkDisplayEventTypeEXT {\n    VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0,\n    VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDisplayEventTypeEXT;\ntypedef struct VkDisplayPowerInfoEXT {\n    VkStructureType           sType;\n    const void*               pNext;\n    VkDisplayPowerStateEXT    powerState;\n} VkDisplayPowerInfoEXT;\n\ntypedef struct VkDeviceEventInfoEXT {\n    VkStructureType         sType;\n    const void*             pNext;\n    VkDeviceEventTypeEXT    deviceEvent;\n} VkDeviceEventInfoEXT;\n\ntypedef struct VkDisplayEventInfoEXT {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkDisplayEventTypeEXT    displayEvent;\n} VkDisplayEventInfoEXT;\n\ntypedef struct VkSwapchainCounterCreateInfoEXT {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkSurfaceCounterFlagsEXT    surfaceCounters;\n} VkSwapchainCounterCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const 
VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);\ntypedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT(\n    VkDevice                                    device,\n    VkDisplayKHR                                display,\n    const VkDisplayPowerInfoEXT*                pDisplayPowerInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT(\n    VkDevice                                    device,\n    const VkDeviceEventInfoEXT*                 pDeviceEventInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkFence*                                    pFence);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT(\n    VkDevice                                    device,\n    VkDisplayKHR                                display,\n    const VkDisplayEventInfoEXT*                pDisplayEventInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkFence*                                    pFence);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    VkSurfaceCounterFlagBitsEXT                 counter,\n    uint64_t*                                   pCounterValue);\n#endif\n\n\n#define VK_GOOGLE_display_timing 1\n#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1\n#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME \"VK_GOOGLE_display_timing\"\ntypedef struct VkRefreshCycleDurationGOOGLE {\n    uint64_t    refreshDuration;\n} 
VkRefreshCycleDurationGOOGLE;\n\ntypedef struct VkPastPresentationTimingGOOGLE {\n    uint32_t    presentID;\n    uint64_t    desiredPresentTime;\n    uint64_t    actualPresentTime;\n    uint64_t    earliestPresentTime;\n    uint64_t    presentMargin;\n} VkPastPresentationTimingGOOGLE;\n\ntypedef struct VkPresentTimeGOOGLE {\n    uint32_t    presentID;\n    uint64_t    desiredPresentTime;\n} VkPresentTimeGOOGLE;\n\ntypedef struct VkPresentTimesInfoGOOGLE {\n    VkStructureType               sType;\n    const void*                   pNext;\n    uint32_t                      swapchainCount;\n    const VkPresentTimeGOOGLE*    pTimes;\n} VkPresentTimesInfoGOOGLE;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    VkRefreshCycleDurationGOOGLE*               pDisplayTimingProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain,\n    uint32_t*                                   pPresentationTimingCount,\n    VkPastPresentationTimingGOOGLE*             pPresentationTimings);\n#endif\n\n\n#define VK_NV_sample_mask_override_coverage 1\n#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1\n#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME \"VK_NV_sample_mask_override_coverage\"\n\n\n#define VK_NV_geometry_shader_passthrough 1\n#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1\n#define 
VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME \"VK_NV_geometry_shader_passthrough\"\n\n\n#define VK_NV_viewport_array2 1\n#define VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION 1\n#define VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME \"VK_NV_viewport_array2\"\n#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION\n#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME\n\n\n#define VK_NVX_multiview_per_view_attributes 1\n#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1\n#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME \"VK_NVX_multiview_per_view_attributes\"\ntypedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           perViewPositionAllComponents;\n} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX;\n\n\n\n#define VK_NV_viewport_swizzle 1\n#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1\n#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME \"VK_NV_viewport_swizzle\"\n\ntypedef enum VkViewportCoordinateSwizzleNV {\n    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7,\n    VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkViewportCoordinateSwizzleNV;\ntypedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV;\ntypedef struct VkViewportSwizzleNV {\n    VkViewportCoordinateSwizzleNV    x;\n    VkViewportCoordinateSwizzleNV    y;\n    VkViewportCoordinateSwizzleNV    z;\n    VkViewportCoordinateSwizzleNV    w;\n} VkViewportSwizzleNV;\n\ntypedef struct VkPipelineViewportSwizzleStateCreateInfoNV {\n    VkStructureType            
                    sType;\n    const void*                                    pNext;\n    VkPipelineViewportSwizzleStateCreateFlagsNV    flags;\n    uint32_t                                       viewportCount;\n    const VkViewportSwizzleNV*                     pViewportSwizzles;\n} VkPipelineViewportSwizzleStateCreateInfoNV;\n\n\n\n#define VK_EXT_discard_rectangles 1\n#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1\n#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME \"VK_EXT_discard_rectangles\"\n\ntypedef enum VkDiscardRectangleModeEXT {\n    VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0,\n    VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1,\n    VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDiscardRectangleModeEXT;\ntypedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT;\ntypedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxDiscardRectangles;\n} VkPhysicalDeviceDiscardRectanglePropertiesEXT;\n\ntypedef struct VkPipelineDiscardRectangleStateCreateInfoEXT {\n    VkStructureType                                  sType;\n    const void*                                      pNext;\n    VkPipelineDiscardRectangleStateCreateFlagsEXT    flags;\n    VkDiscardRectangleModeEXT                        discardRectangleMode;\n    uint32_t                                         discardRectangleCount;\n    const VkRect2D*                                  pDiscardRectangles;\n} VkPipelineDiscardRectangleStateCreateInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstDiscardRectangle,\n    uint32_t                             
       discardRectangleCount,\n    const VkRect2D*                             pDiscardRectangles);\n#endif\n\n\n#define VK_EXT_conservative_rasterization 1\n#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1\n#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME \"VK_EXT_conservative_rasterization\"\n\ntypedef enum VkConservativeRasterizationModeEXT {\n    VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0,\n    VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1,\n    VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2,\n    VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkConservativeRasterizationModeEXT;\ntypedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT;\ntypedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    float              primitiveOverestimationSize;\n    float              maxExtraPrimitiveOverestimationSize;\n    float              extraPrimitiveOverestimationSizeGranularity;\n    VkBool32           primitiveUnderestimation;\n    VkBool32           conservativePointAndLineRasterization;\n    VkBool32           degenerateTrianglesRasterized;\n    VkBool32           degenerateLinesRasterized;\n    VkBool32           fullyCoveredFragmentShaderInputVariable;\n    VkBool32           conservativeRasterizationPostDepthCoverage;\n} VkPhysicalDeviceConservativeRasterizationPropertiesEXT;\n\ntypedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT {\n    VkStructureType                                           sType;\n    const void*                                               pNext;\n    VkPipelineRasterizationConservativeStateCreateFlagsEXT    flags;\n    VkConservativeRasterizationModeEXT                        conservativeRasterizationMode;\n    float                                                     extraPrimitiveOverestimationSize;\n} 
VkPipelineRasterizationConservativeStateCreateInfoEXT;\n\n\n\n#define VK_EXT_depth_clip_enable 1\n#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1\n#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME \"VK_EXT_depth_clip_enable\"\ntypedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT;\ntypedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           depthClipEnable;\n} VkPhysicalDeviceDepthClipEnableFeaturesEXT;\n\ntypedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT {\n    VkStructureType                                        sType;\n    const void*                                            pNext;\n    VkPipelineRasterizationDepthClipStateCreateFlagsEXT    flags;\n    VkBool32                                               depthClipEnable;\n} VkPipelineRasterizationDepthClipStateCreateInfoEXT;\n\n\n\n#define VK_EXT_swapchain_colorspace 1\n#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4\n#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME \"VK_EXT_swapchain_colorspace\"\n\n\n#define VK_EXT_hdr_metadata 1\n#define VK_EXT_HDR_METADATA_SPEC_VERSION  2\n#define VK_EXT_HDR_METADATA_EXTENSION_NAME \"VK_EXT_hdr_metadata\"\ntypedef struct VkXYColorEXT {\n    float    x;\n    float    y;\n} VkXYColorEXT;\n\ntypedef struct VkHdrMetadataEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkXYColorEXT       displayPrimaryRed;\n    VkXYColorEXT       displayPrimaryGreen;\n    VkXYColorEXT       displayPrimaryBlue;\n    VkXYColorEXT       whitePoint;\n    float              maxLuminance;\n    float              minLuminance;\n    float              maxContentLightLevel;\n    float              maxFrameAverageLightLevel;\n} VkHdrMetadataEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void 
VKAPI_CALL vkSetHdrMetadataEXT(\n    VkDevice                                    device,\n    uint32_t                                    swapchainCount,\n    const VkSwapchainKHR*                       pSwapchains,\n    const VkHdrMetadataEXT*                     pMetadata);\n#endif\n\n\n#define VK_EXT_external_memory_dma_buf 1\n#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1\n#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME \"VK_EXT_external_memory_dma_buf\"\n\n\n#define VK_EXT_queue_family_foreign 1\n#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1\n#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME \"VK_EXT_queue_family_foreign\"\n#define VK_QUEUE_FAMILY_FOREIGN_EXT       (~2U)\n\n\n#define VK_EXT_debug_utils 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT)\n#define VK_EXT_DEBUG_UTILS_SPEC_VERSION   2\n#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME \"VK_EXT_debug_utils\"\ntypedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT;\n\ntypedef enum VkDebugUtilsMessageSeverityFlagBitsEXT {\n    VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001,\n    VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010,\n    VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100,\n    VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000,\n    VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDebugUtilsMessageSeverityFlagBitsEXT;\n\ntypedef enum VkDebugUtilsMessageTypeFlagBitsEXT {\n    VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001,\n    VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002,\n    VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004,\n    VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDebugUtilsMessageTypeFlagBitsEXT;\ntypedef VkFlags VkDebugUtilsMessageTypeFlagsEXT;\ntypedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT;\ntypedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT;\ntypedef struct VkDebugUtilsLabelEXT {\n    VkStructureType    sType;\n    
const void*        pNext;\n    const char*        pLabelName;\n    float              color[4];\n} VkDebugUtilsLabelEXT;\n\ntypedef struct VkDebugUtilsObjectNameInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkObjectType       objectType;\n    uint64_t           objectHandle;\n    const char*        pObjectName;\n} VkDebugUtilsObjectNameInfoEXT;\n\ntypedef struct VkDebugUtilsMessengerCallbackDataEXT {\n    VkStructureType                              sType;\n    const void*                                  pNext;\n    VkDebugUtilsMessengerCallbackDataFlagsEXT    flags;\n    const char*                                  pMessageIdName;\n    int32_t                                      messageIdNumber;\n    const char*                                  pMessage;\n    uint32_t                                     queueLabelCount;\n    const VkDebugUtilsLabelEXT*                  pQueueLabels;\n    uint32_t                                     cmdBufLabelCount;\n    const VkDebugUtilsLabelEXT*                  pCmdBufLabels;\n    uint32_t                                     objectCount;\n    const VkDebugUtilsObjectNameInfoEXT*         pObjects;\n} VkDebugUtilsMessengerCallbackDataEXT;\n\ntypedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)(\n    VkDebugUtilsMessageSeverityFlagBitsEXT           messageSeverity,\n    VkDebugUtilsMessageTypeFlagsEXT                  messageTypes,\n    const VkDebugUtilsMessengerCallbackDataEXT*      pCallbackData,\n    void*                                            pUserData);\n\ntypedef struct VkDebugUtilsMessengerCreateInfoEXT {\n    VkStructureType                         sType;\n    const void*                             pNext;\n    VkDebugUtilsMessengerCreateFlagsEXT     flags;\n    VkDebugUtilsMessageSeverityFlagsEXT     messageSeverity;\n    VkDebugUtilsMessageTypeFlagsEXT         messageType;\n    PFN_vkDebugUtilsMessengerCallbackEXT    pfnUserCallback;\n    void*                         
          pUserData;\n} VkDebugUtilsMessengerCreateInfoEXT;\n\ntypedef struct VkDebugUtilsObjectTagInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkObjectType       objectType;\n    uint64_t           objectHandle;\n    uint64_t           tagName;\n    size_t             tagSize;\n    const void*        pTag;\n} VkDebugUtilsObjectTagInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo);\ntypedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);\ntypedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue);\ntypedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer);\ntypedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger);\ntypedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL 
vkSetDebugUtilsObjectNameEXT(\n    VkDevice                                    device,\n    const VkDebugUtilsObjectNameInfoEXT*        pNameInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT(\n    VkDevice                                    device,\n    const VkDebugUtilsObjectTagInfoEXT*         pTagInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT(\n    VkQueue                                     queue,\n    const VkDebugUtilsLabelEXT*                 pLabelInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT(\n    VkQueue                                     queue);\n\nVKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT(\n    VkQueue                                     queue,\n    const VkDebugUtilsLabelEXT*                 pLabelInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkDebugUtilsLabelEXT*                 pLabelInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT(\n    VkCommandBuffer                             commandBuffer);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkDebugUtilsLabelEXT*                 pLabelInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT(\n    VkInstance                                  instance,\n    const VkDebugUtilsMessengerCreateInfoEXT*   pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkDebugUtilsMessengerEXT*                   pMessenger);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT(\n    VkInstance                                  instance,\n    VkDebugUtilsMessengerEXT                    messenger,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT(\n    VkInstance                                  instance,\n    
VkDebugUtilsMessageSeverityFlagBitsEXT      messageSeverity,\n    VkDebugUtilsMessageTypeFlagsEXT             messageTypes,\n    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);\n#endif\n\n\n#define VK_EXT_sampler_filter_minmax 1\n#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2\n#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME \"VK_EXT_sampler_filter_minmax\"\ntypedef VkSamplerReductionMode VkSamplerReductionModeEXT;\n\ntypedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT;\n\ntypedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT;\n\n\n\n#define VK_AMD_gpu_shader_int16 1\n#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2\n#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME \"VK_AMD_gpu_shader_int16\"\n\n\n#define VK_AMD_mixed_attachment_samples 1\n#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1\n#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME \"VK_AMD_mixed_attachment_samples\"\n\n\n#define VK_AMD_shader_fragment_mask 1\n#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1\n#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME \"VK_AMD_shader_fragment_mask\"\n\n\n#define VK_EXT_inline_uniform_block 1\n#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1\n#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME \"VK_EXT_inline_uniform_block\"\ntypedef VkPhysicalDeviceInlineUniformBlockFeatures VkPhysicalDeviceInlineUniformBlockFeaturesEXT;\n\ntypedef VkPhysicalDeviceInlineUniformBlockProperties VkPhysicalDeviceInlineUniformBlockPropertiesEXT;\n\ntypedef VkWriteDescriptorSetInlineUniformBlock VkWriteDescriptorSetInlineUniformBlockEXT;\n\ntypedef VkDescriptorPoolInlineUniformBlockCreateInfo VkDescriptorPoolInlineUniformBlockCreateInfoEXT;\n\n\n\n#define VK_EXT_shader_stencil_export 1\n#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1\n#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME \"VK_EXT_shader_stencil_export\"\n\n\n#define VK_EXT_sample_locations 1\n#define 
VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1\n#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME \"VK_EXT_sample_locations\"\ntypedef struct VkSampleLocationEXT {\n    float    x;\n    float    y;\n} VkSampleLocationEXT;\n\ntypedef struct VkSampleLocationsInfoEXT {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkSampleCountFlagBits         sampleLocationsPerPixel;\n    VkExtent2D                    sampleLocationGridSize;\n    uint32_t                      sampleLocationsCount;\n    const VkSampleLocationEXT*    pSampleLocations;\n} VkSampleLocationsInfoEXT;\n\ntypedef struct VkAttachmentSampleLocationsEXT {\n    uint32_t                    attachmentIndex;\n    VkSampleLocationsInfoEXT    sampleLocationsInfo;\n} VkAttachmentSampleLocationsEXT;\n\ntypedef struct VkSubpassSampleLocationsEXT {\n    uint32_t                    subpassIndex;\n    VkSampleLocationsInfoEXT    sampleLocationsInfo;\n} VkSubpassSampleLocationsEXT;\n\ntypedef struct VkRenderPassSampleLocationsBeginInfoEXT {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    uint32_t                                 attachmentInitialSampleLocationsCount;\n    const VkAttachmentSampleLocationsEXT*    pAttachmentInitialSampleLocations;\n    uint32_t                                 postSubpassSampleLocationsCount;\n    const VkSubpassSampleLocationsEXT*       pPostSubpassSampleLocations;\n} VkRenderPassSampleLocationsBeginInfoEXT;\n\ntypedef struct VkPipelineSampleLocationsStateCreateInfoEXT {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkBool32                    sampleLocationsEnable;\n    VkSampleLocationsInfoEXT    sampleLocationsInfo;\n} VkPipelineSampleLocationsStateCreateInfoEXT;\n\ntypedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkSampleCountFlags    sampleLocationSampleCounts;\n    
VkExtent2D            maxSampleLocationGridSize;\n    float                 sampleLocationCoordinateRange[2];\n    uint32_t              sampleLocationSubPixelBits;\n    VkBool32              variableSampleLocations;\n} VkPhysicalDeviceSampleLocationsPropertiesEXT;\n\ntypedef struct VkMultisamplePropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkExtent2D         maxSampleLocationGridSize;\n} VkMultisamplePropertiesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo);\ntypedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT(\n    VkCommandBuffer                             commandBuffer,\n    const VkSampleLocationsInfoEXT*             pSampleLocationsInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT(\n    VkPhysicalDevice                            physicalDevice,\n    VkSampleCountFlagBits                       samples,\n    VkMultisamplePropertiesEXT*                 pMultisampleProperties);\n#endif\n\n\n#define VK_EXT_blend_operation_advanced 1\n#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2\n#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME \"VK_EXT_blend_operation_advanced\"\n\ntypedef enum VkBlendOverlapEXT {\n    VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0,\n    VK_BLEND_OVERLAP_DISJOINT_EXT = 1,\n    VK_BLEND_OVERLAP_CONJOINT_EXT = 2,\n    VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkBlendOverlapEXT;\ntypedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           advancedBlendCoherentOperations;\n} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT;\n\ntypedef struct 
VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           advancedBlendMaxColorAttachments;\n    VkBool32           advancedBlendIndependentBlend;\n    VkBool32           advancedBlendNonPremultipliedSrcColor;\n    VkBool32           advancedBlendNonPremultipliedDstColor;\n    VkBool32           advancedBlendCorrelatedOverlap;\n    VkBool32           advancedBlendAllOperations;\n} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT;\n\ntypedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkBool32             srcPremultiplied;\n    VkBool32             dstPremultiplied;\n    VkBlendOverlapEXT    blendOverlap;\n} VkPipelineColorBlendAdvancedStateCreateInfoEXT;\n\n\n\n#define VK_NV_fragment_coverage_to_color 1\n#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1\n#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME \"VK_NV_fragment_coverage_to_color\"\ntypedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV;\ntypedef struct VkPipelineCoverageToColorStateCreateInfoNV {\n    VkStructureType                                sType;\n    const void*                                    pNext;\n    VkPipelineCoverageToColorStateCreateFlagsNV    flags;\n    VkBool32                                       coverageToColorEnable;\n    uint32_t                                       coverageToColorLocation;\n} VkPipelineCoverageToColorStateCreateInfoNV;\n\n\n\n#define VK_NV_framebuffer_mixed_samples 1\n#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1\n#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME \"VK_NV_framebuffer_mixed_samples\"\n\ntypedef enum VkCoverageModulationModeNV {\n    VK_COVERAGE_MODULATION_MODE_NONE_NV = 0,\n    VK_COVERAGE_MODULATION_MODE_RGB_NV = 1,\n    VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2,\n    VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3,\n    
VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkCoverageModulationModeNV;\ntypedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV;\ntypedef struct VkPipelineCoverageModulationStateCreateInfoNV {\n    VkStructureType                                   sType;\n    const void*                                       pNext;\n    VkPipelineCoverageModulationStateCreateFlagsNV    flags;\n    VkCoverageModulationModeNV                        coverageModulationMode;\n    VkBool32                                          coverageModulationTableEnable;\n    uint32_t                                          coverageModulationTableCount;\n    const float*                                      pCoverageModulationTable;\n} VkPipelineCoverageModulationStateCreateInfoNV;\n\n\n\n#define VK_NV_fill_rectangle 1\n#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1\n#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME \"VK_NV_fill_rectangle\"\n\n\n#define VK_NV_shader_sm_builtins 1\n#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1\n#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME \"VK_NV_shader_sm_builtins\"\ntypedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           shaderSMCount;\n    uint32_t           shaderWarpsPerSM;\n} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV;\n\ntypedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderSMBuiltins;\n} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV;\n\n\n\n#define VK_EXT_post_depth_coverage 1\n#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1\n#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME \"VK_EXT_post_depth_coverage\"\n\n\n#define VK_EXT_image_drm_format_modifier 1\n#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 2\n#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME \"VK_EXT_image_drm_format_modifier\"\ntypedef struct VkDrmFormatModifierPropertiesEXT {\n   
 uint64_t                drmFormatModifier;\n    uint32_t                drmFormatModifierPlaneCount;\n    VkFormatFeatureFlags    drmFormatModifierTilingFeatures;\n} VkDrmFormatModifierPropertiesEXT;\n\ntypedef struct VkDrmFormatModifierPropertiesListEXT {\n    VkStructureType                      sType;\n    void*                                pNext;\n    uint32_t                             drmFormatModifierCount;\n    VkDrmFormatModifierPropertiesEXT*    pDrmFormatModifierProperties;\n} VkDrmFormatModifierPropertiesListEXT;\n\ntypedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint64_t           drmFormatModifier;\n    VkSharingMode      sharingMode;\n    uint32_t           queueFamilyIndexCount;\n    const uint32_t*    pQueueFamilyIndices;\n} VkPhysicalDeviceImageDrmFormatModifierInfoEXT;\n\ntypedef struct VkImageDrmFormatModifierListCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           drmFormatModifierCount;\n    const uint64_t*    pDrmFormatModifiers;\n} VkImageDrmFormatModifierListCreateInfoEXT;\n\ntypedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT {\n    VkStructureType               sType;\n    const void*                   pNext;\n    uint64_t                      drmFormatModifier;\n    uint32_t                      drmFormatModifierPlaneCount;\n    const VkSubresourceLayout*    pPlaneLayouts;\n} VkImageDrmFormatModifierExplicitCreateInfoEXT;\n\ntypedef struct VkImageDrmFormatModifierPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint64_t           drmFormatModifier;\n} VkImageDrmFormatModifierPropertiesEXT;\n\ntypedef struct VkDrmFormatModifierProperties2EXT {\n    uint64_t                 drmFormatModifier;\n    uint32_t                 drmFormatModifierPlaneCount;\n    VkFormatFeatureFlags2    drmFormatModifierTilingFeatures;\n} VkDrmFormatModifierProperties2EXT;\n\ntypedef 
struct VkDrmFormatModifierPropertiesList2EXT {\n    VkStructureType                       sType;\n    void*                                 pNext;\n    uint32_t                              drmFormatModifierCount;\n    VkDrmFormatModifierProperties2EXT*    pDrmFormatModifierProperties;\n} VkDrmFormatModifierPropertiesList2EXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT(\n    VkDevice                                    device,\n    VkImage                                     image,\n    VkImageDrmFormatModifierPropertiesEXT*      pProperties);\n#endif\n\n\n#define VK_EXT_validation_cache 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT)\n#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1\n#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME \"VK_EXT_validation_cache\"\n\ntypedef enum VkValidationCacheHeaderVersionEXT {\n    VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1,\n    VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkValidationCacheHeaderVersionEXT;\ntypedef VkFlags VkValidationCacheCreateFlagsEXT;\ntypedef struct VkValidationCacheCreateInfoEXT {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkValidationCacheCreateFlagsEXT    flags;\n    size_t                             initialDataSize;\n    const void*                        pInitialData;\n} VkValidationCacheCreateInfoEXT;\n\ntypedef struct VkShaderModuleValidationCacheCreateInfoEXT {\n    VkStructureType         sType;\n    const void*             pNext;\n    VkValidationCacheEXT    validationCache;\n} VkShaderModuleValidationCacheCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, 
VkValidationCacheEXT* pValidationCache);\ntypedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT(\n    VkDevice                                    device,\n    const VkValidationCacheCreateInfoEXT*       pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkValidationCacheEXT*                       pValidationCache);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT(\n    VkDevice                                    device,\n    VkValidationCacheEXT                        validationCache,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT(\n    VkDevice                                    device,\n    VkValidationCacheEXT                        dstCache,\n    uint32_t                                    srcCacheCount,\n    const VkValidationCacheEXT*                 pSrcCaches);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT(\n    VkDevice                                    device,\n    VkValidationCacheEXT                        validationCache,\n    size_t*                                     pDataSize,\n    void*                                       pData);\n#endif\n\n\n#define VK_EXT_descriptor_indexing 1\n#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2\n#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME \"VK_EXT_descriptor_indexing\"\ntypedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT;\n\ntypedef VkDescriptorBindingFlags 
VkDescriptorBindingFlagsEXT;\n\ntypedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT;\n\ntypedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT;\n\ntypedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT;\n\ntypedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT;\n\ntypedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT;\n\n\n\n#define VK_EXT_shader_viewport_index_layer 1\n#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1\n#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME \"VK_EXT_shader_viewport_index_layer\"\n\n\n#define VK_NV_shading_rate_image 1\n#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3\n#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME \"VK_NV_shading_rate_image\"\n\ntypedef enum VkShadingRatePaletteEntryNV {\n    VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0,\n    VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1,\n    VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2,\n    VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3,\n    VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10,\n    VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11,\n    VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF\n} VkShadingRatePaletteEntryNV;\n\ntypedef enum VkCoarseSampleOrderTypeNV {\n    VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 
0,\n    VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1,\n    VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2,\n    VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3,\n    VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkCoarseSampleOrderTypeNV;\ntypedef struct VkShadingRatePaletteNV {\n    uint32_t                              shadingRatePaletteEntryCount;\n    const VkShadingRatePaletteEntryNV*    pShadingRatePaletteEntries;\n} VkShadingRatePaletteNV;\n\ntypedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkBool32                         shadingRateImageEnable;\n    uint32_t                         viewportCount;\n    const VkShadingRatePaletteNV*    pShadingRatePalettes;\n} VkPipelineViewportShadingRateImageStateCreateInfoNV;\n\ntypedef struct VkPhysicalDeviceShadingRateImageFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shadingRateImage;\n    VkBool32           shadingRateCoarseSampleOrder;\n} VkPhysicalDeviceShadingRateImageFeaturesNV;\n\ntypedef struct VkPhysicalDeviceShadingRateImagePropertiesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkExtent2D         shadingRateTexelSize;\n    uint32_t           shadingRatePaletteSize;\n    uint32_t           shadingRateMaxCoarseSamples;\n} VkPhysicalDeviceShadingRateImagePropertiesNV;\n\ntypedef struct VkCoarseSampleLocationNV {\n    uint32_t    pixelX;\n    uint32_t    pixelY;\n    uint32_t    sample;\n} VkCoarseSampleLocationNV;\n\ntypedef struct VkCoarseSampleOrderCustomNV {\n    VkShadingRatePaletteEntryNV        shadingRate;\n    uint32_t                           sampleCount;\n    uint32_t                           sampleLocationCount;\n    const VkCoarseSampleLocationNV*    pSampleLocations;\n} VkCoarseSampleOrderCustomNV;\n\ntypedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV {\n    VkStructureType           
            sType;\n    const void*                           pNext;\n    VkCoarseSampleOrderTypeNV             sampleOrderType;\n    uint32_t                              customSampleOrderCount;\n    const VkCoarseSampleOrderCustomNV*    pCustomSampleOrders;\n} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV(\n    VkCommandBuffer                             commandBuffer,\n    VkImageView                                 imageView,\n    VkImageLayout                               imageLayout);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstViewport,\n    uint32_t                                    viewportCount,\n    const VkShadingRatePaletteNV*               pShadingRatePalettes);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV(\n    VkCommandBuffer                             commandBuffer,\n    VkCoarseSampleOrderTypeNV                   sampleOrderType,\n    uint32_t                                    customSampleOrderCount,\n    const VkCoarseSampleOrderCustomNV*          pCustomSampleOrders);\n#endif\n\n\n#define VK_NV_ray_tracing 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV)\n#define VK_NV_RAY_TRACING_SPEC_VERSION    3\n#define 
VK_NV_RAY_TRACING_EXTENSION_NAME  \"VK_NV_ray_tracing\"\n#define VK_SHADER_UNUSED_KHR              (~0U)\n#define VK_SHADER_UNUSED_NV               VK_SHADER_UNUSED_KHR\n\ntypedef enum VkRayTracingShaderGroupTypeKHR {\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR,\n    VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkRayTracingShaderGroupTypeKHR;\ntypedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV;\n\n\ntypedef enum VkGeometryTypeKHR {\n    VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0,\n    VK_GEOMETRY_TYPE_AABBS_KHR = 1,\n    VK_GEOMETRY_TYPE_INSTANCES_KHR = 2,\n    VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR,\n    VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR,\n    VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkGeometryTypeKHR;\ntypedef VkGeometryTypeKHR VkGeometryTypeNV;\n\n\ntypedef enum VkAccelerationStructureTypeKHR {\n    VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0,\n    VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1,\n    VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2,\n    VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,\n    VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,\n    VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkAccelerationStructureTypeKHR;\ntypedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV;\n\n\ntypedef enum VkCopyAccelerationStructureModeKHR {\n    
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR,\n    VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkCopyAccelerationStructureModeKHR;\ntypedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV;\n\n\ntypedef enum VkAccelerationStructureMemoryRequirementsTypeNV {\n    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0,\n    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1,\n    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2,\n    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkAccelerationStructureMemoryRequirementsTypeNV;\n\ntypedef enum VkGeometryFlagBitsKHR {\n    VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001,\n    VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002,\n    VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR,\n    VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR,\n    VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkGeometryFlagBitsKHR;\ntypedef VkFlags VkGeometryFlagsKHR;\ntypedef VkGeometryFlagsKHR VkGeometryFlagsNV;\n\ntypedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV;\n\n\ntypedef enum VkGeometryInstanceFlagBitsKHR {\n    VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001,\n    VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR = 0x00000002,\n    VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004,\n    VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008,\n    VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 
VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR,\n    VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR,\n    VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR,\n    VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR,\n    VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR,\n    VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkGeometryInstanceFlagBitsKHR;\ntypedef VkFlags VkGeometryInstanceFlagsKHR;\ntypedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV;\n\ntypedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV;\n\n\ntypedef enum VkBuildAccelerationStructureFlagBitsKHR {\n    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001,\n    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002,\n    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004,\n    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008,\n    VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x00000010,\n    VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV = 0x00000020,\n    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR,\n    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR,\n    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR,\n    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR,\n    VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR,\n    VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkBuildAccelerationStructureFlagBitsKHR;\ntypedef VkFlags 
VkBuildAccelerationStructureFlagsKHR;\ntypedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV;\n\ntypedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV;\n\ntypedef struct VkRayTracingShaderGroupCreateInfoNV {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkRayTracingShaderGroupTypeKHR    type;\n    uint32_t                          generalShader;\n    uint32_t                          closestHitShader;\n    uint32_t                          anyHitShader;\n    uint32_t                          intersectionShader;\n} VkRayTracingShaderGroupCreateInfoNV;\n\ntypedef struct VkRayTracingPipelineCreateInfoNV {\n    VkStructureType                               sType;\n    const void*                                   pNext;\n    VkPipelineCreateFlags                         flags;\n    uint32_t                                      stageCount;\n    const VkPipelineShaderStageCreateInfo*        pStages;\n    uint32_t                                      groupCount;\n    const VkRayTracingShaderGroupCreateInfoNV*    pGroups;\n    uint32_t                                      maxRecursionDepth;\n    VkPipelineLayout                              layout;\n    VkPipeline                                    basePipelineHandle;\n    int32_t                                       basePipelineIndex;\n} VkRayTracingPipelineCreateInfoNV;\n\ntypedef struct VkGeometryTrianglesNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBuffer           vertexData;\n    VkDeviceSize       vertexOffset;\n    uint32_t           vertexCount;\n    VkDeviceSize       vertexStride;\n    VkFormat           vertexFormat;\n    VkBuffer           indexData;\n    VkDeviceSize       indexOffset;\n    uint32_t           indexCount;\n    VkIndexType        indexType;\n    VkBuffer           transformData;\n    VkDeviceSize       transformOffset;\n} VkGeometryTrianglesNV;\n\ntypedef 
struct VkGeometryAABBNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBuffer           aabbData;\n    uint32_t           numAABBs;\n    uint32_t           stride;\n    VkDeviceSize       offset;\n} VkGeometryAABBNV;\n\ntypedef struct VkGeometryDataNV {\n    VkGeometryTrianglesNV    triangles;\n    VkGeometryAABBNV         aabbs;\n} VkGeometryDataNV;\n\ntypedef struct VkGeometryNV {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkGeometryTypeKHR     geometryType;\n    VkGeometryDataNV      geometry;\n    VkGeometryFlagsKHR    flags;\n} VkGeometryNV;\n\ntypedef struct VkAccelerationStructureInfoNV {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    VkAccelerationStructureTypeNV          type;\n    VkBuildAccelerationStructureFlagsNV    flags;\n    uint32_t                               instanceCount;\n    uint32_t                               geometryCount;\n    const VkGeometryNV*                    pGeometries;\n} VkAccelerationStructureInfoNV;\n\ntypedef struct VkAccelerationStructureCreateInfoNV {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkDeviceSize                     compactedSize;\n    VkAccelerationStructureInfoNV    info;\n} VkAccelerationStructureCreateInfoNV;\n\ntypedef struct VkBindAccelerationStructureMemoryInfoNV {\n    VkStructureType              sType;\n    const void*                  pNext;\n    VkAccelerationStructureNV    accelerationStructure;\n    VkDeviceMemory               memory;\n    VkDeviceSize                 memoryOffset;\n    uint32_t                     deviceIndexCount;\n    const uint32_t*              pDeviceIndices;\n} VkBindAccelerationStructureMemoryInfoNV;\n\ntypedef struct VkWriteDescriptorSetAccelerationStructureNV {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    uint32_t                            
accelerationStructureCount;\n    const VkAccelerationStructureNV*    pAccelerationStructures;\n} VkWriteDescriptorSetAccelerationStructureNV;\n\ntypedef struct VkAccelerationStructureMemoryRequirementsInfoNV {\n    VkStructureType                                    sType;\n    const void*                                        pNext;\n    VkAccelerationStructureMemoryRequirementsTypeNV    type;\n    VkAccelerationStructureNV                          accelerationStructure;\n} VkAccelerationStructureMemoryRequirementsInfoNV;\n\ntypedef struct VkPhysicalDeviceRayTracingPropertiesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           shaderGroupHandleSize;\n    uint32_t           maxRecursionDepth;\n    uint32_t           maxShaderGroupStride;\n    uint32_t           shaderGroupBaseAlignment;\n    uint64_t           maxGeometryCount;\n    uint64_t           maxInstanceCount;\n    uint64_t           maxTriangleCount;\n    uint32_t           maxDescriptorSetAccelerationStructures;\n} VkPhysicalDeviceRayTracingPropertiesNV;\n\ntypedef struct VkTransformMatrixKHR {\n    float    matrix[3][4];\n} VkTransformMatrixKHR;\n\ntypedef VkTransformMatrixKHR VkTransformMatrixNV;\n\ntypedef struct VkAabbPositionsKHR {\n    float    minX;\n    float    minY;\n    float    minZ;\n    float    maxX;\n    float    maxY;\n    float    maxZ;\n} VkAabbPositionsKHR;\n\ntypedef VkAabbPositionsKHR VkAabbPositionsNV;\n\ntypedef struct VkAccelerationStructureInstanceKHR {\n    VkTransformMatrixKHR          transform;\n    uint32_t                      instanceCustomIndex:24;\n    uint32_t                      mask:8;\n    uint32_t                      instanceShaderBindingTableRecordOffset:24;\n    VkGeometryInstanceFlagsKHR    flags:8;\n    uint64_t                      accelerationStructureReference;\n} VkAccelerationStructureInstanceKHR;\n\ntypedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV;\n\ntypedef VkResult (VKAPI_PTR 
*PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure);\ntypedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);\ntypedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);\ntypedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode);\ntypedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const 
VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery);\ntypedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV(\n    VkDevice                                    device,\n    const VkAccelerationStructureCreateInfoNV*  pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkAccelerationStructureNV*                  pAccelerationStructure);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV(\n    VkDevice                                    device,\n    VkAccelerationStructureNV                   accelerationStructure,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV(\n    VkDevice                                    device,\n    const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,\n    VkMemoryRequirements2KHR*                   pMemoryRequirements);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV(\n    VkDevice              
                      device,\n    uint32_t                                    bindInfoCount,\n    const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV(\n    VkCommandBuffer                             commandBuffer,\n    const VkAccelerationStructureInfoNV*        pInfo,\n    VkBuffer                                    instanceData,\n    VkDeviceSize                                instanceOffset,\n    VkBool32                                    update,\n    VkAccelerationStructureNV                   dst,\n    VkAccelerationStructureNV                   src,\n    VkBuffer                                    scratch,\n    VkDeviceSize                                scratchOffset);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV(\n    VkCommandBuffer                             commandBuffer,\n    VkAccelerationStructureNV                   dst,\n    VkAccelerationStructureNV                   src,\n    VkCopyAccelerationStructureModeKHR          mode);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    raygenShaderBindingTableBuffer,\n    VkDeviceSize                                raygenShaderBindingOffset,\n    VkBuffer                                    missShaderBindingTableBuffer,\n    VkDeviceSize                                missShaderBindingOffset,\n    VkDeviceSize                                missShaderBindingStride,\n    VkBuffer                                    hitShaderBindingTableBuffer,\n    VkDeviceSize                                hitShaderBindingOffset,\n    VkDeviceSize                                hitShaderBindingStride,\n    VkBuffer                                    callableShaderBindingTableBuffer,\n    VkDeviceSize                                callableShaderBindingOffset,\n    VkDeviceSize                                
callableShaderBindingStride,\n    uint32_t                                    width,\n    uint32_t                                    height,\n    uint32_t                                    depth);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV(\n    VkDevice                                    device,\n    VkPipelineCache                             pipelineCache,\n    uint32_t                                    createInfoCount,\n    const VkRayTracingPipelineCreateInfoNV*     pCreateInfos,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipeline*                                 pPipelines);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    uint32_t                                    firstGroup,\n    uint32_t                                    groupCount,\n    size_t                                      dataSize,\n    void*                                       pData);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    uint32_t                                    firstGroup,\n    uint32_t                                    groupCount,\n    size_t                                      dataSize,\n    void*                                       pData);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV(\n    VkDevice                                    device,\n    VkAccelerationStructureNV                   accelerationStructure,\n    size_t                                      dataSize,\n    void*                                       pData);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    
accelerationStructureCount,\n    const VkAccelerationStructureNV*            pAccelerationStructures,\n    VkQueryType                                 queryType,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    uint32_t                                    shader);\n#endif\n\n\n#define VK_NV_representative_fragment_test 1\n#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2\n#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME \"VK_NV_representative_fragment_test\"\ntypedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           representativeFragmentTest;\n} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV;\n\ntypedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           representativeFragmentTestEnable;\n} VkPipelineRepresentativeFragmentTestStateCreateInfoNV;\n\n\n\n#define VK_EXT_filter_cubic 1\n#define VK_EXT_FILTER_CUBIC_SPEC_VERSION  3\n#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME \"VK_EXT_filter_cubic\"\ntypedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkImageViewType    imageViewType;\n} VkPhysicalDeviceImageViewImageFormatInfoEXT;\n\ntypedef struct VkFilterCubicImageViewImageFormatPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           filterCubic;\n    VkBool32           filterCubicMinmax;\n} VkFilterCubicImageViewImageFormatPropertiesEXT;\n\n\n\n#define VK_QCOM_render_pass_shader_resolve 1\n#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4\n#define 
VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME \"VK_QCOM_render_pass_shader_resolve\"\n\n\n#define VK_EXT_global_priority 1\n#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2\n#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME \"VK_EXT_global_priority\"\ntypedef VkQueueGlobalPriorityKHR VkQueueGlobalPriorityEXT;\n\ntypedef VkDeviceQueueGlobalPriorityCreateInfoKHR VkDeviceQueueGlobalPriorityCreateInfoEXT;\n\n\n\n#define VK_EXT_external_memory_host 1\n#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1\n#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME \"VK_EXT_external_memory_host\"\ntypedef struct VkImportMemoryHostPointerInfoEXT {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n    void*                                 pHostPointer;\n} VkImportMemoryHostPointerInfoEXT;\n\ntypedef struct VkMemoryHostPointerPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           memoryTypeBits;\n} VkMemoryHostPointerPropertiesEXT;\n\ntypedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceSize       minImportedHostPointerAlignment;\n} VkPhysicalDeviceExternalMemoryHostPropertiesEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT(\n    VkDevice                                    device,\n    VkExternalMemoryHandleTypeFlagBits          handleType,\n    const void*                                 pHostPointer,\n    VkMemoryHostPointerPropertiesEXT*           pMemoryHostPointerProperties);\n#endif\n\n\n#define VK_AMD_buffer_marker 1\n#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1\n#define 
VK_AMD_BUFFER_MARKER_EXTENSION_NAME \"VK_AMD_buffer_marker\"\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineStageFlagBits                     pipelineStage,\n    VkBuffer                                    dstBuffer,\n    VkDeviceSize                                dstOffset,\n    uint32_t                                    marker);\n#endif\n\n\n#define VK_AMD_pipeline_compiler_control 1\n#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1\n#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME \"VK_AMD_pipeline_compiler_control\"\n\ntypedef enum VkPipelineCompilerControlFlagBitsAMD {\n    VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF\n} VkPipelineCompilerControlFlagBitsAMD;\ntypedef VkFlags VkPipelineCompilerControlFlagsAMD;\ntypedef struct VkPipelineCompilerControlCreateInfoAMD {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkPipelineCompilerControlFlagsAMD    compilerControlFlags;\n} VkPipelineCompilerControlCreateInfoAMD;\n\n\n\n#define VK_EXT_calibrated_timestamps 1\n#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 2\n#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME \"VK_EXT_calibrated_timestamps\"\n\ntypedef enum VkTimeDomainEXT {\n    VK_TIME_DOMAIN_DEVICE_EXT = 0,\n    VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1,\n    VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2,\n    VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3,\n    VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkTimeDomainEXT;\ntypedef struct VkCalibratedTimestampInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkTimeDomainEXT    timeDomain;\n} VkCalibratedTimestampInfoEXT;\n\ntypedef VkResult 
(VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pTimeDomainCount,\n    VkTimeDomainEXT*                            pTimeDomains);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT(\n    VkDevice                                    device,\n    uint32_t                                    timestampCount,\n    const VkCalibratedTimestampInfoEXT*         pTimestampInfos,\n    uint64_t*                                   pTimestamps,\n    uint64_t*                                   pMaxDeviation);\n#endif\n\n\n#define VK_AMD_shader_core_properties 1\n#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2\n#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME \"VK_AMD_shader_core_properties\"\ntypedef struct VkPhysicalDeviceShaderCorePropertiesAMD {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           shaderEngineCount;\n    uint32_t           shaderArraysPerEngineCount;\n    uint32_t           computeUnitsPerShaderArray;\n    uint32_t           simdPerComputeUnit;\n    uint32_t           wavefrontsPerSimd;\n    uint32_t           wavefrontSize;\n    uint32_t           sgprsPerSimd;\n    uint32_t           minSgprAllocation;\n    uint32_t           maxSgprAllocation;\n    uint32_t           sgprAllocationGranularity;\n    uint32_t           vgprsPerSimd;\n    uint32_t           minVgprAllocation;\n    uint32_t           maxVgprAllocation;\n    uint32_t           vgprAllocationGranularity;\n} 
VkPhysicalDeviceShaderCorePropertiesAMD;\n\n\n\n#define VK_AMD_memory_overallocation_behavior 1\n#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1\n#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME \"VK_AMD_memory_overallocation_behavior\"\n\ntypedef enum VkMemoryOverallocationBehaviorAMD {\n    VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0,\n    VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1,\n    VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2,\n    VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF\n} VkMemoryOverallocationBehaviorAMD;\ntypedef struct VkDeviceMemoryOverallocationCreateInfoAMD {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkMemoryOverallocationBehaviorAMD    overallocationBehavior;\n} VkDeviceMemoryOverallocationCreateInfoAMD;\n\n\n\n#define VK_EXT_vertex_attribute_divisor 1\n#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3\n#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME \"VK_EXT_vertex_attribute_divisor\"\ntypedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxVertexAttribDivisor;\n} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT;\n\ntypedef struct VkVertexInputBindingDivisorDescriptionEXT {\n    uint32_t    binding;\n    uint32_t    divisor;\n} VkVertexInputBindingDivisorDescriptionEXT;\n\ntypedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT {\n    VkStructureType                                     sType;\n    const void*                                         pNext;\n    uint32_t                                            vertexBindingDivisorCount;\n    const VkVertexInputBindingDivisorDescriptionEXT*    pVertexBindingDivisors;\n} VkPipelineVertexInputDivisorStateCreateInfoEXT;\n\ntypedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    
VkBool32           vertexAttributeInstanceRateDivisor;\n    VkBool32           vertexAttributeInstanceRateZeroDivisor;\n} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT;\n\n\n\n#define VK_EXT_pipeline_creation_feedback 1\n#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1\n#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME \"VK_EXT_pipeline_creation_feedback\"\ntypedef VkPipelineCreationFeedbackFlagBits VkPipelineCreationFeedbackFlagBitsEXT;\n\ntypedef VkPipelineCreationFeedbackFlags VkPipelineCreationFeedbackFlagsEXT;\n\ntypedef VkPipelineCreationFeedbackCreateInfo VkPipelineCreationFeedbackCreateInfoEXT;\n\ntypedef VkPipelineCreationFeedback VkPipelineCreationFeedbackEXT;\n\n\n\n#define VK_NV_shader_subgroup_partitioned 1\n#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1\n#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME \"VK_NV_shader_subgroup_partitioned\"\n\n\n#define VK_NV_compute_shader_derivatives 1\n#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1\n#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME \"VK_NV_compute_shader_derivatives\"\ntypedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           computeDerivativeGroupQuads;\n    VkBool32           computeDerivativeGroupLinear;\n} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV;\n\n\n\n#define VK_NV_mesh_shader 1\n#define VK_NV_MESH_SHADER_SPEC_VERSION    1\n#define VK_NV_MESH_SHADER_EXTENSION_NAME  \"VK_NV_mesh_shader\"\ntypedef struct VkPhysicalDeviceMeshShaderFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           taskShader;\n    VkBool32           meshShader;\n} VkPhysicalDeviceMeshShaderFeaturesNV;\n\ntypedef struct VkPhysicalDeviceMeshShaderPropertiesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxDrawMeshTasksCount;\n    uint32_t           maxTaskWorkGroupInvocations;\n    
uint32_t           maxTaskWorkGroupSize[3];\n    uint32_t           maxTaskTotalMemorySize;\n    uint32_t           maxTaskOutputCount;\n    uint32_t           maxMeshWorkGroupInvocations;\n    uint32_t           maxMeshWorkGroupSize[3];\n    uint32_t           maxMeshTotalMemorySize;\n    uint32_t           maxMeshOutputVertices;\n    uint32_t           maxMeshOutputPrimitives;\n    uint32_t           maxMeshMultiviewViewCount;\n    uint32_t           meshOutputPerVertexGranularity;\n    uint32_t           meshOutputPerPrimitiveGranularity;\n} VkPhysicalDeviceMeshShaderPropertiesNV;\n\ntypedef struct VkDrawMeshTasksIndirectCommandNV {\n    uint32_t    taskCount;\n    uint32_t    firstTask;\n} VkDrawMeshTasksIndirectCommandNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    taskCount,\n    uint32_t                                    firstTask);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV(\n    VkCommandBuffer                             commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    uint32_t                                    drawCount,\n    uint32_t                                    stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV(\n    VkCommandBuffer                             
commandBuffer,\n    VkBuffer                                    buffer,\n    VkDeviceSize                                offset,\n    VkBuffer                                    countBuffer,\n    VkDeviceSize                                countBufferOffset,\n    uint32_t                                    maxDrawCount,\n    uint32_t                                    stride);\n#endif\n\n\n#define VK_NV_fragment_shader_barycentric 1\n#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1\n#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME \"VK_NV_fragment_shader_barycentric\"\ntypedef VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV;\n\n\n\n#define VK_NV_shader_image_footprint 1\n#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2\n#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME \"VK_NV_shader_image_footprint\"\ntypedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           imageFootprint;\n} VkPhysicalDeviceShaderImageFootprintFeaturesNV;\n\n\n\n#define VK_NV_scissor_exclusive 1\n#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1\n#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME \"VK_NV_scissor_exclusive\"\ntypedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           exclusiveScissorCount;\n    const VkRect2D*    pExclusiveScissors;\n} VkPipelineViewportExclusiveScissorStateCreateInfoNV;\n\ntypedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           exclusiveScissor;\n} VkPhysicalDeviceExclusiveScissorFeaturesNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR 
void VKAPI_CALL vkCmdSetExclusiveScissorNV(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstExclusiveScissor,\n    uint32_t                                    exclusiveScissorCount,\n    const VkRect2D*                             pExclusiveScissors);\n#endif\n\n\n#define VK_NV_device_diagnostic_checkpoints 1\n#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2\n#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME \"VK_NV_device_diagnostic_checkpoints\"\ntypedef struct VkQueueFamilyCheckpointPropertiesNV {\n    VkStructureType         sType;\n    void*                   pNext;\n    VkPipelineStageFlags    checkpointExecutionStageMask;\n} VkQueueFamilyCheckpointPropertiesNV;\n\ntypedef struct VkCheckpointDataNV {\n    VkStructureType            sType;\n    void*                      pNext;\n    VkPipelineStageFlagBits    stage;\n    void*                      pCheckpointMarker;\n} VkCheckpointDataNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker);\ntypedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV(\n    VkCommandBuffer                             commandBuffer,\n    const void*                                 pCheckpointMarker);\n\nVKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV(\n    VkQueue                                     queue,\n    uint32_t*                                   pCheckpointDataCount,\n    VkCheckpointDataNV*                         pCheckpointData);\n#endif\n\n\n#define VK_INTEL_shader_integer_functions2 1\n#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1\n#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME \"VK_INTEL_shader_integer_functions2\"\ntypedef struct 
VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderIntegerFunctions2;\n} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL;\n\n\n\n#define VK_INTEL_performance_query 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL)\n#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2\n#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME \"VK_INTEL_performance_query\"\n\ntypedef enum VkPerformanceConfigurationTypeINTEL {\n    VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0,\n    VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF\n} VkPerformanceConfigurationTypeINTEL;\n\ntypedef enum VkQueryPoolSamplingModeINTEL {\n    VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0,\n    VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF\n} VkQueryPoolSamplingModeINTEL;\n\ntypedef enum VkPerformanceOverrideTypeINTEL {\n    VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0,\n    VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1,\n    VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF\n} VkPerformanceOverrideTypeINTEL;\n\ntypedef enum VkPerformanceParameterTypeINTEL {\n    VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0,\n    VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1,\n    VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF\n} VkPerformanceParameterTypeINTEL;\n\ntypedef enum VkPerformanceValueTypeINTEL {\n    VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0,\n    VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1,\n    VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2,\n    VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3,\n    VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4,\n    VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF\n} VkPerformanceValueTypeINTEL;\ntypedef union VkPerformanceValueDataINTEL {\n    uint32_t       value32;\n    uint64_t       value64;\n    float          valueFloat;\n    VkBool32     
  valueBool;\n    const char*    valueString;\n} VkPerformanceValueDataINTEL;\n\ntypedef struct VkPerformanceValueINTEL {\n    VkPerformanceValueTypeINTEL    type;\n    VkPerformanceValueDataINTEL    data;\n} VkPerformanceValueINTEL;\n\ntypedef struct VkInitializePerformanceApiInfoINTEL {\n    VkStructureType    sType;\n    const void*        pNext;\n    void*              pUserData;\n} VkInitializePerformanceApiInfoINTEL;\n\ntypedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkQueryPoolSamplingModeINTEL    performanceCountersSampling;\n} VkQueryPoolPerformanceQueryCreateInfoINTEL;\n\ntypedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL;\n\ntypedef struct VkPerformanceMarkerInfoINTEL {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint64_t           marker;\n} VkPerformanceMarkerInfoINTEL;\n\ntypedef struct VkPerformanceStreamMarkerInfoINTEL {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           marker;\n} VkPerformanceStreamMarkerInfoINTEL;\n\ntypedef struct VkPerformanceOverrideInfoINTEL {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkPerformanceOverrideTypeINTEL    type;\n    VkBool32                          enable;\n    uint64_t                          parameter;\n} VkPerformanceOverrideInfoINTEL;\n\ntypedef struct VkPerformanceConfigurationAcquireInfoINTEL {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    VkPerformanceConfigurationTypeINTEL    type;\n} VkPerformanceConfigurationAcquireInfoINTEL;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo);\ntypedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device);\ntypedef VkResult (VKAPI_PTR 
*PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration);\ntypedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration);\ntypedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL(\n    VkDevice                                    device,\n    const VkInitializePerformanceApiInfoINTEL*  pInitializeInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL(\n    VkDevice                                    device);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL(\n    VkCommandBuffer                             commandBuffer,\n    const VkPerformanceMarkerInfoINTEL*         pMarkerInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL(\n    VkCommandBuffer                             commandBuffer,\n    const VkPerformanceStreamMarkerInfoINTEL*   pMarkerInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL(\n    VkCommandBuffer                             commandBuffer,\n    const VkPerformanceOverrideInfoINTEL*       
pOverrideInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL(\n    VkDevice                                    device,\n    const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,\n    VkPerformanceConfigurationINTEL*            pConfiguration);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL(\n    VkDevice                                    device,\n    VkPerformanceConfigurationINTEL             configuration);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL(\n    VkQueue                                     queue,\n    VkPerformanceConfigurationINTEL             configuration);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL(\n    VkDevice                                    device,\n    VkPerformanceParameterTypeINTEL             parameter,\n    VkPerformanceValueINTEL*                    pValue);\n#endif\n\n\n#define VK_EXT_pci_bus_info 1\n#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION  2\n#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME \"VK_EXT_pci_bus_info\"\ntypedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           pciDomain;\n    uint32_t           pciBus;\n    uint32_t           pciDevice;\n    uint32_t           pciFunction;\n} VkPhysicalDevicePCIBusInfoPropertiesEXT;\n\n\n\n#define VK_AMD_display_native_hdr 1\n#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1\n#define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME \"VK_AMD_display_native_hdr\"\ntypedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           localDimmingSupport;\n} VkDisplayNativeHdrSurfaceCapabilitiesAMD;\n\ntypedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           localDimmingEnable;\n} VkSwapchainDisplayNativeHdrCreateInfoAMD;\n\ntypedef void (VKAPI_PTR 
*PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapChain,\n    VkBool32                                    localDimmingEnable);\n#endif\n\n\n#define VK_EXT_fragment_density_map 1\n#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 2\n#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME \"VK_EXT_fragment_density_map\"\ntypedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentDensityMap;\n    VkBool32           fragmentDensityMapDynamic;\n    VkBool32           fragmentDensityMapNonSubsampledImages;\n} VkPhysicalDeviceFragmentDensityMapFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkExtent2D         minFragmentDensityTexelSize;\n    VkExtent2D         maxFragmentDensityTexelSize;\n    VkBool32           fragmentDensityInvocations;\n} VkPhysicalDeviceFragmentDensityMapPropertiesEXT;\n\ntypedef struct VkRenderPassFragmentDensityMapCreateInfoEXT {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkAttachmentReference    fragmentDensityMapAttachment;\n} VkRenderPassFragmentDensityMapCreateInfoEXT;\n\n\n\n#define VK_EXT_scalar_block_layout 1\n#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1\n#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME \"VK_EXT_scalar_block_layout\"\ntypedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT;\n\n\n\n#define VK_GOOGLE_hlsl_functionality1 1\n#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION 1\n#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME \"VK_GOOGLE_hlsl_functionality1\"\n#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 
VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION\n#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME\n\n\n#define VK_GOOGLE_decorate_string 1\n#define VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1\n#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME \"VK_GOOGLE_decorate_string\"\n\n\n#define VK_EXT_subgroup_size_control 1\n#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2\n#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME \"VK_EXT_subgroup_size_control\"\ntypedef VkPhysicalDeviceSubgroupSizeControlFeatures VkPhysicalDeviceSubgroupSizeControlFeaturesEXT;\n\ntypedef VkPhysicalDeviceSubgroupSizeControlProperties VkPhysicalDeviceSubgroupSizeControlPropertiesEXT;\n\ntypedef VkPipelineShaderStageRequiredSubgroupSizeCreateInfo VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;\n\n\n\n#define VK_AMD_shader_core_properties2 1\n#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1\n#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME \"VK_AMD_shader_core_properties2\"\n\ntypedef enum VkShaderCorePropertiesFlagBitsAMD {\n    VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF\n} VkShaderCorePropertiesFlagBitsAMD;\ntypedef VkFlags VkShaderCorePropertiesFlagsAMD;\ntypedef struct VkPhysicalDeviceShaderCoreProperties2AMD {\n    VkStructureType                   sType;\n    void*                             pNext;\n    VkShaderCorePropertiesFlagsAMD    shaderCoreFeatures;\n    uint32_t                          activeComputeUnitCount;\n} VkPhysicalDeviceShaderCoreProperties2AMD;\n\n\n\n#define VK_AMD_device_coherent_memory 1\n#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1\n#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME \"VK_AMD_device_coherent_memory\"\ntypedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           deviceCoherentMemory;\n} VkPhysicalDeviceCoherentMemoryFeaturesAMD;\n\n\n\n#define VK_EXT_shader_image_atomic_int64 
1\n#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1\n#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME \"VK_EXT_shader_image_atomic_int64\"\ntypedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderImageInt64Atomics;\n    VkBool32           sparseImageInt64Atomics;\n} VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT;\n\n\n\n#define VK_EXT_memory_budget 1\n#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1\n#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME \"VK_EXT_memory_budget\"\ntypedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceSize       heapBudget[VK_MAX_MEMORY_HEAPS];\n    VkDeviceSize       heapUsage[VK_MAX_MEMORY_HEAPS];\n} VkPhysicalDeviceMemoryBudgetPropertiesEXT;\n\n\n\n#define VK_EXT_memory_priority 1\n#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1\n#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME \"VK_EXT_memory_priority\"\ntypedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           memoryPriority;\n} VkPhysicalDeviceMemoryPriorityFeaturesEXT;\n\ntypedef struct VkMemoryPriorityAllocateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    float              priority;\n} VkMemoryPriorityAllocateInfoEXT;\n\n\n\n#define VK_NV_dedicated_allocation_image_aliasing 1\n#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1\n#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME \"VK_NV_dedicated_allocation_image_aliasing\"\ntypedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           dedicatedAllocationImageAliasing;\n} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV;\n\n\n\n#define VK_EXT_buffer_device_address 1\n#define 
VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2\n#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME \"VK_EXT_buffer_device_address\"\ntypedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           bufferDeviceAddress;\n    VkBool32           bufferDeviceAddressCaptureReplay;\n    VkBool32           bufferDeviceAddressMultiDevice;\n} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT;\n\ntypedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT;\n\ntypedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT;\n\ntypedef struct VkBufferDeviceAddressCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceAddress    deviceAddress;\n} VkBufferDeviceAddressCreateInfoEXT;\n\ntypedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT(\n    VkDevice                                    device,\n    const VkBufferDeviceAddressInfo*            pInfo);\n#endif\n\n\n#define VK_EXT_tooling_info 1\n#define VK_EXT_TOOLING_INFO_SPEC_VERSION  1\n#define VK_EXT_TOOLING_INFO_EXTENSION_NAME \"VK_EXT_tooling_info\"\ntypedef VkToolPurposeFlagBits VkToolPurposeFlagBitsEXT;\n\ntypedef VkToolPurposeFlags VkToolPurposeFlagsEXT;\n\ntypedef VkPhysicalDeviceToolProperties VkPhysicalDeviceToolPropertiesEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pToolCount,\n    VkPhysicalDeviceToolProperties*             
pToolProperties);\n#endif\n\n\n#define VK_EXT_separate_stencil_usage 1\n#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1\n#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME \"VK_EXT_separate_stencil_usage\"\ntypedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT;\n\n\n\n#define VK_EXT_validation_features 1\n#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 5\n#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME \"VK_EXT_validation_features\"\n\ntypedef enum VkValidationFeatureEnableEXT {\n    VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0,\n    VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1,\n    VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2,\n    VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3,\n    VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT = 4,\n    VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkValidationFeatureEnableEXT;\n\ntypedef enum VkValidationFeatureDisableEXT {\n    VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0,\n    VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1,\n    VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2,\n    VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3,\n    VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4,\n    VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5,\n    VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6,\n    VK_VALIDATION_FEATURE_DISABLE_SHADER_VALIDATION_CACHE_EXT = 7,\n    VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkValidationFeatureDisableEXT;\ntypedef struct VkValidationFeaturesEXT {\n    VkStructureType                         sType;\n    const void*                             pNext;\n    uint32_t                                enabledValidationFeatureCount;\n    const VkValidationFeatureEnableEXT*     pEnabledValidationFeatures;\n    uint32_t                                disabledValidationFeatureCount;\n    const VkValidationFeatureDisableEXT*    pDisabledValidationFeatures;\n} 
VkValidationFeaturesEXT;\n\n\n\n#define VK_NV_cooperative_matrix 1\n#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1\n#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME \"VK_NV_cooperative_matrix\"\n\ntypedef enum VkComponentTypeNV {\n    VK_COMPONENT_TYPE_FLOAT16_NV = 0,\n    VK_COMPONENT_TYPE_FLOAT32_NV = 1,\n    VK_COMPONENT_TYPE_FLOAT64_NV = 2,\n    VK_COMPONENT_TYPE_SINT8_NV = 3,\n    VK_COMPONENT_TYPE_SINT16_NV = 4,\n    VK_COMPONENT_TYPE_SINT32_NV = 5,\n    VK_COMPONENT_TYPE_SINT64_NV = 6,\n    VK_COMPONENT_TYPE_UINT8_NV = 7,\n    VK_COMPONENT_TYPE_UINT16_NV = 8,\n    VK_COMPONENT_TYPE_UINT32_NV = 9,\n    VK_COMPONENT_TYPE_UINT64_NV = 10,\n    VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkComponentTypeNV;\n\ntypedef enum VkScopeNV {\n    VK_SCOPE_DEVICE_NV = 1,\n    VK_SCOPE_WORKGROUP_NV = 2,\n    VK_SCOPE_SUBGROUP_NV = 3,\n    VK_SCOPE_QUEUE_FAMILY_NV = 5,\n    VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkScopeNV;\ntypedef struct VkCooperativeMatrixPropertiesNV {\n    VkStructureType      sType;\n    void*                pNext;\n    uint32_t             MSize;\n    uint32_t             NSize;\n    uint32_t             KSize;\n    VkComponentTypeNV    AType;\n    VkComponentTypeNV    BType;\n    VkComponentTypeNV    CType;\n    VkComponentTypeNV    DType;\n    VkScopeNV            scope;\n} VkCooperativeMatrixPropertiesNV;\n\ntypedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           cooperativeMatrix;\n    VkBool32           cooperativeMatrixRobustBufferAccess;\n} VkPhysicalDeviceCooperativeMatrixFeaturesNV;\n\ntypedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkShaderStageFlags    cooperativeMatrixSupportedStages;\n} VkPhysicalDeviceCooperativeMatrixPropertiesNV;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* 
pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pPropertyCount,\n    VkCooperativeMatrixPropertiesNV*            pProperties);\n#endif\n\n\n#define VK_NV_coverage_reduction_mode 1\n#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1\n#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME \"VK_NV_coverage_reduction_mode\"\n\ntypedef enum VkCoverageReductionModeNV {\n    VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0,\n    VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1,\n    VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkCoverageReductionModeNV;\ntypedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV;\ntypedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           coverageReductionMode;\n} VkPhysicalDeviceCoverageReductionModeFeaturesNV;\n\ntypedef struct VkPipelineCoverageReductionStateCreateInfoNV {\n    VkStructureType                                  sType;\n    const void*                                      pNext;\n    VkPipelineCoverageReductionStateCreateFlagsNV    flags;\n    VkCoverageReductionModeNV                        coverageReductionMode;\n} VkPipelineCoverageReductionStateCreateInfoNV;\n\ntypedef struct VkFramebufferMixedSamplesCombinationNV {\n    VkStructureType              sType;\n    void*                        pNext;\n    VkCoverageReductionModeNV    coverageReductionMode;\n    VkSampleCountFlagBits        rasterizationSamples;\n    VkSampleCountFlags           depthStencilSamples;\n    VkSampleCountFlags           colorSamples;\n} VkFramebufferMixedSamplesCombinationNV;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* 
pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t*                                   pCombinationCount,\n    VkFramebufferMixedSamplesCombinationNV*     pCombinations);\n#endif\n\n\n#define VK_EXT_fragment_shader_interlock 1\n#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1\n#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME \"VK_EXT_fragment_shader_interlock\"\ntypedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentShaderSampleInterlock;\n    VkBool32           fragmentShaderPixelInterlock;\n    VkBool32           fragmentShaderShadingRateInterlock;\n} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT;\n\n\n\n#define VK_EXT_ycbcr_image_arrays 1\n#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1\n#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME \"VK_EXT_ycbcr_image_arrays\"\ntypedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           ycbcrImageArrays;\n} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT;\n\n\n\n#define VK_EXT_provoking_vertex 1\n#define VK_EXT_PROVOKING_VERTEX_SPEC_VERSION 1\n#define VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME \"VK_EXT_provoking_vertex\"\n\ntypedef enum VkProvokingVertexModeEXT {\n    VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT = 0,\n    VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT = 1,\n    VK_PROVOKING_VERTEX_MODE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkProvokingVertexModeEXT;\ntypedef struct VkPhysicalDeviceProvokingVertexFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           provokingVertexLast;\n    VkBool32           transformFeedbackPreservesProvokingVertex;\n} 
VkPhysicalDeviceProvokingVertexFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceProvokingVertexPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           provokingVertexModePerPipeline;\n    VkBool32           transformFeedbackPreservesTriangleFanProvokingVertex;\n} VkPhysicalDeviceProvokingVertexPropertiesEXT;\n\ntypedef struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT {\n    VkStructureType             sType;\n    const void*                 pNext;\n    VkProvokingVertexModeEXT    provokingVertexMode;\n} VkPipelineRasterizationProvokingVertexStateCreateInfoEXT;\n\n\n\n#define VK_EXT_headless_surface 1\n#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1\n#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME \"VK_EXT_headless_surface\"\ntypedef VkFlags VkHeadlessSurfaceCreateFlagsEXT;\ntypedef struct VkHeadlessSurfaceCreateInfoEXT {\n    VkStructureType                    sType;\n    const void*                        pNext;\n    VkHeadlessSurfaceCreateFlagsEXT    flags;\n} VkHeadlessSurfaceCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT(\n    VkInstance                                  instance,\n    const VkHeadlessSurfaceCreateInfoEXT*       pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n#endif\n\n\n#define VK_EXT_line_rasterization 1\n#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1\n#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME \"VK_EXT_line_rasterization\"\n\ntypedef enum VkLineRasterizationModeEXT {\n    VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0,\n    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1,\n    VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2,\n    
VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3,\n    VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkLineRasterizationModeEXT;\ntypedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rectangularLines;\n    VkBool32           bresenhamLines;\n    VkBool32           smoothLines;\n    VkBool32           stippledRectangularLines;\n    VkBool32           stippledBresenhamLines;\n    VkBool32           stippledSmoothLines;\n} VkPhysicalDeviceLineRasterizationFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           lineSubPixelPrecisionBits;\n} VkPhysicalDeviceLineRasterizationPropertiesEXT;\n\ntypedef struct VkPipelineRasterizationLineStateCreateInfoEXT {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkLineRasterizationModeEXT    lineRasterizationMode;\n    VkBool32                      stippledLineEnable;\n    uint32_t                      lineStippleFactor;\n    uint16_t                      lineStipplePattern;\n} VkPipelineRasterizationLineStateCreateInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    lineStippleFactor,\n    uint16_t                                    lineStipplePattern);\n#endif\n\n\n#define VK_EXT_shader_atomic_float 1\n#define VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION 1\n#define VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME \"VK_EXT_shader_atomic_float\"\ntypedef struct VkPhysicalDeviceShaderAtomicFloatFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           
shaderBufferFloat32Atomics;\n    VkBool32           shaderBufferFloat32AtomicAdd;\n    VkBool32           shaderBufferFloat64Atomics;\n    VkBool32           shaderBufferFloat64AtomicAdd;\n    VkBool32           shaderSharedFloat32Atomics;\n    VkBool32           shaderSharedFloat32AtomicAdd;\n    VkBool32           shaderSharedFloat64Atomics;\n    VkBool32           shaderSharedFloat64AtomicAdd;\n    VkBool32           shaderImageFloat32Atomics;\n    VkBool32           shaderImageFloat32AtomicAdd;\n    VkBool32           sparseImageFloat32Atomics;\n    VkBool32           sparseImageFloat32AtomicAdd;\n} VkPhysicalDeviceShaderAtomicFloatFeaturesEXT;\n\n\n\n#define VK_EXT_host_query_reset 1\n#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1\n#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME \"VK_EXT_host_query_reset\"\ntypedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT(\n    VkDevice                                    device,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery,\n    uint32_t                                    queryCount);\n#endif\n\n\n#define VK_EXT_index_type_uint8 1\n#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1\n#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME \"VK_EXT_index_type_uint8\"\ntypedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           indexTypeUint8;\n} VkPhysicalDeviceIndexTypeUint8FeaturesEXT;\n\n\n\n#define VK_EXT_extended_dynamic_state 1\n#define VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION 1\n#define VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME \"VK_EXT_extended_dynamic_state\"\ntypedef struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT 
{\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           extendedDynamicState;\n} VkPhysicalDeviceExtendedDynamicStateFeaturesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetCullModeEXT)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetFrontFaceEXT)(VkCommandBuffer commandBuffer, VkFrontFace frontFace);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopologyEXT)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2EXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOpEXT)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetStencilOpEXT)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetCullModeEXT(\n    VkCommandBuffer                             commandBuffer,\n    
VkCullModeFlags                             cullMode);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFaceEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkFrontFace                                 frontFace);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopologyEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkPrimitiveTopology                         primitiveTopology);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCountEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    viewportCount,\n    const VkViewport*                           pViewports);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCountEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    scissorCount,\n    const VkRect2D*                             pScissors);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2EXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    firstBinding,\n    uint32_t                                    bindingCount,\n    const VkBuffer*                             pBuffers,\n    const VkDeviceSize*                         pOffsets,\n    const VkDeviceSize*                         pSizes,\n    const VkDeviceSize*                         pStrides);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthWriteEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOpEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkCompareOp                                 depthCompareOp);\n\nVKAPI_ATTR void 
VKAPI_CALL vkCmdSetDepthBoundsTestEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthBoundsTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    stencilTestEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkStencilFaceFlags                          faceMask,\n    VkStencilOp                                 failOp,\n    VkStencilOp                                 passOp,\n    VkStencilOp                                 depthFailOp,\n    VkCompareOp                                 compareOp);\n#endif\n\n\n#define VK_EXT_shader_atomic_float2 1\n#define VK_EXT_SHADER_ATOMIC_FLOAT_2_SPEC_VERSION 1\n#define VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME \"VK_EXT_shader_atomic_float2\"\ntypedef struct VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderBufferFloat16Atomics;\n    VkBool32           shaderBufferFloat16AtomicAdd;\n    VkBool32           shaderBufferFloat16AtomicMinMax;\n    VkBool32           shaderBufferFloat32AtomicMinMax;\n    VkBool32           shaderBufferFloat64AtomicMinMax;\n    VkBool32           shaderSharedFloat16Atomics;\n    VkBool32           shaderSharedFloat16AtomicAdd;\n    VkBool32           shaderSharedFloat16AtomicMinMax;\n    VkBool32           shaderSharedFloat32AtomicMinMax;\n    VkBool32           shaderSharedFloat64AtomicMinMax;\n    VkBool32           shaderImageFloat32AtomicMinMax;\n    VkBool32           sparseImageFloat32AtomicMinMax;\n} VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT;\n\n\n\n#define VK_EXT_shader_demote_to_helper_invocation 1\n#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1\n#define 
VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME \"VK_EXT_shader_demote_to_helper_invocation\"\ntypedef VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;\n\n\n\n#define VK_NV_device_generated_commands 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV)\n#define VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3\n#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME \"VK_NV_device_generated_commands\"\n\ntypedef enum VkIndirectCommandsTokenTypeNV {\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7,\n    VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkIndirectCommandsTokenTypeNV;\n\ntypedef enum VkIndirectStateFlagBitsNV {\n    VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001,\n    VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF\n} VkIndirectStateFlagBitsNV;\ntypedef VkFlags VkIndirectStateFlagsNV;\n\ntypedef enum VkIndirectCommandsLayoutUsageFlagBitsNV {\n    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001,\n    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002,\n    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004,\n    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF\n} VkIndirectCommandsLayoutUsageFlagBitsNV;\ntypedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV;\ntypedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxGraphicsShaderGroupCount;\n    uint32_t           
maxIndirectSequenceCount;\n    uint32_t           maxIndirectCommandsTokenCount;\n    uint32_t           maxIndirectCommandsStreamCount;\n    uint32_t           maxIndirectCommandsTokenOffset;\n    uint32_t           maxIndirectCommandsStreamStride;\n    uint32_t           minSequencesCountBufferOffsetAlignment;\n    uint32_t           minSequencesIndexBufferOffsetAlignment;\n    uint32_t           minIndirectCommandsBufferOffsetAlignment;\n} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV;\n\ntypedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           deviceGeneratedCommands;\n} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV;\n\ntypedef struct VkGraphicsShaderGroupCreateInfoNV {\n    VkStructureType                                 sType;\n    const void*                                     pNext;\n    uint32_t                                        stageCount;\n    const VkPipelineShaderStageCreateInfo*          pStages;\n    const VkPipelineVertexInputStateCreateInfo*     pVertexInputState;\n    const VkPipelineTessellationStateCreateInfo*    pTessellationState;\n} VkGraphicsShaderGroupCreateInfoNV;\n\ntypedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV {\n    VkStructureType                             sType;\n    const void*                                 pNext;\n    uint32_t                                    groupCount;\n    const VkGraphicsShaderGroupCreateInfoNV*    pGroups;\n    uint32_t                                    pipelineCount;\n    const VkPipeline*                           pPipelines;\n} VkGraphicsPipelineShaderGroupsCreateInfoNV;\n\ntypedef struct VkBindShaderGroupIndirectCommandNV {\n    uint32_t    groupIndex;\n} VkBindShaderGroupIndirectCommandNV;\n\ntypedef struct VkBindIndexBufferIndirectCommandNV {\n    VkDeviceAddress    bufferAddress;\n    uint32_t           size;\n    VkIndexType        indexType;\n} 
VkBindIndexBufferIndirectCommandNV;\n\ntypedef struct VkBindVertexBufferIndirectCommandNV {\n    VkDeviceAddress    bufferAddress;\n    uint32_t           size;\n    uint32_t           stride;\n} VkBindVertexBufferIndirectCommandNV;\n\ntypedef struct VkSetStateFlagsIndirectCommandNV {\n    uint32_t    data;\n} VkSetStateFlagsIndirectCommandNV;\n\ntypedef struct VkIndirectCommandsStreamNV {\n    VkBuffer        buffer;\n    VkDeviceSize    offset;\n} VkIndirectCommandsStreamNV;\n\ntypedef struct VkIndirectCommandsLayoutTokenNV {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkIndirectCommandsTokenTypeNV    tokenType;\n    uint32_t                         stream;\n    uint32_t                         offset;\n    uint32_t                         vertexBindingUnit;\n    VkBool32                         vertexDynamicStride;\n    VkPipelineLayout                 pushconstantPipelineLayout;\n    VkShaderStageFlags               pushconstantShaderStageFlags;\n    uint32_t                         pushconstantOffset;\n    uint32_t                         pushconstantSize;\n    VkIndirectStateFlagsNV           indirectStateFlags;\n    uint32_t                         indexTypeCount;\n    const VkIndexType*               pIndexTypes;\n    const uint32_t*                  pIndexTypeValues;\n} VkIndirectCommandsLayoutTokenNV;\n\ntypedef struct VkIndirectCommandsLayoutCreateInfoNV {\n    VkStructureType                           sType;\n    const void*                               pNext;\n    VkIndirectCommandsLayoutUsageFlagsNV      flags;\n    VkPipelineBindPoint                       pipelineBindPoint;\n    uint32_t                                  tokenCount;\n    const VkIndirectCommandsLayoutTokenNV*    pTokens;\n    uint32_t                                  streamCount;\n    const uint32_t*                           pStreamStrides;\n} VkIndirectCommandsLayoutCreateInfoNV;\n\ntypedef struct VkGeneratedCommandsInfoNV {\n    
VkStructureType                      sType;\n    const void*                          pNext;\n    VkPipelineBindPoint                  pipelineBindPoint;\n    VkPipeline                           pipeline;\n    VkIndirectCommandsLayoutNV           indirectCommandsLayout;\n    uint32_t                             streamCount;\n    const VkIndirectCommandsStreamNV*    pStreams;\n    uint32_t                             sequencesCount;\n    VkBuffer                             preprocessBuffer;\n    VkDeviceSize                         preprocessOffset;\n    VkDeviceSize                         preprocessSize;\n    VkBuffer                             sequencesCountBuffer;\n    VkDeviceSize                         sequencesCountOffset;\n    VkBuffer                             sequencesIndexBuffer;\n    VkDeviceSize                         sequencesIndexOffset;\n} VkGeneratedCommandsInfoNV;\n\ntypedef struct VkGeneratedCommandsMemoryRequirementsInfoNV {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkPipelineBindPoint           pipelineBindPoint;\n    VkPipeline                    pipeline;\n    VkIndirectCommandsLayoutNV    indirectCommandsLayout;\n    uint32_t                      maxSequencesCount;\n} VkGeneratedCommandsMemoryRequirementsInfoNV;\n\ntypedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements);\ntypedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t 
groupIndex);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);\ntypedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV(\n    VkDevice                                    device,\n    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,\n    VkMemoryRequirements2*                      pMemoryRequirements);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV(\n    VkCommandBuffer                             commandBuffer,\n    const VkGeneratedCommandsInfoNV*            pGeneratedCommandsInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    isPreprocessed,\n    const VkGeneratedCommandsInfoNV*            pGeneratedCommandsInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV(\n    VkCommandBuffer                             commandBuffer,\n    VkPipelineBindPoint                         pipelineBindPoint,\n    VkPipeline                                  pipeline,\n    uint32_t                                    groupIndex);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV(\n    VkDevice                                    device,\n    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkIndirectCommandsLayoutNV*                 pIndirectCommandsLayout);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV(\n    VkDevice                                    device,\n    VkIndirectCommandsLayoutNV                  
indirectCommandsLayout,\n    const VkAllocationCallbacks*                pAllocator);\n#endif\n\n\n#define VK_NV_inherited_viewport_scissor 1\n#define VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION 1\n#define VK_NV_INHERITED_VIEWPORT_SCISSOR_EXTENSION_NAME \"VK_NV_inherited_viewport_scissor\"\ntypedef struct VkPhysicalDeviceInheritedViewportScissorFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           inheritedViewportScissor2D;\n} VkPhysicalDeviceInheritedViewportScissorFeaturesNV;\n\ntypedef struct VkCommandBufferInheritanceViewportScissorInfoNV {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkBool32             viewportScissor2D;\n    uint32_t             viewportDepthCount;\n    const VkViewport*    pViewportDepths;\n} VkCommandBufferInheritanceViewportScissorInfoNV;\n\n\n\n#define VK_EXT_texel_buffer_alignment 1\n#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1\n#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME \"VK_EXT_texel_buffer_alignment\"\ntypedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           texelBufferAlignment;\n} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT;\n\ntypedef VkPhysicalDeviceTexelBufferAlignmentProperties VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;\n\n\n\n#define VK_QCOM_render_pass_transform 1\n#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 3\n#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME \"VK_QCOM_render_pass_transform\"\ntypedef struct VkRenderPassTransformBeginInfoQCOM {\n    VkStructureType                  sType;\n    void*                            pNext;\n    VkSurfaceTransformFlagBitsKHR    transform;\n} VkRenderPassTransformBeginInfoQCOM;\n\ntypedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM {\n    VkStructureType                  sType;\n    void*                            pNext;\n    VkSurfaceTransformFlagBitsKHR    
transform;\n    VkRect2D                         renderArea;\n} VkCommandBufferInheritanceRenderPassTransformInfoQCOM;\n\n\n\n#define VK_EXT_device_memory_report 1\n#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 2\n#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME \"VK_EXT_device_memory_report\"\n\ntypedef enum VkDeviceMemoryReportEventTypeEXT {\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT = 0,\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1,\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2,\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3,\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4,\n    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkDeviceMemoryReportEventTypeEXT;\ntypedef VkFlags VkDeviceMemoryReportFlagsEXT;\ntypedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           deviceMemoryReport;\n} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT;\n\ntypedef struct VkDeviceMemoryReportCallbackDataEXT {\n    VkStructureType                     sType;\n    void*                               pNext;\n    VkDeviceMemoryReportFlagsEXT        flags;\n    VkDeviceMemoryReportEventTypeEXT    type;\n    uint64_t                            memoryObjectId;\n    VkDeviceSize                        size;\n    VkObjectType                        objectType;\n    uint64_t                            objectHandle;\n    uint32_t                            heapIndex;\n} VkDeviceMemoryReportCallbackDataEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)(\n    const VkDeviceMemoryReportCallbackDataEXT*  pCallbackData,\n    void*                                       pUserData);\n\ntypedef struct VkDeviceDeviceMemoryReportCreateInfoEXT {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    VkDeviceMemoryReportFlagsEXT           flags;\n    
PFN_vkDeviceMemoryReportCallbackEXT    pfnUserCallback;\n    void*                                  pUserData;\n} VkDeviceDeviceMemoryReportCreateInfoEXT;\n\n\n\n#define VK_EXT_acquire_drm_display 1\n#define VK_EXT_ACQUIRE_DRM_DISPLAY_SPEC_VERSION 1\n#define VK_EXT_ACQUIRE_DRM_DISPLAY_EXTENSION_NAME \"VK_EXT_acquire_drm_display\"\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, VkDisplayKHR display);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, uint32_t connectorId, VkDisplayKHR* display);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireDrmDisplayEXT(\n    VkPhysicalDevice                            physicalDevice,\n    int32_t                                     drmFd,\n    VkDisplayKHR                                display);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDrmDisplayEXT(\n    VkPhysicalDevice                            physicalDevice,\n    int32_t                                     drmFd,\n    uint32_t                                    connectorId,\n    VkDisplayKHR*                               display);\n#endif\n\n\n#define VK_EXT_robustness2 1\n#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION  1\n#define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME \"VK_EXT_robustness2\"\ntypedef struct VkPhysicalDeviceRobustness2FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           robustBufferAccess2;\n    VkBool32           robustImageAccess2;\n    VkBool32           nullDescriptor;\n} VkPhysicalDeviceRobustness2FeaturesEXT;\n\ntypedef struct VkPhysicalDeviceRobustness2PropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkDeviceSize       robustStorageBufferAccessSizeAlignment;\n    VkDeviceSize       robustUniformBufferAccessSizeAlignment;\n} VkPhysicalDeviceRobustness2PropertiesEXT;\n\n\n\n#define VK_EXT_custom_border_color 1\n#define 
VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12\n#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME \"VK_EXT_custom_border_color\"\ntypedef struct VkSamplerCustomBorderColorCreateInfoEXT {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkClearColorValue    customBorderColor;\n    VkFormat             format;\n} VkSamplerCustomBorderColorCreateInfoEXT;\n\ntypedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxCustomBorderColorSamplers;\n} VkPhysicalDeviceCustomBorderColorPropertiesEXT;\n\ntypedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           customBorderColors;\n    VkBool32           customBorderColorWithoutFormat;\n} VkPhysicalDeviceCustomBorderColorFeaturesEXT;\n\n\n\n#define VK_GOOGLE_user_type 1\n#define VK_GOOGLE_USER_TYPE_SPEC_VERSION  1\n#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME \"VK_GOOGLE_user_type\"\n\n\n#define VK_EXT_private_data 1\ntypedef VkPrivateDataSlot VkPrivateDataSlotEXT;\n\n#define VK_EXT_PRIVATE_DATA_SPEC_VERSION  1\n#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME \"VK_EXT_private_data\"\ntypedef VkPrivateDataSlotCreateFlags VkPrivateDataSlotCreateFlagsEXT;\n\ntypedef VkPhysicalDevicePrivateDataFeatures VkPhysicalDevicePrivateDataFeaturesEXT;\n\ntypedef VkDevicePrivateDataCreateInfo VkDevicePrivateDataCreateInfoEXT;\n\ntypedef VkPrivateDataSlotCreateInfo VkPrivateDataSlotCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);\ntypedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);\ntypedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, 
uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);\ntypedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT(\n    VkDevice                                    device,\n    const VkPrivateDataSlotCreateInfo*          pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPrivateDataSlot*                          pPrivateDataSlot);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT(\n    VkDevice                                    device,\n    VkPrivateDataSlot                           privateDataSlot,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT(\n    VkDevice                                    device,\n    VkObjectType                                objectType,\n    uint64_t                                    objectHandle,\n    VkPrivateDataSlot                           privateDataSlot,\n    uint64_t                                    data);\n\nVKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(\n    VkDevice                                    device,\n    VkObjectType                                objectType,\n    uint64_t                                    objectHandle,\n    VkPrivateDataSlot                           privateDataSlot,\n    uint64_t*                                   pData);\n#endif\n\n\n#define VK_EXT_pipeline_creation_cache_control 1\n#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3\n#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME \"VK_EXT_pipeline_creation_cache_control\"\ntypedef VkPhysicalDevicePipelineCreationCacheControlFeatures VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;\n\n\n\n#define VK_NV_device_diagnostics_config 1\n#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 2\n#define 
VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME \"VK_NV_device_diagnostics_config\"\n\ntypedef enum VkDeviceDiagnosticsConfigFlagBitsNV {\n    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001,\n    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002,\n    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004,\n    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_ERROR_REPORTING_BIT_NV = 0x00000008,\n    VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF\n} VkDeviceDiagnosticsConfigFlagBitsNV;\ntypedef VkFlags VkDeviceDiagnosticsConfigFlagsNV;\ntypedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           diagnosticsConfig;\n} VkPhysicalDeviceDiagnosticsConfigFeaturesNV;\n\ntypedef struct VkDeviceDiagnosticsConfigCreateInfoNV {\n    VkStructureType                     sType;\n    const void*                         pNext;\n    VkDeviceDiagnosticsConfigFlagsNV    flags;\n} VkDeviceDiagnosticsConfigCreateInfoNV;\n\n\n\n#define VK_QCOM_render_pass_store_ops 1\n#define VK_QCOM_RENDER_PASS_STORE_OPS_SPEC_VERSION 2\n#define VK_QCOM_RENDER_PASS_STORE_OPS_EXTENSION_NAME \"VK_QCOM_render_pass_store_ops\"\n\n\n#define VK_EXT_graphics_pipeline_library 1\n#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_SPEC_VERSION 1\n#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_EXTENSION_NAME \"VK_EXT_graphics_pipeline_library\"\n\ntypedef enum VkGraphicsPipelineLibraryFlagBitsEXT {\n    VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT = 0x00000001,\n    VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT = 0x00000002,\n    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT = 0x00000004,\n    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT = 0x00000008,\n    VK_GRAPHICS_PIPELINE_LIBRARY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkGraphicsPipelineLibraryFlagBitsEXT;\ntypedef VkFlags 
VkGraphicsPipelineLibraryFlagsEXT;\ntypedef struct VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           graphicsPipelineLibrary;\n} VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           graphicsPipelineLibraryFastLinking;\n    VkBool32           graphicsPipelineLibraryIndependentInterpolationDecoration;\n} VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT;\n\ntypedef struct VkGraphicsPipelineLibraryCreateInfoEXT {\n    VkStructureType                      sType;\n    void*                                pNext;\n    VkGraphicsPipelineLibraryFlagsEXT    flags;\n} VkGraphicsPipelineLibraryCreateInfoEXT;\n\n\n\n#define VK_AMD_shader_early_and_late_fragment_tests 1\n#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_SPEC_VERSION 1\n#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_EXTENSION_NAME \"VK_AMD_shader_early_and_late_fragment_tests\"\ntypedef struct VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderEarlyAndLateFragmentTests;\n} VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD;\n\n\n\n#define VK_NV_fragment_shading_rate_enums 1\n#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1\n#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME \"VK_NV_fragment_shading_rate_enums\"\n\ntypedef enum VkFragmentShadingRateTypeNV {\n    VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0,\n    VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1,\n    VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkFragmentShadingRateTypeNV;\n\ntypedef enum VkFragmentShadingRateNV {\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0,\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1,\n    
VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4,\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5,\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6,\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9,\n    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10,\n    VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11,\n    VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12,\n    VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13,\n    VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14,\n    VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15,\n    VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkFragmentShadingRateNV;\ntypedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentShadingRateEnums;\n    VkBool32           supersampleFragmentShadingRates;\n    VkBool32           noInvocationFragmentShadingRates;\n} VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV;\n\ntypedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV {\n    VkStructureType          sType;\n    void*                    pNext;\n    VkSampleCountFlagBits    maxFragmentShadingRateInvocationCount;\n} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV;\n\ntypedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkFragmentShadingRateTypeNV           shadingRateType;\n    VkFragmentShadingRateNV               shadingRate;\n    VkFragmentShadingRateCombinerOpKHR    combinerOps[2];\n} VkPipelineFragmentShadingRateEnumStateCreateInfoNV;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer           commandBuffer, VkFragmentShadingRateNV                     shadingRate, const VkFragmentShadingRateCombinerOpKHR    combinerOps[2]);\n\n#ifndef 
VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV(\n    VkCommandBuffer                             commandBuffer,\n    VkFragmentShadingRateNV                     shadingRate,\n    const VkFragmentShadingRateCombinerOpKHR    combinerOps[2]);\n#endif\n\n\n#define VK_NV_ray_tracing_motion_blur 1\n#define VK_NV_RAY_TRACING_MOTION_BLUR_SPEC_VERSION 1\n#define VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME \"VK_NV_ray_tracing_motion_blur\"\n\ntypedef enum VkAccelerationStructureMotionInstanceTypeNV {\n    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_STATIC_NV = 0,\n    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV = 1,\n    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_SRT_MOTION_NV = 2,\n    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF\n} VkAccelerationStructureMotionInstanceTypeNV;\ntypedef VkFlags VkAccelerationStructureMotionInfoFlagsNV;\ntypedef VkFlags VkAccelerationStructureMotionInstanceFlagsNV;\ntypedef union VkDeviceOrHostAddressConstKHR {\n    VkDeviceAddress    deviceAddress;\n    const void*        hostAddress;\n} VkDeviceOrHostAddressConstKHR;\n\ntypedef struct VkAccelerationStructureGeometryMotionTrianglesDataNV {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkDeviceOrHostAddressConstKHR    vertexData;\n} VkAccelerationStructureGeometryMotionTrianglesDataNV;\n\ntypedef struct VkAccelerationStructureMotionInfoNV {\n    VkStructureType                             sType;\n    const void*                                 pNext;\n    uint32_t                                    maxInstances;\n    VkAccelerationStructureMotionInfoFlagsNV    flags;\n} VkAccelerationStructureMotionInfoNV;\n\ntypedef struct VkAccelerationStructureMatrixMotionInstanceNV {\n    VkTransformMatrixKHR          transformT0;\n    VkTransformMatrixKHR          transformT1;\n    uint32_t                      instanceCustomIndex:24;\n    uint32_t                      
mask:8;\n    uint32_t                      instanceShaderBindingTableRecordOffset:24;\n    VkGeometryInstanceFlagsKHR    flags:8;\n    uint64_t                      accelerationStructureReference;\n} VkAccelerationStructureMatrixMotionInstanceNV;\n\ntypedef struct VkSRTDataNV {\n    float    sx;\n    float    a;\n    float    b;\n    float    pvx;\n    float    sy;\n    float    c;\n    float    pvy;\n    float    sz;\n    float    pvz;\n    float    qx;\n    float    qy;\n    float    qz;\n    float    qw;\n    float    tx;\n    float    ty;\n    float    tz;\n} VkSRTDataNV;\n\ntypedef struct VkAccelerationStructureSRTMotionInstanceNV {\n    VkSRTDataNV                   transformT0;\n    VkSRTDataNV                   transformT1;\n    uint32_t                      instanceCustomIndex:24;\n    uint32_t                      mask:8;\n    uint32_t                      instanceShaderBindingTableRecordOffset:24;\n    VkGeometryInstanceFlagsKHR    flags:8;\n    uint64_t                      accelerationStructureReference;\n} VkAccelerationStructureSRTMotionInstanceNV;\n\ntypedef union VkAccelerationStructureMotionInstanceDataNV {\n    VkAccelerationStructureInstanceKHR               staticInstance;\n    VkAccelerationStructureMatrixMotionInstanceNV    matrixMotionInstance;\n    VkAccelerationStructureSRTMotionInstanceNV       srtMotionInstance;\n} VkAccelerationStructureMotionInstanceDataNV;\n\ntypedef struct VkAccelerationStructureMotionInstanceNV {\n    VkAccelerationStructureMotionInstanceTypeNV     type;\n    VkAccelerationStructureMotionInstanceFlagsNV    flags;\n    VkAccelerationStructureMotionInstanceDataNV     data;\n} VkAccelerationStructureMotionInstanceNV;\n\ntypedef struct VkPhysicalDeviceRayTracingMotionBlurFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rayTracingMotionBlur;\n    VkBool32           rayTracingMotionBlurPipelineTraceRaysIndirect;\n} 
VkPhysicalDeviceRayTracingMotionBlurFeaturesNV;\n\n\n\n#define VK_EXT_ycbcr_2plane_444_formats 1\n#define VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION 1\n#define VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME \"VK_EXT_ycbcr_2plane_444_formats\"\ntypedef struct VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           ycbcr2plane444Formats;\n} VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT;\n\n\n\n#define VK_EXT_fragment_density_map2 1\n#define VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1\n#define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME \"VK_EXT_fragment_density_map2\"\ntypedef struct VkPhysicalDeviceFragmentDensityMap2FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentDensityMapDeferred;\n} VkPhysicalDeviceFragmentDensityMap2FeaturesEXT;\n\ntypedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           subsampledLoads;\n    VkBool32           subsampledCoarseReconstructionEarlyAccess;\n    uint32_t           maxSubsampledArrayLayers;\n    uint32_t           maxDescriptorSetSubsampledSamplers;\n} VkPhysicalDeviceFragmentDensityMap2PropertiesEXT;\n\n\n\n#define VK_QCOM_rotated_copy_commands 1\n#define VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION 1\n#define VK_QCOM_ROTATED_COPY_COMMANDS_EXTENSION_NAME \"VK_QCOM_rotated_copy_commands\"\ntypedef struct VkCopyCommandTransformInfoQCOM {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkSurfaceTransformFlagBitsKHR    transform;\n} VkCopyCommandTransformInfoQCOM;\n\n\n\n#define VK_EXT_image_robustness 1\n#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1\n#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME \"VK_EXT_image_robustness\"\ntypedef VkPhysicalDeviceImageRobustnessFeatures VkPhysicalDeviceImageRobustnessFeaturesEXT;\n\n\n\n#define 
VK_EXT_image_compression_control 1\n#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SPEC_VERSION 1\n#define VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME \"VK_EXT_image_compression_control\"\n\ntypedef enum VkImageCompressionFlagBitsEXT {\n    VK_IMAGE_COMPRESSION_DEFAULT_EXT = 0,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT = 0x00000001,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT = 0x00000002,\n    VK_IMAGE_COMPRESSION_DISABLED_EXT = 0x00000004,\n    VK_IMAGE_COMPRESSION_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkImageCompressionFlagBitsEXT;\ntypedef VkFlags VkImageCompressionFlagsEXT;\n\ntypedef enum VkImageCompressionFixedRateFlagBitsEXT {\n    VK_IMAGE_COMPRESSION_FIXED_RATE_NONE_EXT = 0,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_1BPC_BIT_EXT = 0x00000001,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_2BPC_BIT_EXT = 0x00000002,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_3BPC_BIT_EXT = 0x00000004,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT = 0x00000008,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_5BPC_BIT_EXT = 0x00000010,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_6BPC_BIT_EXT = 0x00000020,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_7BPC_BIT_EXT = 0x00000040,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_8BPC_BIT_EXT = 0x00000080,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_9BPC_BIT_EXT = 0x00000100,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_10BPC_BIT_EXT = 0x00000200,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_11BPC_BIT_EXT = 0x00000400,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_12BPC_BIT_EXT = 0x00000800,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_13BPC_BIT_EXT = 0x00001000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_14BPC_BIT_EXT = 0x00002000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_15BPC_BIT_EXT = 0x00004000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_16BPC_BIT_EXT = 0x00008000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_17BPC_BIT_EXT = 0x00010000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_18BPC_BIT_EXT = 0x00020000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_19BPC_BIT_EXT = 0x00040000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_20BPC_BIT_EXT = 0x00080000,\n  
  VK_IMAGE_COMPRESSION_FIXED_RATE_21BPC_BIT_EXT = 0x00100000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_22BPC_BIT_EXT = 0x00200000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_23BPC_BIT_EXT = 0x00400000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_24BPC_BIT_EXT = 0x00800000,\n    VK_IMAGE_COMPRESSION_FIXED_RATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkImageCompressionFixedRateFlagBitsEXT;\ntypedef VkFlags VkImageCompressionFixedRateFlagsEXT;\ntypedef struct VkPhysicalDeviceImageCompressionControlFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           imageCompressionControl;\n} VkPhysicalDeviceImageCompressionControlFeaturesEXT;\n\ntypedef struct VkImageCompressionControlEXT {\n    VkStructureType                         sType;\n    const void*                             pNext;\n    VkImageCompressionFlagsEXT              flags;\n    uint32_t                                compressionControlPlaneCount;\n    VkImageCompressionFixedRateFlagsEXT*    pFixedRateFlags;\n} VkImageCompressionControlEXT;\n\ntypedef struct VkSubresourceLayout2EXT {\n    VkStructureType        sType;\n    void*                  pNext;\n    VkSubresourceLayout    subresourceLayout;\n} VkSubresourceLayout2EXT;\n\ntypedef struct VkImageSubresource2EXT {\n    VkStructureType       sType;\n    void*                 pNext;\n    VkImageSubresource    imageSubresource;\n} VkImageSubresource2EXT;\n\ntypedef struct VkImageCompressionPropertiesEXT {\n    VkStructureType                        sType;\n    void*                                  pNext;\n    VkImageCompressionFlagsEXT             imageCompressionFlags;\n    VkImageCompressionFixedRateFlagsEXT    imageCompressionFixedRateFlags;\n} VkImageCompressionPropertiesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2EXT* pSubresource, VkSubresourceLayout2EXT* pLayout);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL 
vkGetImageSubresourceLayout2EXT(\n    VkDevice                                    device,\n    VkImage                                     image,\n    const VkImageSubresource2EXT*               pSubresource,\n    VkSubresourceLayout2EXT*                    pLayout);\n#endif\n\n\n#define VK_EXT_attachment_feedback_loop_layout 1\n#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION 2\n#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME \"VK_EXT_attachment_feedback_loop_layout\"\ntypedef struct VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           attachmentFeedbackLoopLayout;\n} VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT;\n\n\n\n#define VK_EXT_4444_formats 1\n#define VK_EXT_4444_FORMATS_SPEC_VERSION  1\n#define VK_EXT_4444_FORMATS_EXTENSION_NAME \"VK_EXT_4444_formats\"\ntypedef struct VkPhysicalDevice4444FormatsFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           formatA4R4G4B4;\n    VkBool32           formatA4B4G4R4;\n} VkPhysicalDevice4444FormatsFeaturesEXT;\n\n\n\n#define VK_ARM_rasterization_order_attachment_access 1\n#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1\n#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME \"VK_ARM_rasterization_order_attachment_access\"\ntypedef struct VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rasterizationOrderColorAttachmentAccess;\n    VkBool32           rasterizationOrderDepthAttachmentAccess;\n    VkBool32           rasterizationOrderStencilAttachmentAccess;\n} VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM;\n\n\n\n#define VK_EXT_rgba10x6_formats 1\n#define VK_EXT_RGBA10X6_FORMATS_SPEC_VERSION 1\n#define VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME \"VK_EXT_rgba10x6_formats\"\ntypedef struct 
VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           formatRgba10x6WithoutYCbCrSampler;\n} VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT;\n\n\n\n#define VK_NV_acquire_winrt_display 1\n#define VK_NV_ACQUIRE_WINRT_DISPLAY_SPEC_VERSION 1\n#define VK_NV_ACQUIRE_WINRT_DISPLAY_EXTENSION_NAME \"VK_NV_acquire_winrt_display\"\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireWinrtDisplayNV)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetWinrtDisplayNV)(VkPhysicalDevice physicalDevice, uint32_t deviceRelativeId, VkDisplayKHR* pDisplay);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireWinrtDisplayNV(\n    VkPhysicalDevice                            physicalDevice,\n    VkDisplayKHR                                display);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetWinrtDisplayNV(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t                                    deviceRelativeId,\n    VkDisplayKHR*                               pDisplay);\n#endif\n\n\n#define VK_VALVE_mutable_descriptor_type 1\n#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1\n#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME \"VK_VALVE_mutable_descriptor_type\"\ntypedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           mutableDescriptorType;\n} VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE;\n\ntypedef struct VkMutableDescriptorTypeListVALVE {\n    uint32_t                   descriptorTypeCount;\n    const VkDescriptorType*    pDescriptorTypes;\n} VkMutableDescriptorTypeListVALVE;\n\ntypedef struct VkMutableDescriptorTypeCreateInfoVALVE {\n    VkStructureType                            sType;\n    const void*                                pNext;\n    uint32_t                                   mutableDescriptorTypeListCount;\n    const 
VkMutableDescriptorTypeListVALVE*    pMutableDescriptorTypeLists;\n} VkMutableDescriptorTypeCreateInfoVALVE;\n\n\n\n#define VK_EXT_vertex_input_dynamic_state 1\n#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_SPEC_VERSION 2\n#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME \"VK_EXT_vertex_input_dynamic_state\"\ntypedef struct VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           vertexInputDynamicState;\n} VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT;\n\ntypedef struct VkVertexInputBindingDescription2EXT {\n    VkStructureType      sType;\n    void*                pNext;\n    uint32_t             binding;\n    uint32_t             stride;\n    VkVertexInputRate    inputRate;\n    uint32_t             divisor;\n} VkVertexInputBindingDescription2EXT;\n\ntypedef struct VkVertexInputAttributeDescription2EXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           location;\n    uint32_t           binding;\n    VkFormat           format;\n    uint32_t           offset;\n} VkVertexInputAttributeDescription2EXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetVertexInputEXT)(VkCommandBuffer commandBuffer, uint32_t vertexBindingDescriptionCount, const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, uint32_t vertexAttributeDescriptionCount, const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetVertexInputEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    vertexBindingDescriptionCount,\n    const VkVertexInputBindingDescription2EXT*  pVertexBindingDescriptions,\n    uint32_t                                    vertexAttributeDescriptionCount,\n    const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions);\n#endif\n\n\n#define VK_EXT_physical_device_drm 1\n#define 
VK_EXT_PHYSICAL_DEVICE_DRM_SPEC_VERSION 1\n#define VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME \"VK_EXT_physical_device_drm\"\ntypedef struct VkPhysicalDeviceDrmPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           hasPrimary;\n    VkBool32           hasRender;\n    int64_t            primaryMajor;\n    int64_t            primaryMinor;\n    int64_t            renderMajor;\n    int64_t            renderMinor;\n} VkPhysicalDeviceDrmPropertiesEXT;\n\n\n\n#define VK_EXT_depth_clip_control 1\n#define VK_EXT_DEPTH_CLIP_CONTROL_SPEC_VERSION 1\n#define VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME \"VK_EXT_depth_clip_control\"\ntypedef struct VkPhysicalDeviceDepthClipControlFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           depthClipControl;\n} VkPhysicalDeviceDepthClipControlFeaturesEXT;\n\ntypedef struct VkPipelineViewportDepthClipControlCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           negativeOneToOne;\n} VkPipelineViewportDepthClipControlCreateInfoEXT;\n\n\n\n#define VK_EXT_primitive_topology_list_restart 1\n#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION 1\n#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME \"VK_EXT_primitive_topology_list_restart\"\ntypedef struct VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           primitiveTopologyListRestart;\n    VkBool32           primitiveTopologyPatchListRestart;\n} VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT;\n\n\n\n#define VK_HUAWEI_subpass_shading 1\n#define VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION 2\n#define VK_HUAWEI_SUBPASS_SHADING_EXTENSION_NAME \"VK_HUAWEI_subpass_shading\"\ntypedef struct VkSubpassShadingPipelineCreateInfoHUAWEI {\n    VkStructureType    sType;\n    void*              pNext;\n    VkRenderPass       renderPass;\n    uint32_t           
subpass;\n} VkSubpassShadingPipelineCreateInfoHUAWEI;\n\ntypedef struct VkPhysicalDeviceSubpassShadingFeaturesHUAWEI {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           subpassShading;\n} VkPhysicalDeviceSubpassShadingFeaturesHUAWEI;\n\ntypedef struct VkPhysicalDeviceSubpassShadingPropertiesHUAWEI {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxSubpassShadingWorkgroupSizeAspectRatio;\n} VkPhysicalDeviceSubpassShadingPropertiesHUAWEI;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)(VkDevice device, VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize);\ntypedef void (VKAPI_PTR *PFN_vkCmdSubpassShadingHUAWEI)(VkCommandBuffer commandBuffer);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI(\n    VkDevice                                    device,\n    VkRenderPass                                renderpass,\n    VkExtent2D*                                 pMaxWorkgroupSize);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSubpassShadingHUAWEI(\n    VkCommandBuffer                             commandBuffer);\n#endif\n\n\n#define VK_HUAWEI_invocation_mask 1\n#define VK_HUAWEI_INVOCATION_MASK_SPEC_VERSION 1\n#define VK_HUAWEI_INVOCATION_MASK_EXTENSION_NAME \"VK_HUAWEI_invocation_mask\"\ntypedef struct VkPhysicalDeviceInvocationMaskFeaturesHUAWEI {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           invocationMask;\n} VkPhysicalDeviceInvocationMaskFeaturesHUAWEI;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdBindInvocationMaskHUAWEI)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdBindInvocationMaskHUAWEI(\n    VkCommandBuffer                             commandBuffer,\n    VkImageView                                 imageView,\n    VkImageLayout                               
imageLayout);\n#endif\n\n\n#define VK_NV_external_memory_rdma 1\ntypedef void* VkRemoteAddressNV;\n#define VK_NV_EXTERNAL_MEMORY_RDMA_SPEC_VERSION 1\n#define VK_NV_EXTERNAL_MEMORY_RDMA_EXTENSION_NAME \"VK_NV_external_memory_rdma\"\ntypedef struct VkMemoryGetRemoteAddressInfoNV {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkDeviceMemory                        memory;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n} VkMemoryGetRemoteAddressInfoNV;\n\ntypedef struct VkPhysicalDeviceExternalMemoryRDMAFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           externalMemoryRDMA;\n} VkPhysicalDeviceExternalMemoryRDMAFeaturesNV;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryRemoteAddressNV)(VkDevice device, const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryRemoteAddressNV(\n    VkDevice                                    device,\n    const VkMemoryGetRemoteAddressInfoNV*       pMemoryGetRemoteAddressInfo,\n    VkRemoteAddressNV*                          pAddress);\n#endif\n\n\n#define VK_EXT_pipeline_properties 1\n#define VK_EXT_PIPELINE_PROPERTIES_SPEC_VERSION 1\n#define VK_EXT_PIPELINE_PROPERTIES_EXTENSION_NAME \"VK_EXT_pipeline_properties\"\ntypedef VkPipelineInfoKHR VkPipelineInfoEXT;\n\ntypedef struct VkPipelinePropertiesIdentifierEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint8_t            pipelineIdentifier[VK_UUID_SIZE];\n} VkPipelinePropertiesIdentifierEXT;\n\ntypedef struct VkPhysicalDevicePipelinePropertiesFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pipelinePropertiesIdentifier;\n} VkPhysicalDevicePipelinePropertiesFeaturesEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPipelinePropertiesEXT)(VkDevice device, const VkPipelineInfoEXT* pPipelineInfo, 
VkBaseOutStructure* pPipelineProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPipelinePropertiesEXT(\n    VkDevice                                    device,\n    const VkPipelineInfoEXT*                    pPipelineInfo,\n    VkBaseOutStructure*                         pPipelineProperties);\n#endif\n\n\n#define VK_EXT_multisampled_render_to_single_sampled 1\n#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION 1\n#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME \"VK_EXT_multisampled_render_to_single_sampled\"\ntypedef struct VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           multisampledRenderToSingleSampled;\n} VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT;\n\ntypedef struct VkSubpassResolvePerformanceQueryEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           optimal;\n} VkSubpassResolvePerformanceQueryEXT;\n\ntypedef struct VkMultisampledRenderToSingleSampledInfoEXT {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkBool32                 multisampledRenderToSingleSampledEnable;\n    VkSampleCountFlagBits    rasterizationSamples;\n} VkMultisampledRenderToSingleSampledInfoEXT;\n\n\n\n#define VK_EXT_extended_dynamic_state2 1\n#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION 1\n#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME \"VK_EXT_extended_dynamic_state2\"\ntypedef struct VkPhysicalDeviceExtendedDynamicState2FeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           extendedDynamicState2;\n    VkBool32           extendedDynamicState2LogicOp;\n    VkBool32           extendedDynamicState2PatchControlPoints;\n} VkPhysicalDeviceExtendedDynamicState2FeaturesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdSetPatchControlPointsEXT)(VkCommandBuffer commandBuffer, uint32_t 
patchControlPoints);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetLogicOpEXT)(VkCommandBuffer commandBuffer, VkLogicOp logicOp);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdSetPatchControlPointsEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    patchControlPoints);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    rasterizerDiscardEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    depthBiasEnable);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetLogicOpEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkLogicOp                                   logicOp);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    VkBool32                                    primitiveRestartEnable);\n#endif\n\n\n#define VK_EXT_color_write_enable 1\n#define VK_EXT_COLOR_WRITE_ENABLE_SPEC_VERSION 1\n#define VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME \"VK_EXT_color_write_enable\"\ntypedef struct VkPhysicalDeviceColorWriteEnableFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           colorWriteEnable;\n} VkPhysicalDeviceColorWriteEnableFeaturesEXT;\n\ntypedef struct VkPipelineColorWriteCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        
pNext;\n    uint32_t           attachmentCount;\n    const VkBool32*    pColorWriteEnables;\n} VkPipelineColorWriteCreateInfoEXT;\n\ntypedef void                                    (VKAPI_PTR *PFN_vkCmdSetColorWriteEnableEXT)(VkCommandBuffer       commandBuffer, uint32_t                                attachmentCount, const VkBool32*   pColorWriteEnables);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void                                    VKAPI_CALL vkCmdSetColorWriteEnableEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    attachmentCount,\n    const VkBool32*                             pColorWriteEnables);\n#endif\n\n\n#define VK_EXT_primitives_generated_query 1\n#define VK_EXT_PRIMITIVES_GENERATED_QUERY_SPEC_VERSION 1\n#define VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME \"VK_EXT_primitives_generated_query\"\ntypedef struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           primitivesGeneratedQuery;\n    VkBool32           primitivesGeneratedQueryWithRasterizerDiscard;\n    VkBool32           primitivesGeneratedQueryWithNonZeroStreams;\n} VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT;\n\n\n\n#define VK_EXT_global_priority_query 1\n#define VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION 1\n#define VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME \"VK_EXT_global_priority_query\"\n#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT   VK_MAX_GLOBAL_PRIORITY_SIZE_KHR\ntypedef VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT;\n\ntypedef VkQueueFamilyGlobalPriorityPropertiesKHR VkQueueFamilyGlobalPriorityPropertiesEXT;\n\n\n\n#define VK_EXT_image_view_min_lod 1\n#define VK_EXT_IMAGE_VIEW_MIN_LOD_SPEC_VERSION 1\n#define VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME \"VK_EXT_image_view_min_lod\"\ntypedef struct VkPhysicalDeviceImageViewMinLodFeaturesEXT {\n    VkStructureType    sType;\n    
void*              pNext;\n    VkBool32           minLod;\n} VkPhysicalDeviceImageViewMinLodFeaturesEXT;\n\ntypedef struct VkImageViewMinLodCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    float              minLod;\n} VkImageViewMinLodCreateInfoEXT;\n\n\n\n#define VK_EXT_multi_draw 1\n#define VK_EXT_MULTI_DRAW_SPEC_VERSION    1\n#define VK_EXT_MULTI_DRAW_EXTENSION_NAME  \"VK_EXT_multi_draw\"\ntypedef struct VkPhysicalDeviceMultiDrawFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           multiDraw;\n} VkPhysicalDeviceMultiDrawFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceMultiDrawPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           maxMultiDrawCount;\n} VkPhysicalDeviceMultiDrawPropertiesEXT;\n\ntypedef struct VkMultiDrawInfoEXT {\n    uint32_t    firstVertex;\n    uint32_t    vertexCount;\n} VkMultiDrawInfoEXT;\n\ntypedef struct VkMultiDrawIndexedInfoEXT {\n    uint32_t    firstIndex;\n    uint32_t    indexCount;\n    int32_t     vertexOffset;\n} VkMultiDrawIndexedInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawMultiEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT* pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdDrawMultiIndexedEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT* pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t* pVertexOffset);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    drawCount,\n    const VkMultiDrawInfoEXT*                   pVertexInfo,\n    uint32_t                                    instanceCount,\n    uint32_t                                    firstInstance,\n    uint32_t                          
          stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiIndexedEXT(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    drawCount,\n    const VkMultiDrawIndexedInfoEXT*            pIndexInfo,\n    uint32_t                                    instanceCount,\n    uint32_t                                    firstInstance,\n    uint32_t                                    stride,\n    const int32_t*                              pVertexOffset);\n#endif\n\n\n#define VK_EXT_image_2d_view_of_3d 1\n#define VK_EXT_IMAGE_2D_VIEW_OF_3D_SPEC_VERSION 1\n#define VK_EXT_IMAGE_2D_VIEW_OF_3D_EXTENSION_NAME \"VK_EXT_image_2d_view_of_3d\"\ntypedef struct VkPhysicalDeviceImage2DViewOf3DFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           image2DViewOf3D;\n    VkBool32           sampler2DViewOf3D;\n} VkPhysicalDeviceImage2DViewOf3DFeaturesEXT;\n\n\n\n#define VK_EXT_load_store_op_none 1\n#define VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION 1\n#define VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME \"VK_EXT_load_store_op_none\"\n\n\n#define VK_EXT_border_color_swizzle 1\n#define VK_EXT_BORDER_COLOR_SWIZZLE_SPEC_VERSION 1\n#define VK_EXT_BORDER_COLOR_SWIZZLE_EXTENSION_NAME \"VK_EXT_border_color_swizzle\"\ntypedef struct VkPhysicalDeviceBorderColorSwizzleFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           borderColorSwizzle;\n    VkBool32           borderColorSwizzleFromImage;\n} VkPhysicalDeviceBorderColorSwizzleFeaturesEXT;\n\ntypedef struct VkSamplerBorderColorComponentMappingCreateInfoEXT {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkComponentMapping    components;\n    VkBool32              srgb;\n} VkSamplerBorderColorComponentMappingCreateInfoEXT;\n\n\n\n#define VK_EXT_pageable_device_local_memory 1\n#define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION 1\n#define 
VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_EXTENSION_NAME \"VK_EXT_pageable_device_local_memory\"\ntypedef struct VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           pageableDeviceLocalMemory;\n} VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkSetDeviceMemoryPriorityEXT)(VkDevice       device, VkDeviceMemory memory, float          priority);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkSetDeviceMemoryPriorityEXT(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory,\n    float                                       priority);\n#endif\n\n\n#define VK_VALVE_descriptor_set_host_mapping 1\n#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_SPEC_VERSION 1\n#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_EXTENSION_NAME \"VK_VALVE_descriptor_set_host_mapping\"\ntypedef struct VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           descriptorSetHostMapping;\n} VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE;\n\ntypedef struct VkDescriptorSetBindingReferenceVALVE {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkDescriptorSetLayout    descriptorSetLayout;\n    uint32_t                 binding;\n} VkDescriptorSetBindingReferenceVALVE;\n\ntypedef struct VkDescriptorSetLayoutHostMappingInfoVALVE {\n    VkStructureType    sType;\n    void*              pNext;\n    size_t             descriptorOffset;\n    uint32_t           descriptorSize;\n} VkDescriptorSetLayoutHostMappingInfoVALVE;\n\ntypedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)(VkDevice device, const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping);\ntypedef void (VKAPI_PTR *PFN_vkGetDescriptorSetHostMappingVALVE)(VkDevice 
device, VkDescriptorSet descriptorSet, void** ppData);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutHostMappingInfoVALVE(\n    VkDevice                                    device,\n    const VkDescriptorSetBindingReferenceVALVE* pBindingReference,\n    VkDescriptorSetLayoutHostMappingInfoVALVE*  pHostMapping);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetHostMappingVALVE(\n    VkDevice                                    device,\n    VkDescriptorSet                             descriptorSet,\n    void**                                      ppData);\n#endif\n\n\n#define VK_EXT_non_seamless_cube_map 1\n#define VK_EXT_NON_SEAMLESS_CUBE_MAP_SPEC_VERSION 1\n#define VK_EXT_NON_SEAMLESS_CUBE_MAP_EXTENSION_NAME \"VK_EXT_non_seamless_cube_map\"\ntypedef struct VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           nonSeamlessCubeMap;\n} VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT;\n\n\n\n#define VK_QCOM_fragment_density_map_offset 1\n#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 1\n#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME \"VK_QCOM_fragment_density_map_offset\"\ntypedef struct VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fragmentDensityMapOffset;\n} VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM;\n\ntypedef struct VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkExtent2D         fragmentDensityOffsetGranularity;\n} VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM;\n\ntypedef struct VkSubpassFragmentDensityMapOffsetEndInfoQCOM {\n    VkStructureType      sType;\n    const void*          pNext;\n    uint32_t             fragmentDensityOffsetCount;\n    const VkOffset2D*    pFragmentDensityOffsets;\n} 
VkSubpassFragmentDensityMapOffsetEndInfoQCOM;\n\n\n\n#define VK_NV_linear_color_attachment 1\n#define VK_NV_LINEAR_COLOR_ATTACHMENT_SPEC_VERSION 1\n#define VK_NV_LINEAR_COLOR_ATTACHMENT_EXTENSION_NAME \"VK_NV_linear_color_attachment\"\ntypedef struct VkPhysicalDeviceLinearColorAttachmentFeaturesNV {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           linearColorAttachment;\n} VkPhysicalDeviceLinearColorAttachmentFeaturesNV;\n\n\n\n#define VK_GOOGLE_surfaceless_query 1\n#define VK_GOOGLE_SURFACELESS_QUERY_SPEC_VERSION 1\n#define VK_GOOGLE_SURFACELESS_QUERY_EXTENSION_NAME \"VK_GOOGLE_surfaceless_query\"\n\n\n#define VK_EXT_image_compression_control_swapchain 1\n#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_SPEC_VERSION 1\n#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_EXTENSION_NAME \"VK_EXT_image_compression_control_swapchain\"\ntypedef struct VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           imageCompressionControlSwapchain;\n} VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT;\n\n\n\n#define VK_QCOM_image_processing 1\n#define VK_QCOM_IMAGE_PROCESSING_SPEC_VERSION 1\n#define VK_QCOM_IMAGE_PROCESSING_EXTENSION_NAME \"VK_QCOM_image_processing\"\ntypedef struct VkImageViewSampleWeightCreateInfoQCOM {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkOffset2D         filterCenter;\n    VkExtent2D         filterSize;\n    uint32_t           numPhases;\n} VkImageViewSampleWeightCreateInfoQCOM;\n\ntypedef struct VkPhysicalDeviceImageProcessingFeaturesQCOM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           textureSampleWeighted;\n    VkBool32           textureBoxFilter;\n    VkBool32           textureBlockMatch;\n} VkPhysicalDeviceImageProcessingFeaturesQCOM;\n\ntypedef struct VkPhysicalDeviceImageProcessingPropertiesQCOM {\n    VkStructureType    sType;\n    void*     
         pNext;\n    uint32_t           maxWeightFilterPhases;\n    VkExtent2D         maxWeightFilterDimension;\n    VkExtent2D         maxBlockMatchRegion;\n    VkExtent2D         maxBoxFilterBlockSize;\n} VkPhysicalDeviceImageProcessingPropertiesQCOM;\n\n\n\n#define VK_EXT_subpass_merge_feedback 1\n#define VK_EXT_SUBPASS_MERGE_FEEDBACK_SPEC_VERSION 2\n#define VK_EXT_SUBPASS_MERGE_FEEDBACK_EXTENSION_NAME \"VK_EXT_subpass_merge_feedback\"\n\ntypedef enum VkSubpassMergeStatusEXT {\n    VK_SUBPASS_MERGE_STATUS_MERGED_EXT = 0,\n    VK_SUBPASS_MERGE_STATUS_DISALLOWED_EXT = 1,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SIDE_EFFECTS_EXT = 2,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SAMPLES_MISMATCH_EXT = 3,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_VIEWS_MISMATCH_EXT = 4,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_ALIASING_EXT = 5,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPENDENCIES_EXT = 6,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INCOMPATIBLE_INPUT_ATTACHMENT_EXT = 7,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_TOO_MANY_ATTACHMENTS_EXT = 8,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INSUFFICIENT_STORAGE_EXT = 9,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPTH_STENCIL_COUNT_EXT = 10,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_RESOLVE_ATTACHMENT_REUSE_EXT = 11,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SINGLE_SUBPASS_EXT = 12,\n    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_UNSPECIFIED_EXT = 13,\n    VK_SUBPASS_MERGE_STATUS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkSubpassMergeStatusEXT;\ntypedef struct VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           subpassMergeFeedback;\n} VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT;\n\ntypedef struct VkRenderPassCreationControlEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkBool32           disallowMerging;\n} VkRenderPassCreationControlEXT;\n\ntypedef struct VkRenderPassCreationFeedbackInfoEXT {\n    uint32_t    postMergeSubpassCount;\n} 
VkRenderPassCreationFeedbackInfoEXT;\n\ntypedef struct VkRenderPassCreationFeedbackCreateInfoEXT {\n    VkStructureType                         sType;\n    const void*                             pNext;\n    VkRenderPassCreationFeedbackInfoEXT*    pRenderPassFeedback;\n} VkRenderPassCreationFeedbackCreateInfoEXT;\n\ntypedef struct VkRenderPassSubpassFeedbackInfoEXT {\n    VkSubpassMergeStatusEXT    subpassMergeStatus;\n    char                       description[VK_MAX_DESCRIPTION_SIZE];\n    uint32_t                   postMergeIndex;\n} VkRenderPassSubpassFeedbackInfoEXT;\n\ntypedef struct VkRenderPassSubpassFeedbackCreateInfoEXT {\n    VkStructureType                        sType;\n    const void*                            pNext;\n    VkRenderPassSubpassFeedbackInfoEXT*    pSubpassFeedback;\n} VkRenderPassSubpassFeedbackCreateInfoEXT;\n\n\n\n#define VK_EXT_shader_module_identifier 1\n#define VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT 32U\n#define VK_EXT_SHADER_MODULE_IDENTIFIER_SPEC_VERSION 1\n#define VK_EXT_SHADER_MODULE_IDENTIFIER_EXTENSION_NAME \"VK_EXT_shader_module_identifier\"\ntypedef struct VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           shaderModuleIdentifier;\n} VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT;\n\ntypedef struct VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint8_t            shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE];\n} VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT;\n\ntypedef struct VkPipelineShaderStageModuleIdentifierCreateInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           identifierSize;\n    const uint8_t*     pIdentifier;\n} VkPipelineShaderStageModuleIdentifierCreateInfoEXT;\n\ntypedef struct VkShaderModuleIdentifierEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           
identifierSize;\n    uint8_t            identifier[VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT];\n} VkShaderModuleIdentifierEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkGetShaderModuleIdentifierEXT)(VkDevice device, VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier);\ntypedef void (VKAPI_PTR *PFN_vkGetShaderModuleCreateInfoIdentifierEXT)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkGetShaderModuleIdentifierEXT(\n    VkDevice                                    device,\n    VkShaderModule                              shaderModule,\n    VkShaderModuleIdentifierEXT*                pIdentifier);\n\nVKAPI_ATTR void VKAPI_CALL vkGetShaderModuleCreateInfoIdentifierEXT(\n    VkDevice                                    device,\n    const VkShaderModuleCreateInfo*             pCreateInfo,\n    VkShaderModuleIdentifierEXT*                pIdentifier);\n#endif\n\n\n#define VK_QCOM_tile_properties 1\n#define VK_QCOM_TILE_PROPERTIES_SPEC_VERSION 1\n#define VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME \"VK_QCOM_tile_properties\"\ntypedef struct VkPhysicalDeviceTilePropertiesFeaturesQCOM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           tileProperties;\n} VkPhysicalDeviceTilePropertiesFeaturesQCOM;\n\ntypedef struct VkTilePropertiesQCOM {\n    VkStructureType    sType;\n    void*              pNext;\n    VkExtent3D         tileSize;\n    VkExtent2D         apronSize;\n    VkOffset2D         origin;\n} VkTilePropertiesQCOM;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetFramebufferTilePropertiesQCOM)(VkDevice device, VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDynamicRenderingTilePropertiesQCOM)(VkDevice device, const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL 
vkGetFramebufferTilePropertiesQCOM(\n    VkDevice                                    device,\n    VkFramebuffer                               framebuffer,\n    uint32_t*                                   pPropertiesCount,\n    VkTilePropertiesQCOM*                       pProperties);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDynamicRenderingTilePropertiesQCOM(\n    VkDevice                                    device,\n    const VkRenderingInfo*                      pRenderingInfo,\n    VkTilePropertiesQCOM*                       pProperties);\n#endif\n\n\n#define VK_SEC_amigo_profiling 1\n#define VK_SEC_AMIGO_PROFILING_SPEC_VERSION 1\n#define VK_SEC_AMIGO_PROFILING_EXTENSION_NAME \"VK_SEC_amigo_profiling\"\ntypedef struct VkPhysicalDeviceAmigoProfilingFeaturesSEC {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           amigoProfiling;\n} VkPhysicalDeviceAmigoProfilingFeaturesSEC;\n\ntypedef struct VkAmigoProfilingSubmitInfoSEC {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint64_t           firstDrawTimestamp;\n    uint64_t           swapBufferTimestamp;\n} VkAmigoProfilingSubmitInfoSEC;\n\n\n\n#define VK_KHR_acceleration_structure 1\nVK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR)\n#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 13\n#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME \"VK_KHR_acceleration_structure\"\n\ntypedef enum VkBuildAccelerationStructureModeKHR {\n    VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0,\n    VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1,\n    VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkBuildAccelerationStructureModeKHR;\n\ntypedef enum VkAccelerationStructureBuildTypeKHR {\n    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0,\n    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1,\n    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2,\n    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF\n} 
VkAccelerationStructureBuildTypeKHR;\n\ntypedef enum VkAccelerationStructureCompatibilityKHR {\n    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0,\n    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1,\n    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkAccelerationStructureCompatibilityKHR;\n\ntypedef enum VkAccelerationStructureCreateFlagBitsKHR {\n    VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001,\n    VK_ACCELERATION_STRUCTURE_CREATE_MOTION_BIT_NV = 0x00000004,\n    VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkAccelerationStructureCreateFlagBitsKHR;\ntypedef VkFlags VkAccelerationStructureCreateFlagsKHR;\ntypedef union VkDeviceOrHostAddressKHR {\n    VkDeviceAddress    deviceAddress;\n    void*              hostAddress;\n} VkDeviceOrHostAddressKHR;\n\ntypedef struct VkAccelerationStructureBuildRangeInfoKHR {\n    uint32_t    primitiveCount;\n    uint32_t    primitiveOffset;\n    uint32_t    firstVertex;\n    uint32_t    transformOffset;\n} VkAccelerationStructureBuildRangeInfoKHR;\n\ntypedef struct VkAccelerationStructureGeometryTrianglesDataKHR {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkFormat                         vertexFormat;\n    VkDeviceOrHostAddressConstKHR    vertexData;\n    VkDeviceSize                     vertexStride;\n    uint32_t                         maxVertex;\n    VkIndexType                      indexType;\n    VkDeviceOrHostAddressConstKHR    indexData;\n    VkDeviceOrHostAddressConstKHR    transformData;\n} VkAccelerationStructureGeometryTrianglesDataKHR;\n\ntypedef struct VkAccelerationStructureGeometryAabbsDataKHR {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkDeviceOrHostAddressConstKHR    data;\n    VkDeviceSize                     stride;\n} VkAccelerationStructureGeometryAabbsDataKHR;\n\ntypedef struct 
VkAccelerationStructureGeometryInstancesDataKHR {\n    VkStructureType                  sType;\n    const void*                      pNext;\n    VkBool32                         arrayOfPointers;\n    VkDeviceOrHostAddressConstKHR    data;\n} VkAccelerationStructureGeometryInstancesDataKHR;\n\ntypedef union VkAccelerationStructureGeometryDataKHR {\n    VkAccelerationStructureGeometryTrianglesDataKHR    triangles;\n    VkAccelerationStructureGeometryAabbsDataKHR        aabbs;\n    VkAccelerationStructureGeometryInstancesDataKHR    instances;\n} VkAccelerationStructureGeometryDataKHR;\n\ntypedef struct VkAccelerationStructureGeometryKHR {\n    VkStructureType                           sType;\n    const void*                               pNext;\n    VkGeometryTypeKHR                         geometryType;\n    VkAccelerationStructureGeometryDataKHR    geometry;\n    VkGeometryFlagsKHR                        flags;\n} VkAccelerationStructureGeometryKHR;\n\ntypedef struct VkAccelerationStructureBuildGeometryInfoKHR {\n    VkStructureType                                     sType;\n    const void*                                         pNext;\n    VkAccelerationStructureTypeKHR                      type;\n    VkBuildAccelerationStructureFlagsKHR                flags;\n    VkBuildAccelerationStructureModeKHR                 mode;\n    VkAccelerationStructureKHR                          srcAccelerationStructure;\n    VkAccelerationStructureKHR                          dstAccelerationStructure;\n    uint32_t                                            geometryCount;\n    const VkAccelerationStructureGeometryKHR*           pGeometries;\n    const VkAccelerationStructureGeometryKHR* const*    ppGeometries;\n    VkDeviceOrHostAddressKHR                            scratchData;\n} VkAccelerationStructureBuildGeometryInfoKHR;\n\ntypedef struct VkAccelerationStructureCreateInfoKHR {\n    VkStructureType                          sType;\n    const void*                              
pNext;\n    VkAccelerationStructureCreateFlagsKHR    createFlags;\n    VkBuffer                                 buffer;\n    VkDeviceSize                             offset;\n    VkDeviceSize                             size;\n    VkAccelerationStructureTypeKHR           type;\n    VkDeviceAddress                          deviceAddress;\n} VkAccelerationStructureCreateInfoKHR;\n\ntypedef struct VkWriteDescriptorSetAccelerationStructureKHR {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    uint32_t                             accelerationStructureCount;\n    const VkAccelerationStructureKHR*    pAccelerationStructures;\n} VkWriteDescriptorSetAccelerationStructureKHR;\n\ntypedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           accelerationStructure;\n    VkBool32           accelerationStructureCaptureReplay;\n    VkBool32           accelerationStructureIndirectBuild;\n    VkBool32           accelerationStructureHostCommands;\n    VkBool32           descriptorBindingAccelerationStructureUpdateAfterBind;\n} VkPhysicalDeviceAccelerationStructureFeaturesKHR;\n\ntypedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    uint64_t           maxGeometryCount;\n    uint64_t           maxInstanceCount;\n    uint64_t           maxPrimitiveCount;\n    uint32_t           maxPerStageDescriptorAccelerationStructures;\n    uint32_t           maxPerStageDescriptorUpdateAfterBindAccelerationStructures;\n    uint32_t           maxDescriptorSetAccelerationStructures;\n    uint32_t           maxDescriptorSetUpdateAfterBindAccelerationStructures;\n    uint32_t           minAccelerationStructureScratchOffsetAlignment;\n} VkPhysicalDeviceAccelerationStructurePropertiesKHR;\n\ntypedef struct VkAccelerationStructureDeviceAddressInfoKHR {\n    VkStructureType               
sType;\n    const void*                   pNext;\n    VkAccelerationStructureKHR    accelerationStructure;\n} VkAccelerationStructureDeviceAddressInfoKHR;\n\ntypedef struct VkAccelerationStructureVersionInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    const uint8_t*     pVersionData;\n} VkAccelerationStructureVersionInfoKHR;\n\ntypedef struct VkCopyAccelerationStructureToMemoryInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkAccelerationStructureKHR            src;\n    VkDeviceOrHostAddressKHR              dst;\n    VkCopyAccelerationStructureModeKHR    mode;\n} VkCopyAccelerationStructureToMemoryInfoKHR;\n\ntypedef struct VkCopyMemoryToAccelerationStructureInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkDeviceOrHostAddressConstKHR         src;\n    VkAccelerationStructureKHR            dst;\n    VkCopyAccelerationStructureModeKHR    mode;\n} VkCopyMemoryToAccelerationStructureInfoKHR;\n\ntypedef struct VkCopyAccelerationStructureInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkAccelerationStructureKHR            src;\n    VkAccelerationStructureKHR            dst;\n    VkCopyAccelerationStructureModeKHR    mode;\n} VkCopyAccelerationStructureInfoKHR;\n\ntypedef struct VkAccelerationStructureBuildSizesInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceSize       accelerationStructureSize;\n    VkDeviceSize       updateScratchSize;\n    VkDeviceSize       buildScratchSize;\n} VkAccelerationStructureBuildSizesInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice                                           device, const VkAccelerationStructureCreateInfoKHR*        pCreateInfo, const VkAllocationCallbacks*       pAllocator, VkAccelerationStructureKHR*                        
pAccelerationStructure);\ntypedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator);\ntypedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer                                    commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos);\ntypedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer                  commandBuffer, uint32_t                                           infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress*             pIndirectDeviceAddresses, const uint32_t*                    pIndirectStrides, const uint32_t* const*             ppMaxPrimitiveCounts);\ntypedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice                                           device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos);\ntypedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType  queryType, 
size_t       dataSize, void* pData, size_t stride);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);\ntypedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);\ntypedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery);\ntypedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility);\ntypedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice                                            device, VkAccelerationStructureBuildTypeKHR                 buildType, const VkAccelerationStructureBuildGeometryInfoKHR*  pBuildInfo, const uint32_t*  pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR*           pSizeInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR(\n    VkDevice                                    device,\n    const VkAccelerationStructureCreateInfoKHR* pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkAccelerationStructureKHR*                 pAccelerationStructure);\n\nVKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR(\n    VkDevice                                    
device,\n    VkAccelerationStructureKHR                  accelerationStructure,\n    const VkAllocationCallbacks*                pAllocator);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    infoCount,\n    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,\n    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    infoCount,\n    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,\n    const VkDeviceAddress*                      pIndirectDeviceAddresses,\n    const uint32_t*                             pIndirectStrides,\n    const uint32_t* const*                      ppMaxPrimitiveCounts);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      deferredOperation,\n    uint32_t                                    infoCount,\n    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,\n    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      deferredOperation,\n    const VkCopyAccelerationStructureInfoKHR*   pInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      deferredOperation,\n    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR(\n    VkDevice                                    device,\n    
VkDeferredOperationKHR                      deferredOperation,\n    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR(\n    VkDevice                                    device,\n    uint32_t                                    accelerationStructureCount,\n    const VkAccelerationStructureKHR*           pAccelerationStructures,\n    VkQueryType                                 queryType,\n    size_t                                      dataSize,\n    void*                                       pData,\n    size_t                                      stride);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyAccelerationStructureInfoKHR*   pInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);\n\nVKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR(\n    VkDevice                                    device,\n    const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    accelerationStructureCount,\n    const VkAccelerationStructureKHR*           pAccelerationStructures,\n    VkQueryType                                 queryType,\n    VkQueryPool                                 queryPool,\n    uint32_t                                    firstQuery);\n\nVKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR(\n    
VkDevice                                    device,\n    const VkAccelerationStructureVersionInfoKHR* pVersionInfo,\n    VkAccelerationStructureCompatibilityKHR*    pCompatibility);\n\nVKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR(\n    VkDevice                                    device,\n    VkAccelerationStructureBuildTypeKHR         buildType,\n    const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo,\n    const uint32_t*                             pMaxPrimitiveCounts,\n    VkAccelerationStructureBuildSizesInfoKHR*   pSizeInfo);\n#endif\n\n\n#define VK_KHR_ray_tracing_pipeline 1\n#define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1\n#define VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME \"VK_KHR_ray_tracing_pipeline\"\n\ntypedef enum VkShaderGroupShaderKHR {\n    VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0,\n    VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1,\n    VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2,\n    VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3,\n    VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF\n} VkShaderGroupShaderKHR;\ntypedef struct VkRayTracingShaderGroupCreateInfoKHR {\n    VkStructureType                   sType;\n    const void*                       pNext;\n    VkRayTracingShaderGroupTypeKHR    type;\n    uint32_t                          generalShader;\n    uint32_t                          closestHitShader;\n    uint32_t                          anyHitShader;\n    uint32_t                          intersectionShader;\n    const void*                       pShaderGroupCaptureReplayHandle;\n} VkRayTracingShaderGroupCreateInfoKHR;\n\ntypedef struct VkRayTracingPipelineInterfaceCreateInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           maxPipelineRayPayloadSize;\n    uint32_t           maxPipelineRayHitAttributeSize;\n} VkRayTracingPipelineInterfaceCreateInfoKHR;\n\ntypedef struct VkRayTracingPipelineCreateInfoKHR {\n    VkStructureType                                      sType;\n    
const void*                                          pNext;\n    VkPipelineCreateFlags                                flags;\n    uint32_t                                             stageCount;\n    const VkPipelineShaderStageCreateInfo*               pStages;\n    uint32_t                                             groupCount;\n    const VkRayTracingShaderGroupCreateInfoKHR*          pGroups;\n    uint32_t                                             maxPipelineRayRecursionDepth;\n    const VkPipelineLibraryCreateInfoKHR*                pLibraryInfo;\n    const VkRayTracingPipelineInterfaceCreateInfoKHR*    pLibraryInterface;\n    const VkPipelineDynamicStateCreateInfo*              pDynamicState;\n    VkPipelineLayout                                     layout;\n    VkPipeline                                           basePipelineHandle;\n    int32_t                                              basePipelineIndex;\n} VkRayTracingPipelineCreateInfoKHR;\n\ntypedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rayTracingPipeline;\n    VkBool32           rayTracingPipelineShaderGroupHandleCaptureReplay;\n    VkBool32           rayTracingPipelineShaderGroupHandleCaptureReplayMixed;\n    VkBool32           rayTracingPipelineTraceRaysIndirect;\n    VkBool32           rayTraversalPrimitiveCulling;\n} VkPhysicalDeviceRayTracingPipelineFeaturesKHR;\n\ntypedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           shaderGroupHandleSize;\n    uint32_t           maxRayRecursionDepth;\n    uint32_t           maxShaderGroupStride;\n    uint32_t           shaderGroupBaseAlignment;\n    uint32_t           shaderGroupHandleCaptureReplaySize;\n    uint32_t           maxRayDispatchInvocationCount;\n    uint32_t           shaderGroupHandleAlignment;\n    uint32_t           maxRayHitAttributeSize;\n} 
VkPhysicalDeviceRayTracingPipelinePropertiesKHR;\n\ntypedef struct VkStridedDeviceAddressRegionKHR {\n    VkDeviceAddress    deviceAddress;\n    VkDeviceSize       stride;\n    VkDeviceSize       size;\n} VkStridedDeviceAddressRegionKHR;\n\ntypedef struct VkTraceRaysIndirectCommandKHR {\n    uint32_t    width;\n    uint32_t    height;\n    uint32_t    depth;\n} VkTraceRaysIndirectCommandKHR;\n\ntypedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth);\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);\ntypedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress);\ntypedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader);\ntypedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize);\n\n#ifndef 
VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkStridedDeviceAddressRegionKHR*      pRaygenShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pMissShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pHitShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pCallableShaderBindingTable,\n    uint32_t                                    width,\n    uint32_t                                    height,\n    uint32_t                                    depth);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR(\n    VkDevice                                    device,\n    VkDeferredOperationKHR                      deferredOperation,\n    VkPipelineCache                             pipelineCache,\n    uint32_t                                    createInfoCount,\n    const VkRayTracingPipelineCreateInfoKHR*    pCreateInfos,\n    const VkAllocationCallbacks*                pAllocator,\n    VkPipeline*                                 pPipelines);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    uint32_t                                    firstGroup,\n    uint32_t                                    groupCount,\n    size_t                                      dataSize,\n    void*                                       pData);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR(\n    VkCommandBuffer                             commandBuffer,\n    const VkStridedDeviceAddressRegionKHR*      pRaygenShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pMissShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pHitShaderBindingTable,\n    const VkStridedDeviceAddressRegionKHR*      pCallableShaderBindingTable,\n    VkDeviceAddress             
                indirectDeviceAddress);\n\nVKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR(\n    VkDevice                                    device,\n    VkPipeline                                  pipeline,\n    uint32_t                                    group,\n    VkShaderGroupShaderKHR                      groupShader);\n\nVKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR(\n    VkCommandBuffer                             commandBuffer,\n    uint32_t                                    pipelineStackSize);\n#endif\n\n\n#define VK_KHR_ray_query 1\n#define VK_KHR_RAY_QUERY_SPEC_VERSION     1\n#define VK_KHR_RAY_QUERY_EXTENSION_NAME   \"VK_KHR_ray_query\"\ntypedef struct VkPhysicalDeviceRayQueryFeaturesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           rayQuery;\n} VkPhysicalDeviceRayQueryFeaturesKHR;\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_ios.h",
    "content": "#ifndef VULKAN_IOS_H_\n#define VULKAN_IOS_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_MVK_ios_surface 1\n#define VK_MVK_IOS_SURFACE_SPEC_VERSION   3\n#define VK_MVK_IOS_SURFACE_EXTENSION_NAME \"VK_MVK_ios_surface\"\ntypedef VkFlags VkIOSSurfaceCreateFlagsMVK;\ntypedef struct VkIOSSurfaceCreateInfoMVK {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkIOSSurfaceCreateFlagsMVK    flags;\n    const void*                   pView;\n} VkIOSSurfaceCreateInfoMVK;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(\n    VkInstance                                  instance,\n    const VkIOSSurfaceCreateInfoMVK*            pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_macos.h",
    "content": "#ifndef VULKAN_MACOS_H_\n#define VULKAN_MACOS_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_MVK_macos_surface 1\n#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 3\n#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME \"VK_MVK_macos_surface\"\ntypedef VkFlags VkMacOSSurfaceCreateFlagsMVK;\ntypedef struct VkMacOSSurfaceCreateInfoMVK {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkMacOSSurfaceCreateFlagsMVK    flags;\n    const void*                     pView;\n} VkMacOSSurfaceCreateInfoMVK;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(\n    VkInstance                                  instance,\n    const VkMacOSSurfaceCreateInfoMVK*          pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_metal.h",
    "content": "#ifndef VULKAN_METAL_H_\n#define VULKAN_METAL_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_EXT_metal_surface 1\n#ifdef __OBJC__\n@class CAMetalLayer;\n#else\ntypedef void CAMetalLayer;\n#endif\n\n#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1\n#define VK_EXT_METAL_SURFACE_EXTENSION_NAME \"VK_EXT_metal_surface\"\ntypedef VkFlags VkMetalSurfaceCreateFlagsEXT;\ntypedef struct VkMetalSurfaceCreateInfoEXT {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkMetalSurfaceCreateFlagsEXT    flags;\n    const CAMetalLayer*             pLayer;\n} VkMetalSurfaceCreateInfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(\n    VkInstance                                  instance,\n    const VkMetalSurfaceCreateInfoEXT*          pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n#endif\n\n\n#define VK_EXT_metal_objects 1\n#ifdef __OBJC__\n@protocol MTLDevice;\ntypedef id<MTLDevice> MTLDevice_id;\n#else\ntypedef void* MTLDevice_id;\n#endif\n\n#ifdef __OBJC__\n@protocol MTLCommandQueue;\ntypedef id<MTLCommandQueue> MTLCommandQueue_id;\n#else\ntypedef void* MTLCommandQueue_id;\n#endif\n\n#ifdef __OBJC__\n@protocol MTLBuffer;\ntypedef id<MTLBuffer> MTLBuffer_id;\n#else\ntypedef void* MTLBuffer_id;\n#endif\n\n#ifdef __OBJC__\n@protocol MTLTexture;\ntypedef id<MTLTexture> MTLTexture_id;\n#else\ntypedef void* MTLTexture_id;\n#endif\n\ntypedef struct __IOSurface* IOSurfaceRef;\n#ifdef __OBJC__\n@protocol 
MTLSharedEvent;\ntypedef id<MTLSharedEvent> MTLSharedEvent_id;\n#else\ntypedef void* MTLSharedEvent_id;\n#endif\n\n#define VK_EXT_METAL_OBJECTS_SPEC_VERSION 1\n#define VK_EXT_METAL_OBJECTS_EXTENSION_NAME \"VK_EXT_metal_objects\"\n\ntypedef enum VkExportMetalObjectTypeFlagBitsEXT {\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_DEVICE_BIT_EXT = 0x00000001,\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_COMMAND_QUEUE_BIT_EXT = 0x00000002,\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_BUFFER_BIT_EXT = 0x00000004,\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_TEXTURE_BIT_EXT = 0x00000008,\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_IOSURFACE_BIT_EXT = 0x00000010,\n    VK_EXPORT_METAL_OBJECT_TYPE_METAL_SHARED_EVENT_BIT_EXT = 0x00000020,\n    VK_EXPORT_METAL_OBJECT_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkExportMetalObjectTypeFlagBitsEXT;\ntypedef VkFlags VkExportMetalObjectTypeFlagsEXT;\ntypedef struct VkExportMetalObjectCreateInfoEXT {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkExportMetalObjectTypeFlagBitsEXT    exportObjectType;\n} VkExportMetalObjectCreateInfoEXT;\n\ntypedef struct VkExportMetalObjectsInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n} VkExportMetalObjectsInfoEXT;\n\ntypedef struct VkExportMetalDeviceInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    MTLDevice_id       mtlDevice;\n} VkExportMetalDeviceInfoEXT;\n\ntypedef struct VkExportMetalCommandQueueInfoEXT {\n    VkStructureType       sType;\n    const void*           pNext;\n    VkQueue               queue;\n    MTLCommandQueue_id    mtlCommandQueue;\n} VkExportMetalCommandQueueInfoEXT;\n\ntypedef struct VkExportMetalBufferInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkDeviceMemory     memory;\n    MTLBuffer_id       mtlBuffer;\n} VkExportMetalBufferInfoEXT;\n\ntypedef struct VkImportMetalBufferInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    
MTLBuffer_id       mtlBuffer;\n} VkImportMetalBufferInfoEXT;\n\ntypedef struct VkExportMetalTextureInfoEXT {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImage                  image;\n    VkImageView              imageView;\n    VkBufferView             bufferView;\n    VkImageAspectFlagBits    plane;\n    MTLTexture_id            mtlTexture;\n} VkExportMetalTextureInfoEXT;\n\ntypedef struct VkImportMetalTextureInfoEXT {\n    VkStructureType          sType;\n    const void*              pNext;\n    VkImageAspectFlagBits    plane;\n    MTLTexture_id            mtlTexture;\n} VkImportMetalTextureInfoEXT;\n\ntypedef struct VkExportMetalIOSurfaceInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    VkImage            image;\n    IOSurfaceRef       ioSurface;\n} VkExportMetalIOSurfaceInfoEXT;\n\ntypedef struct VkImportMetalIOSurfaceInfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    IOSurfaceRef       ioSurface;\n} VkImportMetalIOSurfaceInfoEXT;\n\ntypedef struct VkExportMetalSharedEventInfoEXT {\n    VkStructureType      sType;\n    const void*          pNext;\n    VkSemaphore          semaphore;\n    VkEvent              event;\n    MTLSharedEvent_id    mtlSharedEvent;\n} VkExportMetalSharedEventInfoEXT;\n\ntypedef struct VkImportMetalSharedEventInfoEXT {\n    VkStructureType      sType;\n    const void*          pNext;\n    MTLSharedEvent_id    mtlSharedEvent;\n} VkImportMetalSharedEventInfoEXT;\n\ntypedef void (VKAPI_PTR *PFN_vkExportMetalObjectsEXT)(VkDevice device, VkExportMetalObjectsInfoEXT* pMetalObjectsInfo);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR void VKAPI_CALL vkExportMetalObjectsEXT(\n    VkDevice                                    device,\n    VkExportMetalObjectsInfoEXT*                pMetalObjectsInfo);\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_win32.h",
    "content": "#ifndef VULKAN_WIN32_H_\n#define VULKAN_WIN32_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_KHR_win32_surface 1\n#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6\n#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME \"VK_KHR_win32_surface\"\ntypedef VkFlags VkWin32SurfaceCreateFlagsKHR;\ntypedef struct VkWin32SurfaceCreateInfoKHR {\n    VkStructureType                 sType;\n    const void*                     pNext;\n    VkWin32SurfaceCreateFlagsKHR    flags;\n    HINSTANCE                       hinstance;\n    HWND                            hwnd;\n} VkWin32SurfaceCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\ntypedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(\n    VkInstance                                  instance,\n    const VkWin32SurfaceCreateInfoKHR*          pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n\nVKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t                                    queueFamilyIndex);\n#endif\n\n\n#define VK_KHR_external_memory_win32 1\n#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME \"VK_KHR_external_memory_win32\"\ntypedef struct VkImportMemoryWin32HandleInfoKHR {\n    VkStructureType                       sType;\n    const void*         
                  pNext;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n    HANDLE                                handle;\n    LPCWSTR                               name;\n} VkImportMemoryWin32HandleInfoKHR;\n\ntypedef struct VkExportMemoryWin32HandleInfoKHR {\n    VkStructureType               sType;\n    const void*                   pNext;\n    const SECURITY_ATTRIBUTES*    pAttributes;\n    DWORD                         dwAccess;\n    LPCWSTR                       name;\n} VkExportMemoryWin32HandleInfoKHR;\n\ntypedef struct VkMemoryWin32HandlePropertiesKHR {\n    VkStructureType    sType;\n    void*              pNext;\n    uint32_t           memoryTypeBits;\n} VkMemoryWin32HandlePropertiesKHR;\n\ntypedef struct VkMemoryGetWin32HandleInfoKHR {\n    VkStructureType                       sType;\n    const void*                           pNext;\n    VkDeviceMemory                        memory;\n    VkExternalMemoryHandleTypeFlagBits    handleType;\n} VkMemoryGetWin32HandleInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(\n    VkDevice                                    device,\n    const VkMemoryGetWin32HandleInfoKHR*        pGetWin32HandleInfo,\n    HANDLE*                                     pHandle);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(\n    VkDevice                                    device,\n    VkExternalMemoryHandleTypeFlagBits          handleType,\n    HANDLE                                      handle,\n    VkMemoryWin32HandlePropertiesKHR*           pMemoryWin32HandleProperties);\n#endif\n\n\n#define 
VK_KHR_win32_keyed_mutex 1\n#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1\n#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME \"VK_KHR_win32_keyed_mutex\"\ntypedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {\n    VkStructureType          sType;\n    const void*              pNext;\n    uint32_t                 acquireCount;\n    const VkDeviceMemory*    pAcquireSyncs;\n    const uint64_t*          pAcquireKeys;\n    const uint32_t*          pAcquireTimeouts;\n    uint32_t                 releaseCount;\n    const VkDeviceMemory*    pReleaseSyncs;\n    const uint64_t*          pReleaseKeys;\n} VkWin32KeyedMutexAcquireReleaseInfoKHR;\n\n\n\n#define VK_KHR_external_semaphore_win32 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME \"VK_KHR_external_semaphore_win32\"\ntypedef struct VkImportSemaphoreWin32HandleInfoKHR {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkSemaphore                              semaphore;\n    VkSemaphoreImportFlags                   flags;\n    VkExternalSemaphoreHandleTypeFlagBits    handleType;\n    HANDLE                                   handle;\n    LPCWSTR                                  name;\n} VkImportSemaphoreWin32HandleInfoKHR;\n\ntypedef struct VkExportSemaphoreWin32HandleInfoKHR {\n    VkStructureType               sType;\n    const void*                   pNext;\n    const SECURITY_ATTRIBUTES*    pAttributes;\n    DWORD                         dwAccess;\n    LPCWSTR                       name;\n} VkExportSemaphoreWin32HandleInfoKHR;\n\ntypedef struct VkD3D12FenceSubmitInfoKHR {\n    VkStructureType    sType;\n    const void*        pNext;\n    uint32_t           waitSemaphoreValuesCount;\n    const uint64_t*    pWaitSemaphoreValues;\n    uint32_t           signalSemaphoreValuesCount;\n    const uint64_t*    pSignalSemaphoreValues;\n} VkD3D12FenceSubmitInfoKHR;\n\ntypedef struct 
VkSemaphoreGetWin32HandleInfoKHR {\n    VkStructureType                          sType;\n    const void*                              pNext;\n    VkSemaphore                              semaphore;\n    VkExternalSemaphoreHandleTypeFlagBits    handleType;\n} VkSemaphoreGetWin32HandleInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(\n    VkDevice                                    device,\n    const VkImportSemaphoreWin32HandleInfoKHR*  pImportSemaphoreWin32HandleInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(\n    VkDevice                                    device,\n    const VkSemaphoreGetWin32HandleInfoKHR*     pGetWin32HandleInfo,\n    HANDLE*                                     pHandle);\n#endif\n\n\n#define VK_KHR_external_fence_win32 1\n#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1\n#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME \"VK_KHR_external_fence_win32\"\ntypedef struct VkImportFenceWin32HandleInfoKHR {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkFence                              fence;\n    VkFenceImportFlags                   flags;\n    VkExternalFenceHandleTypeFlagBits    handleType;\n    HANDLE                               handle;\n    LPCWSTR                              name;\n} VkImportFenceWin32HandleInfoKHR;\n\ntypedef struct VkExportFenceWin32HandleInfoKHR {\n    VkStructureType               sType;\n    const void*                   pNext;\n    const SECURITY_ATTRIBUTES*    pAttributes;\n    DWORD                         dwAccess;\n    LPCWSTR                       name;\n} 
VkExportFenceWin32HandleInfoKHR;\n\ntypedef struct VkFenceGetWin32HandleInfoKHR {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkFence                              fence;\n    VkExternalFenceHandleTypeFlagBits    handleType;\n} VkFenceGetWin32HandleInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(\n    VkDevice                                    device,\n    const VkImportFenceWin32HandleInfoKHR*      pImportFenceWin32HandleInfo);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(\n    VkDevice                                    device,\n    const VkFenceGetWin32HandleInfoKHR*         pGetWin32HandleInfo,\n    HANDLE*                                     pHandle);\n#endif\n\n\n#define VK_NV_external_memory_win32 1\n#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1\n#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME \"VK_NV_external_memory_win32\"\ntypedef struct VkImportMemoryWin32HandleInfoNV {\n    VkStructureType                      sType;\n    const void*                          pNext;\n    VkExternalMemoryHandleTypeFlagsNV    handleType;\n    HANDLE                               handle;\n} VkImportMemoryWin32HandleInfoNV;\n\ntypedef struct VkExportMemoryWin32HandleInfoNV {\n    VkStructureType               sType;\n    const void*                   pNext;\n    const SECURITY_ATTRIBUTES*    pAttributes;\n    DWORD                         dwAccess;\n} VkExportMemoryWin32HandleInfoNV;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* 
pHandle);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(\n    VkDevice                                    device,\n    VkDeviceMemory                              memory,\n    VkExternalMemoryHandleTypeFlagsNV           handleType,\n    HANDLE*                                     pHandle);\n#endif\n\n\n#define VK_NV_win32_keyed_mutex 1\n#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2\n#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME \"VK_NV_win32_keyed_mutex\"\ntypedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {\n    VkStructureType          sType;\n    const void*              pNext;\n    uint32_t                 acquireCount;\n    const VkDeviceMemory*    pAcquireSyncs;\n    const uint64_t*          pAcquireKeys;\n    const uint32_t*          pAcquireTimeoutMilliseconds;\n    uint32_t                 releaseCount;\n    const VkDeviceMemory*    pReleaseSyncs;\n    const uint64_t*          pReleaseKeys;\n} VkWin32KeyedMutexAcquireReleaseInfoNV;\n\n\n\n#define VK_EXT_full_screen_exclusive 1\n#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4\n#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME \"VK_EXT_full_screen_exclusive\"\n\ntypedef enum VkFullScreenExclusiveEXT {\n    VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0,\n    VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1,\n    VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2,\n    VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3,\n    VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF\n} VkFullScreenExclusiveEXT;\ntypedef struct VkSurfaceFullScreenExclusiveInfoEXT {\n    VkStructureType             sType;\n    void*                       pNext;\n    VkFullScreenExclusiveEXT    fullScreenExclusive;\n} VkSurfaceFullScreenExclusiveInfoEXT;\n\ntypedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT {\n    VkStructureType    sType;\n    void*              pNext;\n    VkBool32           fullScreenExclusiveSupported;\n} VkSurfaceCapabilitiesFullScreenExclusiveEXT;\n\ntypedef struct 
VkSurfaceFullScreenExclusiveWin32InfoEXT {\n    VkStructureType    sType;\n    const void*        pNext;\n    HMONITOR           hmonitor;\n} VkSurfaceFullScreenExclusiveWin32InfoEXT;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);\ntypedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);\ntypedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);\ntypedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT(\n    VkPhysicalDevice                            physicalDevice,\n    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,\n    uint32_t*                                   pPresentModeCount,\n    VkPresentModeKHR*                           pPresentModes);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT(\n    VkDevice                                    device,\n    VkSwapchainKHR                              swapchain);\n\nVKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT(\n    VkDevice                                    device,\n    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,\n    VkDeviceGroupPresentModeFlagsKHR*           pModes);\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "deps/vulkan-headers/vulkan/vulkan_xcb.h",
    "content": "#ifndef VULKAN_XCB_H_\n#define VULKAN_XCB_H_ 1\n\n/*\n** Copyright 2015-2022 The Khronos Group Inc.\n**\n** SPDX-License-Identifier: Apache-2.0\n*/\n\n/*\n** This header is generated from the Khronos Vulkan XML API Registry.\n**\n*/\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\n\n#define VK_KHR_xcb_surface 1\n#define VK_KHR_XCB_SURFACE_SPEC_VERSION   6\n#define VK_KHR_XCB_SURFACE_EXTENSION_NAME \"VK_KHR_xcb_surface\"\ntypedef VkFlags VkXcbSurfaceCreateFlagsKHR;\ntypedef struct VkXcbSurfaceCreateInfoKHR {\n    VkStructureType               sType;\n    const void*                   pNext;\n    VkXcbSurfaceCreateFlagsKHR    flags;\n    xcb_connection_t*             connection;\n    xcb_window_t                  window;\n} VkXcbSurfaceCreateInfoKHR;\n\ntypedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);\ntypedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);\n\n#ifndef VK_NO_PROTOTYPES\nVKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(\n    VkInstance                                  instance,\n    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,\n    const VkAllocationCallbacks*                pAllocator,\n    VkSurfaceKHR*                               pSurface);\n\nVKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(\n    VkPhysicalDevice                            physicalDevice,\n    uint32_t                                    queueFamilyIndex,\n    xcb_connection_t*                           connection,\n    xcb_visualid_t                              visual_id);\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "include/nicegraf-mtl-handles.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n#include \"nicegraf.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif \n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLTexture. The caller is responsible for casting the return\n * value to a MTLTexture.\n *\n * @param image A handle to a nicegraf image.\n */\nuintptr_t ngf_get_mtl_image_handle(ngf_image image) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLBuffer. The caller is responsible for casting the return\n * value to a MTLBuffer.\n *\n * @param buffer A handle to a nicegraf buffer.\n */\nuintptr_t ngf_get_mtl_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLSamplerState. 
The caller is responsible for casting the\n * return value to a MTLSamplerState.\n *\n * @param sampler A handle to a nicegraf sampler.\n */\nuintptr_t ngf_get_mtl_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLCommandBuffer. The caller is responsible for casting\n * the return value to a MTLCommandBuffer.\n *\n * @param cmd_buffer A handle to a nicegraf command buffer.\n */\nuintptr_t ngf_get_mtl_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLRenderCommandEncoder. The caller is responsible for casting\n * the return value to a MTLRenderCommandEncoder.\n *\n * @param render_encoder A handle to a nicegraf render encoder.\n */\nuintptr_t ngf_get_mtl_render_encoder_handle(ngf_render_encoder render_encoder) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLBlitCommandEncoder. The caller is responsible for casting\n * the return value to a MTLBlitCommandEncoder.\n *\n * @param xfer_encoder A handle to a nicegraf transfer encoder.\n */\nuintptr_t ngf_get_mtl_xfer_encoder_handle(ngf_xfer_encoder xfer_encoder) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLComputeCommandEncoder. The caller is responsible for casting\n * the return value to a MTLComputeCommandEncoder.\n *\n * @param compute_encoder A handle to a nicegraf compute encoder.\n */\nuintptr_t ngf_get_mtl_compute_encoder_handle(ngf_compute_encoder compute_encoder) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uint32_t representing the underlying MTLPixelFormat. The caller is responsible for casting the return\n * value to a MTLPixelFormat.\n *\n * @param format A nicegraf image format.\n */\nuint32_t ngf_get_mtl_pixel_format_index(ngf_image_format format) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uintptr_t to the underlying MTLDevice. 
The caller is responsible for casting the return value\n * to a MTLDevice.\n */\nuintptr_t ngf_get_mtl_device() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the counter sample buffer attachment descriptor to be used by the next compute pass.\n *\n * @param cmd_buffer A handle to a nicegraf command buffer.\n * @param sample_buf_attachment_descriptor uintptr_t to MTLComputePassSampleBufferAttachmentDescriptor handle.\n */\nvoid ngf_mtl_set_sample_attachment_for_next_compute_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor ) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the counter sample buffer attachment descriptor to be used by the next render pass.\n *\n * @param cmd_buffer A handle to a nicegraf command buffer.\n * @param sample_buf_attachment_descriptor uintptr_t to MTLRenderPassSampleBufferAttachmentDescriptor handle.\n */\nvoid ngf_mtl_set_sample_attachment_for_next_render_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor ) NGF_NOEXCEPT;\n\n#ifdef __cplusplus\n}\n#endif \n\n"
  },
  {
    "path": "include/nicegraf-util.h",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"nicegraf.h\"\n\n#include <stdint.h>\n\n/**\n * @file\n * \\defgroup ngf_util Utility Library\n * \n * This module contains routines and structures that provide auxiliary functionality or help reduce boilerplate.\n */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/**\n * @struct ngf_util_graphics_pipeline_data\n * \\ingroup ngf_util\n * \n * Contains all the data describing a graphics pipeline, with the exception\n * of shader stages.\n * \n * See \\ref ngf_util_create_default_graphics_pipeline_data for more details.\n */\ntypedef struct ngf_util_graphics_pipeline_data {\n  ngf_graphics_pipeline_info pipeline_info; /**< Can be used to initialize a new pipeline object. 
*/\n  ngf_depth_stencil_info     depth_stencil_info;\n  ngf_vertex_input_info      vertex_input_info;\n  ngf_multisample_info       multisample_info;\n  ngf_rasterization_info     rasterization_info;\n  ngf_input_assembly_info    input_assembly_info;\n  ngf_specialization_info    spec_info;\n} ngf_util_graphics_pipeline_data;\n\n/**\n * \\ingroup ngf_util\n * \n * Creates a configuration for a graphics pipeline object with some pre-set defaults.\n * \n * The fields of the members of the resulting \\ref ngf_util_graphics_pipeline_data are set such that\n * they match OpenGL defaults. They can be adjusted later. The pointer fields of \\ref\n * ngf_util_graphics_pipeline_data::pipeline_info are set to point to the corresponding members of\n * \\ref ngf_util_graphics_pipeline_data.\n * \n * The only aspect of configuration that this function does not set are the programmable shader stages. After the application code sets those, \\ref\n * ngf_util_graphics_pipeline_data::pipeline_info can be used to create a new pipeline object.\n * \n * @param result Pipeline configuration data will be stored here.\n */\nvoid ngf_util_create_default_graphics_pipeline_data(ngf_util_graphics_pipeline_data* result);\n\n/**\n * \\ingroup ngf_util\n * \n * Converts a nicegraf error code to a human-readable string.\n * \n * @param err The error enum to get the string for.\n * @return A human-readable error message.\n */\nconst char* ngf_util_get_error_name(const ngf_error err);\n\n/**\n * \\ingroup ngf_util\n * \n * Rounds `value` up to the nearest multiple of `alignment`.\n */\nstatic inline size_t ngf_util_align_size(size_t value, size_t alignment) {\n    const size_t m = value % alignment;\n    return value + (m > 0 ? (alignment - m) : 0u);\n}\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "include/nicegraf-vk-handles.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n#include \"nicegraf.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/**\n * \\ingroup ngf\n *\n * Returns the underlying VkDevice handle cast to uintptr_t. The caller is responsible for casting\n * the return value to a VkDevice.\n */\nuintptr_t ngf_get_vk_device_handle() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns the underlying VkInstance handle cast to uintptr_t. The caller is responsible for casting\n * the return value to a VkInstance.\n */\nuintptr_t ngf_get_vk_instance_handle() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * \n * Returns a uintptr_t to the underlying VkImage. 
The caller is responsible for casting the return\n * value to a VkImage.\n *\n * @param image A handle to a nicegraf image.\n */\nuintptr_t ngf_get_vk_image_handle(ngf_image image) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * \n * Returns a uintptr_t to the underlying VkBuffer. The caller is responsible for casting the return\n * value to a VkBuffer.\n *\n * @param buffer A handle to a nicegraf buffer.\n */\nuintptr_t ngf_get_vk_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * \n * Returns a uintptr_t to the underlying VkCommandBuffer. The caller is responsible for casting\n * the return value to a VkCommandBuffer.\n *\n * @param cmd_buffer A handle to a nicegraf command buffer.\n */\nuintptr_t ngf_get_vk_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * \n * Returns a uintptr_t to the underlying VkSampler. The caller is responsible for casting the\n * return value to a VkSampler.\n *\n * @param sampler A handle to a nicegraf sampler.\n */\nuintptr_t ngf_get_vk_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns a uint32_t representing the underlying VkFormat. The caller is responsible for casting the return\n * value to a VkFormat.\n *\n * @param format A nicegraf image format.\n */\nuint32_t ngf_get_vk_image_format_index(ngf_image_format format) NGF_NOEXCEPT;\n\n#ifdef __cplusplus\n}\n#endif\n\n"
  },
  {
    "path": "include/nicegraf-wrappers.h",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n#pragma once\n\n#include \"nicegraf-util.h\"\n#include \"nicegraf.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n/**\n * @file\n * \\defgroup ngf_wrappers C++ Wrappers\n *\n * This module contains optional C++ wrappers for nicegraf structures and routines.\n * The \\ref ngf namespace contains aliases for most types without the `ngf_` prefix\n * (i.e. 
`ngf_extent3d` becomes `ngf::extent3d`).\n * Most functions are wrapped using static inline wrappers.\n */\n\nnamespace ngf {\n\n#define NGF_POD_TYPE_ALIAS(name)    using name = ngf_##name;\n#define NGF_OPAQUE_TYPE_ALIAS(name) using unowned_##name = ngf_##name;\n\nNGF_POD_TYPE_ALIAS(diagnostic_log_verbosity)\nNGF_POD_TYPE_ALIAS(diagnostic_message_type)\nNGF_POD_TYPE_ALIAS(renderdoc_info)\nNGF_POD_TYPE_ALIAS(diagnostic_callback)\nNGF_POD_TYPE_ALIAS(diagnostic_info)\nNGF_POD_TYPE_ALIAS(allocation_callbacks)\nNGF_POD_TYPE_ALIAS(device_handle)\nNGF_POD_TYPE_ALIAS(device_performance_tier)\nNGF_POD_TYPE_ALIAS(init_info)\nNGF_POD_TYPE_ALIAS(error)\nNGF_POD_TYPE_ALIAS(irect2d)\nNGF_POD_TYPE_ALIAS(extent3d)\nNGF_POD_TYPE_ALIAS(offset3d)\nNGF_POD_TYPE_ALIAS(stage_type)\nNGF_POD_TYPE_ALIAS(shader_stage_info)\nNGF_POD_TYPE_ALIAS(polygon_mode)\nNGF_POD_TYPE_ALIAS(cull_mode)\nNGF_POD_TYPE_ALIAS(front_face_mode)\nNGF_POD_TYPE_ALIAS(rasterization_info)\nNGF_POD_TYPE_ALIAS(compare_op)\nNGF_POD_TYPE_ALIAS(stencil_op)\nNGF_POD_TYPE_ALIAS(stencil_info)\nNGF_POD_TYPE_ALIAS(depth_stencil_info)\nNGF_POD_TYPE_ALIAS(blend_factor)\nNGF_POD_TYPE_ALIAS(blend_op)\nNGF_POD_TYPE_ALIAS(color_write_mask_bit)\nNGF_POD_TYPE_ALIAS(blend_info)\nNGF_POD_TYPE_ALIAS(type)\nNGF_POD_TYPE_ALIAS(vertex_input_rate)\nNGF_POD_TYPE_ALIAS(vertex_buf_binding_desc)\nNGF_POD_TYPE_ALIAS(vertex_attrib_desc)\nNGF_POD_TYPE_ALIAS(vertex_input_info)\nNGF_POD_TYPE_ALIAS(sample_count)\nNGF_POD_TYPE_ALIAS(multisample_info)\nNGF_POD_TYPE_ALIAS(image_format)\nNGF_POD_TYPE_ALIAS(attachment_type)\nNGF_POD_TYPE_ALIAS(attachment_description)\nNGF_POD_TYPE_ALIAS(attachment_descriptions)\nNGF_POD_TYPE_ALIAS(primitive_topology)\nNGF_POD_TYPE_ALIAS(constant_specialization)\nNGF_POD_TYPE_ALIAS(specialization_info)\nNGF_POD_TYPE_ALIAS(input_assembly_info)\nNGF_POD_TYPE_ALIAS(graphics_pipeline_info)\nNGF_POD_TYPE_ALIAS(compute_pipeline_info)\nNGF_POD_TYPE_ALIAS(descriptor_type)\nNGF_POD_TYPE_ALIAS(sampler_filter)\nNGF_POD_TYPE_ALIAS(sampl
er_wrap_mode)\nNGF_POD_TYPE_ALIAS(sampler_info)\nNGF_POD_TYPE_ALIAS(image_usage)\nNGF_POD_TYPE_ALIAS(image_type)\nNGF_POD_TYPE_ALIAS(image_info)\nNGF_POD_TYPE_ALIAS(cubemap_face)\nNGF_POD_TYPE_ALIAS(image_ref)\nNGF_POD_TYPE_ALIAS(image_view_info)\nNGF_POD_TYPE_ALIAS(clear)\nNGF_POD_TYPE_ALIAS(attachment_load_op)\nNGF_POD_TYPE_ALIAS(attachment_store_op)\nNGF_POD_TYPE_ALIAS(render_pass_info)\nNGF_POD_TYPE_ALIAS(xfer_pass_info)\nNGF_POD_TYPE_ALIAS(compute_pass_info)\nNGF_POD_TYPE_ALIAS(buffer_storage_type)\nNGF_POD_TYPE_ALIAS(buffer_usage)\nNGF_POD_TYPE_ALIAS(buffer_info)\nNGF_POD_TYPE_ALIAS(buffer_slice)\nNGF_POD_TYPE_ALIAS(texel_buffer_view_info)\nNGF_POD_TYPE_ALIAS(buffer_bind_info)\nNGF_POD_TYPE_ALIAS(image_sampler_bind_info)\nNGF_POD_TYPE_ALIAS(resource_bind_op)\nNGF_POD_TYPE_ALIAS(present_mode)\nNGF_POD_TYPE_ALIAS(colorspace)\nNGF_POD_TYPE_ALIAS(swapchain_info)\nNGF_POD_TYPE_ALIAS(context_info)\nNGF_POD_TYPE_ALIAS(cmd_buffer_info)\nNGF_POD_TYPE_ALIAS(frame_token)\nNGF_POD_TYPE_ALIAS(device_capabilities)\nNGF_POD_TYPE_ALIAS(device)\nNGF_POD_TYPE_ALIAS(image_write)\nNGF_OPAQUE_TYPE_ALIAS(shader_stage)\nNGF_OPAQUE_TYPE_ALIAS(graphics_pipeline)\nNGF_OPAQUE_TYPE_ALIAS(compute_pipeline)\nNGF_OPAQUE_TYPE_ALIAS(sampler)\nNGF_OPAQUE_TYPE_ALIAS(image)\nNGF_OPAQUE_TYPE_ALIAS(image_view)\nNGF_OPAQUE_TYPE_ALIAS(render_target_info)\nNGF_OPAQUE_TYPE_ALIAS(render_target)\nNGF_OPAQUE_TYPE_ALIAS(render_encoder)\nNGF_OPAQUE_TYPE_ALIAS(compute_encoder)\nNGF_OPAQUE_TYPE_ALIAS(xfer_encoder)\nNGF_OPAQUE_TYPE_ALIAS(buffer)\nNGF_OPAQUE_TYPE_ALIAS(texel_buffer_view)\nNGF_OPAQUE_TYPE_ALIAS(context)\nNGF_OPAQUE_TYPE_ALIAS(cmd_buffer)\n\nstatic inline error get_device_list(const device** devices, uint32_t* ndevices) noexcept {\n  return ngf_get_device_list(devices, ndevices);\n}\n\nstatic inline error initialize(const init_info* init_info) noexcept {\n  return ngf_initialize(init_info);\n}\n\nstatic inline void shutdown() noexcept {\n  ngf_shutdown();\n}\n\nstatic inline 
error\nresize_context(unowned_context ctx, uint32_t new_width, uint32_t new_height) noexcept {\n  return ngf_resize_context(ctx, new_width, new_height);\n}\n\nstatic inline error set_context(unowned_context ctx) noexcept {\n  return ngf_set_context(ctx);\n}\n\nstatic inline unowned_context get_context() noexcept {\n  return ngf_get_context();\n}\n\nstatic inline error begin_frame(frame_token* token) noexcept {\n  return ngf_begin_frame(token);\n}\n\nstatic inline error end_frame(frame_token token) noexcept {\n  return ngf_end_frame(token);\n}\n\nstatic inline error get_current_swapchain_image(frame_token token, unowned_image* result) noexcept {\n  return ngf_get_current_swapchain_image(token, result);\n}\n\nstatic inline const device_capabilities* get_device_capabilities() noexcept {\n  return ngf_get_device_capabilities();\n}\n\nstatic inline unowned_render_target default_render_target() noexcept {\n  return ngf_default_render_target();\n}\n\nstatic inline const attachment_descriptions* default_render_target_attachment_descs() noexcept {\n  return ngf_default_render_target_attachment_descs();\n}\n\nstatic inline void* buffer_map_range(unowned_buffer buf, size_t offset, size_t size) noexcept {\n  return ngf_buffer_map_range(buf, offset, size);\n}\n\nstatic inline void buffer_flush_range(unowned_buffer buf, size_t offset, size_t size) noexcept {\n  ngf_buffer_flush_range(buf, offset, size);\n}\n\nstatic inline void buffer_unmap(unowned_buffer buf) noexcept {\n  ngf_buffer_unmap(buf);\n}\n\nstatic inline void finish() noexcept {\n  ngf_finish();\n}\n\nstatic inline error start_cmd_buffer(unowned_cmd_buffer buf, frame_token token) noexcept {\n  return ngf_start_cmd_buffer(buf, token);\n}\n\nstatic inline error submit_cmd_buffers(uint32_t nbuffers, unowned_cmd_buffer* bufs) noexcept {\n  return ngf_submit_cmd_buffers(nbuffers, bufs);\n}\n\nstatic inline void\ncmd_bind_gfx_pipeline(unowned_render_encoder buf, unowned_graphics_pipeline pipeline) noexcept {\n  
ngf_cmd_bind_gfx_pipeline(buf, pipeline);\n}\n\nstatic inline void\ncmd_bind_compute_pipeline(unowned_compute_encoder buf, unowned_compute_pipeline pipeline) noexcept {\n  ngf_cmd_bind_compute_pipeline(buf, pipeline);\n}\n\nstatic inline void cmd_viewport(unowned_render_encoder buf, const irect2d* r) noexcept {\n  ngf_cmd_viewport(buf, r);\n}\n\nstatic inline void cmd_scissor(unowned_render_encoder enc, const irect2d* r) noexcept {\n  ngf_cmd_scissor(enc, r);\n}\n\nstatic inline void\ncmd_stencil_reference(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept {\n  ngf_cmd_stencil_reference(enc, front, back);\n}\n\nstatic inline void\ncmd_stencil_compare_mask(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept {\n  ngf_cmd_stencil_compare_mask(enc, front, back);\n}\n\nstatic inline void\ncmd_stencil_write_mask(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept {\n  ngf_cmd_stencil_write_mask(enc, front, back);\n}\n\nstatic inline void cmd_set_depth_bias(\n    unowned_render_encoder enc,\n    float                  const_scale,\n    float                  slope_scale,\n    float                  clamp) noexcept {\n  ngf_cmd_set_depth_bias(enc, const_scale, slope_scale, clamp);\n}\n\nstatic inline void cmd_bind_resources(\n    unowned_render_encoder  enc,\n    const resource_bind_op* bind_operations,\n    uint32_t                nbind_operations) noexcept {\n  ngf_cmd_bind_resources(enc, bind_operations, nbind_operations);\n}\n\nstatic inline void cmd_bind_compute_resources(\n    unowned_compute_encoder enc,\n    const resource_bind_op* bind_operations,\n    uint32_t                nbind_operations) noexcept {\n  ngf_cmd_bind_compute_resources(enc, bind_operations, nbind_operations);\n}\n\nstatic inline void cmd_bind_attrib_buffer(\n    unowned_render_encoder enc,\n    unowned_buffer         vbuf,\n    uint32_t               binding,\n    size_t                 offset) noexcept {\n  ngf_cmd_bind_attrib_buffer(enc, vbuf, 
binding, offset);\n}\n\nstatic inline void cmd_bind_index_buffer(\n    unowned_render_encoder enc,\n    unowned_buffer         idxbuf,\n    size_t                 offset,\n    type                   index_type) noexcept {\n  ngf_cmd_bind_index_buffer(enc, idxbuf, offset, index_type);\n}\n\nstatic inline void cmd_draw(\n    unowned_render_encoder enc,\n    bool                   indexed,\n    uint32_t               first_element,\n    uint32_t               nelements,\n    uint32_t               ninstances) noexcept {\n  ngf_cmd_draw(enc, indexed, first_element, nelements, ninstances);\n}\n\nstatic inline void cmd_dispatch(\n    unowned_compute_encoder enc,\n    uint32_t                x_threadgroups,\n    uint32_t                y_threadgroups,\n    uint32_t                z_threadgroups) noexcept {\n  ngf_cmd_dispatch(enc, x_threadgroups, y_threadgroups, z_threadgroups);\n}\n\nstatic inline void cmd_copy_buffer(\n    unowned_xfer_encoder enc,\n    unowned_buffer       src,\n    unowned_buffer       dst,\n    size_t               size,\n    size_t               src_offset,\n    size_t               dst_offset) noexcept {\n  ngf_cmd_copy_buffer(enc, src, dst, size, src_offset, dst_offset);\n}\n\nstatic inline void cmd_write_image(\n    unowned_xfer_encoder enc,\n    unowned_buffer       src,\n    unowned_image        dst,\n    const image_write*   writes,\n    uint32_t             nwrites) noexcept {\n  ngf_cmd_write_image(enc, src, dst, writes, nwrites);\n}\n\nstatic inline void cmd_copy_image_to_buffer(\n    unowned_xfer_encoder enc,\n    const image_ref      src,\n    offset3d             src_offset,\n    extent3d             src_extent,\n    uint32_t             nlayers,\n    unowned_buffer       dst,\n    size_t               dst_offset) noexcept {\n  ngf_cmd_copy_image_to_buffer(enc, src, src_offset, src_extent, nlayers, dst, dst_offset);\n}\n\nstatic inline error cmd_generate_mipmaps(unowned_xfer_encoder xfenc, unowned_image img) noexcept {\n  return 
ngf_cmd_generate_mipmaps(xfenc, img);\n}\n\nstatic inline void cmd_begin_debug_group(unowned_cmd_buffer cmd_buffer, const char* name) noexcept {\n  ngf_cmd_begin_debug_group(cmd_buffer, name);\n}\n\nstatic inline void cmd_end_current_debug_group(unowned_cmd_buffer cmd_buffer) noexcept {\n  ngf_cmd_end_current_debug_group(cmd_buffer);\n}\n\nstatic inline void renderdoc_capture_next_frame() noexcept {\n  ngf_renderdoc_capture_next_frame();\n}\n\nstatic inline void renderdoc_capture_begin() noexcept {\n  ngf_renderdoc_capture_begin();\n}\n\nstatic inline void renderdoc_capture_end() noexcept {\n  ngf_renderdoc_capture_end();\n}\n\nnamespace detail {\n\ntemplate<class T> struct remove_ref {\n  using Type = T;\n};\ntemplate<class T> struct remove_ref<T&> {\n  using Type = T;\n};\ntemplate<class T> struct remove_ref<T&&> {\n  using Type = T;\n};\n\ntemplate<class T> using remove_ref_t = typename remove_ref<T>::Type;\n\ntemplate<class T> constexpr T&& fwd(remove_ref_t<T>& x) noexcept {\n  return (T&&)x;\n}\ntemplate<class T> constexpr T&& fwd(remove_ref_t<T>&& x) noexcept {\n  return (T&&)x;\n}\n\ntemplate<class T> constexpr remove_ref_t<T>&& move(T&& x) noexcept {\n  return (remove_ref_t<T>&&)x;\n}\n\n}  // namespace detail\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A convenience macro to allow easily propagating nicegraf errors. The provided expression must\n * evaluate to a \\ref ngf_error. If the result of the expression is not \\ref NGF_ERROR_OK, the value\n * is returned from the calling function. 
Note: the calling function must also return an \\ref\n * ngf_error.\n */\n#define NGF_RETURN_IF_ERROR(expr)        \\\n  {                                      \\\n    const ngf_error tmp = (expr);        \\\n    if (tmp != NGF_ERROR_OK) return tmp; \\\n  }\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A move-only RAII wrapper over nicegraf handles that provides unique ownership semantics.\n */\ntemplate<class T, class ObjectManagementFuncs> class unique_handle {\n  public:\n  /** Wraps a raw handle to a nicegraf object. */\n  explicit unique_handle(T raw) : handle_(raw) {\n  }\n\n  /** Wraps a null handle. */\n  unique_handle() : handle_(nullptr) {\n  }\n\n  unique_handle(const unique_handle&) = delete;\n  unique_handle(unique_handle&& other) : handle_(nullptr) {\n    *this = detail::move(other);\n  }\n\n  /** Disposes of the owned handle, if it is not null. */\n  ~unique_handle() {\n    destroy_if_necessary();\n  }\n\n  unique_handle& operator=(const unique_handle&) = delete;\n\n  /** Takes ownership of the handle wrapped by another object. */\n  unique_handle& operator=(unique_handle&& other) noexcept {\n    destroy_if_necessary();\n    handle_       = other.handle_;\n    other.handle_ = nullptr;\n    return *this;\n  }\n\n  typedef typename ObjectManagementFuncs::InitType init_type;\n\n  static unique_handle create(const typename ObjectManagementFuncs::InitType& info, error* err = nullptr) {\n    unique_handle h;\n    auto e = h.initialize(info);\n    if (err) *err = e;\n    return h;\n  }\n\n  /** Creates a new handle using the provided configuration, and takes ownership of it. 
*/\n  ngf_error initialize(const typename ObjectManagementFuncs::InitType& info) {\n    destroy_if_necessary();\n    const ngf_error err = ObjectManagementFuncs::create(&info, &handle_);\n    if (err != NGF_ERROR_OK) handle_ = nullptr;\n    return err;\n  }\n\n  struct make_result {\n    unique_handle   handle;\n    const ngf_error error;\n  };\n  static make_result make(const init_type& info) {\n    unique_handle   handle;\n    const ngf_error error = handle.initialize(info);\n    return make_result {detail::move(handle), error};\n  }\n\n  /** @return The raw handle to the wrapped object. */\n  T get() {\n    return handle_;\n  }\n\n  /** @return The raw handle to the wrapped object. */\n  const T get() const {\n    return handle_;\n  }\n\n  /**\n   * Relinquishes ownership of the wrapped object and returns a raw handle to it. After this call\n   * completes, it is the responsibility of the calling code to dispose of the handle properly when\n   * it is no longer needed.\n   */\n  T release() {\n    T tmp   = handle_;\n    handle_ = nullptr;\n    return tmp;\n  }\n\n  /** Implicit conversion to the raw handle type. */\n  operator T() {\n    return handle_;\n  }\n\n  /** Implicit conversion to the raw handle type. 
*/\n  operator const T() const {\n    return handle_;\n  }\n\n  /**\n   * Wraps a raw handle to a nicegraf object.\n   */\n  void reset(T new_handle) {\n    destroy_if_necessary();\n    handle_ = new_handle;\n  }\n\n  private:\n  void destroy_if_necessary() {\n    if (handle_) {\n      ObjectManagementFuncs::destroy(handle_);\n      handle_ = nullptr;\n    }\n  }\n\n  T handle_;\n};\n\n#define NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(name)                  \\\n  struct ngf_##name##_ManagementFuncs {                            \\\n    using InitType = ngf_##name##_info;                            \\\n    static ngf_error create(const InitType* info, ngf_##name* r) { \\\n      return ngf_create_##name(info, r);                           \\\n    }                                                              \\\n    static void destroy(ngf_##name handle) {                       \\\n      ngf_destroy_##name(handle);                                  \\\n    }                                                              \\\n  };\n\n#define NGF_DEFINE_WRAPPER_TYPE(name) \\\n  using name = unique_handle<ngf_##name, ngf_##name##_ManagementFuncs>;\n\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(shader_stage);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(graphics_pipeline);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(compute_pipeline);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(image);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(image_view);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(sampler);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(render_target);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(buffer);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(texel_buffer_view);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(context);\nNGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(cmd_buffer);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_shader_stage.\n */\nNGF_DEFINE_WRAPPER_TYPE(shader_stage);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_graphics_pipeline.\n */\nNGF_DEFINE_WRAPPER_TYPE(graphics_pipeline);\n\n/**\n * 
\\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_compute_pipeline.\n */\nNGF_DEFINE_WRAPPER_TYPE(compute_pipeline);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref unowned_image.\n */\nNGF_DEFINE_WRAPPER_TYPE(image);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref unowned_image_view.\n */\nNGF_DEFINE_WRAPPER_TYPE(image_view);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref unowned_sampler.\n */\nNGF_DEFINE_WRAPPER_TYPE(sampler);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_render_target.\n */\nNGF_DEFINE_WRAPPER_TYPE(render_target);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref unowned_buffer.\n */\nNGF_DEFINE_WRAPPER_TYPE(buffer);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref unowned_texel_buffer_view.\n */\nNGF_DEFINE_WRAPPER_TYPE(texel_buffer_view);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_context.\n */\nNGF_DEFINE_WRAPPER_TYPE(context);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A RAII wrapper for \\ref ngf_cmd_buffer.\n */\nNGF_DEFINE_WRAPPER_TYPE(cmd_buffer);\n\n/**\n * \\ingroup ngf_wrappers\n *\n * Wraps a render encoder with unique ownership semantics.\n */\nclass render_encoder {\n  public:\n  /**\n   * Creates a new render encoder for the given command buffer. Has the same semantics as \\ref\n   * ngf_cmd_begin_render_pass.\n   *\n   * @param cmd_buf The command buffer to create a new render encoder for.\n   * @param pass_info Render pass description.\n   */\n  explicit render_encoder(ngf_cmd_buffer cmd_buf, const ngf_render_pass_info& pass_info) {\n    ngf_cmd_begin_render_pass(cmd_buf, &pass_info, &enc_);\n  }\n\n  /**\n   * Creates a new render encoder for the given command buffer. 
Has the same semantics as \\ref\n   * ngf_cmd_begin_render_pass_simple.\n   *\n   * @param cmd_buf The command buffer to create a new render encoder for.\n   * @param rt The render target to render into.\n   * @param clear_color_r A floating point number between 0.0 and 1.0 specifying the red component\n   * of the clear color.\n   * @param clear_color_g A floating point number between 0.0 and 1.0 specifying the green component\n   * of the clear color.\n   * @param clear_color_b A floating point number between 0.0 and 1.0 specifying the blue component\n   * of the clear color.\n   * @param clear_color_a A floating point number between 0.0 and 1.0 specifying the alpha component\n   * of the clear color.\n   * @param clear_depth A floating point value to clear the depth attachment to (if the associated\n   * render target has one).\n   * @param clear_stencil An integer value to clear the stencil buffer to (if the assocuated render\n   * taget has one).\n   */\n  explicit render_encoder(\n      unowned_cmd_buffer    cmd_buf,\n      unowned_render_target rt,\n      float                 clear_color_r,\n      float                 clear_color_g,\n      float                 clear_color_b,\n      float                 clear_color_a,\n      float                 clear_depth,\n      uint32_t              clear_stencil) {\n    ngf_cmd_begin_render_pass_simple(\n        cmd_buf,\n        rt,\n        clear_color_r,\n        clear_color_g,\n        clear_color_b,\n        clear_color_a,\n        clear_depth,\n        clear_stencil,\n        &enc_);\n  }\n\n  /**\n   * Finishes the wrapped render pass.\n   */\n  ~render_encoder() {\n    if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_render_pass(enc_);\n  }\n\n  render_encoder(render_encoder&& other) noexcept {\n    *this = detail::move(other);\n  }\n\n  render_encoder& operator=(render_encoder&& other) noexcept {\n    enc_                            = other.enc_;\n    other.enc_.pvt_data_donotuse.d0 = 0u;\n    
other.enc_.pvt_data_donotuse.d1 = 0u;\n    return *this;\n  }\n\n  render_encoder(const render_encoder&)            = delete;\n  render_encoder& operator=(const render_encoder&) = delete;\n\n  /**\n   * Implicit conversion to \\ref unowned_render_encoder.\n   */\n  operator unowned_render_encoder() {\n    return enc_;\n  }\n\n  private:\n  unowned_render_encoder enc_ {};\n};\n\n/**\n * \\ingroup ngf_wrappers\n *\n * Wraps a transfer encoder with unique ownership semantics.\n */\nclass xfer_encoder {\n  public:\n  /**\n   * Creates a new transfer encoder for the given command buffer.\n   *\n   * @param cmd_buf The command buffer to create the transfer encoder for.\n   */\n  explicit xfer_encoder(unowned_cmd_buffer cmd_buf, const xfer_pass_info& pass_info) {\n    ngf_cmd_begin_xfer_pass(cmd_buf, &pass_info, &enc_);\n  }\n\n  /**\n   * Ends the wrapped transfer pass.\n   */\n  ~xfer_encoder() {\n    if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_xfer_pass(enc_);\n  }\n\n  xfer_encoder(xfer_encoder&& other) noexcept {\n    *this = detail::move(other);\n  }\n\n  xfer_encoder& operator=(xfer_encoder&& other) noexcept {\n    enc_                            = other.enc_;\n    other.enc_.pvt_data_donotuse.d0 = 0u;\n    other.enc_.pvt_data_donotuse.d1 = 0u;\n    return *this;\n  }\n\n  xfer_encoder(const xfer_encoder&)            = delete;\n  xfer_encoder& operator=(const xfer_encoder&) = delete;\n\n  /**\n   * Implicit conversion to \\ref ngf_xfer_encoder.\n   */\n  operator unowned_xfer_encoder() {\n    return enc_;\n  }\n\n  private:\n  unowned_xfer_encoder enc_;\n};\n\n/**\n * \\ingroup ngf_wrappers\n *\n * Wraps a compute encoder with unique ownership semantics.\n */\nclass compute_encoder {\n  public:\n  /**\n   * Creates a new compute encoder for the given command buffer. 
Has the same semantics as \\ref\n   * ngf_cmd_begin_compute_pass.\n   *\n   * @param cmd_buf The command buffer to create a new compute encoder for.\n   */\n  explicit compute_encoder(ngf_cmd_buffer cmd_buf, const ngf_compute_pass_info& pass_info) {\n    ngf_cmd_begin_compute_pass(cmd_buf, &pass_info, &enc_);\n  }\n\n  /**\n   * Creates a new compute encoder for the given command buffer that doesn't execute any\n   * synchronization\n   *\n   * @param cmd_buf The command buffer to create a new compute encoder for.\n   */\n  explicit compute_encoder(ngf_cmd_buffer cmd_buf) {\n    ngf_cmd_begin_compute_pass(cmd_buf, nullptr, &enc_);\n  }\n\n  /**\n   * Finishes the wrapped compute pass.\n   */\n  ~compute_encoder() {\n    if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_compute_pass(enc_);\n  }\n\n  compute_encoder(compute_encoder&& other) noexcept {\n    *this = detail::move(other);\n  }\n\n  compute_encoder& operator=(compute_encoder&& other) noexcept {\n    enc_                            = other.enc_;\n    other.enc_.pvt_data_donotuse.d0 = 0u;\n    other.enc_.pvt_data_donotuse.d1 = 0u;\n    return *this;\n  }\n\n  compute_encoder(const compute_encoder&)            = delete;\n  compute_encoder& operator=(const compute_encoder&) = delete;\n\n  /**\n   * Implicit conversion to \\ref unowned_compute_encoder.\n   */\n  operator unowned_compute_encoder() {\n    return enc_;\n  }\n\n  private:\n  unowned_compute_encoder enc_ {};\n};\n\n/**\n * \\ingroup ngf_wrappers\n *\n * Convenience wrapper for binding resources. See \\ref cmd_bind_resources for details.\n */\ntemplate<uint32_t S> struct descriptor_set {\n  /**\n   * Convenience wrapper for binding resources. 
See \\ref cmd_bind_resources for details.\n   */\n  template<uint32_t B> struct binding {\n    /**\n     * Creates a \\ref resource_bind_op for a \\ref unowned_image.\n     *\n     * @param image The image to bind.\n     * @param array_index If the descriptor is an array, specifies the index of the array element to\n     * bind the object to.\n     */\n    static resource_bind_op texture(const unowned_image image, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                              = NGF_DESCRIPTOR_IMAGE;\n      op.target_binding                    = B;\n      op.target_set                        = S;\n      op.info.image_sampler.is_image_view  = false;\n      op.info.image_sampler.resource.image = image;\n      op.array_index                       = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for an \\ref unowned_image that is to be used as a storage\n     * image\n     *\n     * @param image The image to bind.\n     */\n    static resource_bind_op storage_image(const unowned_image image, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                              = NGF_DESCRIPTOR_STORAGE_IMAGE;\n      op.target_binding                    = B;\n      op.target_set                        = S;\n      op.info.image_sampler.is_image_view  = false;\n      op.info.image_sampler.resource.image = image;\n      op.array_index                       = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a \\ref unowned_image_view.\n     *\n     * @param view The view to bind.\n     * @param array_index If the descriptor is an array, specifies the index of the array element to\n     * bind the object to.\n     */\n    static resource_bind_op texture(const unowned_image_view view, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                             = NGF_DESCRIPTOR_IMAGE;\n      op.target_binding           
        = B;\n      op.target_set                       = S;\n      op.info.image_sampler.is_image_view = true;\n      op.info.image_sampler.resource.view = view;\n      op.array_index                      = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for an \\ref unowned_image_view that is to be used as a\n     * storage image\n     *\n     * @param image The image to bind.\n     */\n    static resource_bind_op\n    storage_image(const unowned_image_view view, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                             = NGF_DESCRIPTOR_STORAGE_IMAGE;\n      op.target_binding                   = B;\n      op.target_set                       = S;\n      op.info.image_sampler.is_image_view = true;\n      op.info.image_sampler.resource.view = view;\n      op.array_index                      = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a storage buffer.\n     *\n     * @param buf The buffer to bind as a storage buffer.\n     * @param offset The offset at which to bind the buffer.\n     * @param range The extent of the bound memory.\n     */\n    static resource_bind_op storage_buffer(\n        const unowned_buffer buf,\n        size_t               offset,\n        size_t               range,\n        uint32_t             array_index = 0u) {\n      resource_bind_op op;\n      op.type               = NGF_DESCRIPTOR_STORAGE_BUFFER;\n      op.target_binding     = B;\n      op.target_set         = S;\n      op.info.buffer.buffer = buf;\n      op.info.buffer.offset = offset;\n      op.info.buffer.range  = range;\n      op.array_index        = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a uniform buffer.\n     *\n     * @param buf The buffer to bind as a uniform buffer.\n     * @param offset The offset at which to bind the buffer.\n     * @param range The extent of the bound memory.\n     
*/\n    static resource_bind_op uniform_buffer(\n        const unowned_buffer buf,\n        size_t               offset,\n        size_t               range,\n        uint32_t             array_index = 0u) {\n      resource_bind_op op;\n      op.type               = NGF_DESCRIPTOR_UNIFORM_BUFFER;\n      op.target_binding     = B;\n      op.target_set         = S;\n      op.info.buffer.buffer = buf;\n      op.info.buffer.offset = offset;\n      op.info.buffer.range  = range;\n      op.array_index        = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a texel buffer.\n     *\n     * @param buf_view The texel buffer view to bind.\n     */\n    static resource_bind_op\n    texel_buffer(const unowned_texel_buffer_view buf_view, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                   = NGF_DESCRIPTOR_TEXEL_BUFFER;\n      op.target_binding         = B;\n      op.target_set             = S;\n      op.info.texel_buffer_view = buf_view;\n      op.array_index            = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a sampler.\n     *\n     * @param sampler The sampler to use.\n     */\n    static resource_bind_op sampler(const unowned_sampler sampler, uint32_t array_index = 0u) {\n      resource_bind_op op;\n      op.type                       = NGF_DESCRIPTOR_SAMPLER;\n      op.target_binding             = B;\n      op.target_set                 = S;\n      op.info.image_sampler.sampler = sampler;\n      op.array_index                = array_index;\n      return op;\n    }\n\n    /**\n     * Creates a \\ref resource_bind_op for a combined image + sampler.\n     *\n     * @param image The image part of the combined image + sampler.\n     * @param sampler The sampler part 
of the combined image + sampler.\n     */\n    static resource_bind_op texture_and_sampler(\n        const unowned_image   image,\n        const unowned_sampler sampler,\n        uint32_t              array_index = 0u) {\n      resource_bind_op op;\n      op.type                              = NGF_DESCRIPTOR_IMAGE_AND_SAMPLER;\n      op.target_binding                    = B;\n      op.target_set                        = S;\n      op.info.image_sampler.is_image_view  = false;\n      op.info.image_sampler.resource.image = image;\n      op.info.image_sampler.sampler        = sampler;\n      op.array_index                       = array_index;\n      return op;\n    }\n  };\n};\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A convenience function for binding many resources at once to the shader. Example usage:\n *\n * ```\n * ngf::cmd_bind_resources(your_render_encoder,\n *                         ngf::descriptor_set<0>::binding<0>::image(your_image),\n *                         ngf::descriptor_set<0>::binding<1>::sampler(your_sampler),\n *                         ngf::descriptor_set<1>::binding<0>::uniform_buffer(your_buffer));\n * ```\n */\ntemplate<class... Args> void cmd_bind_resources(unowned_render_encoder enc, const Args&&... args) {\n  const resource_bind_op ops[] = {detail::fwd<const Args>(args)...};\n  ngf_cmd_bind_resources(enc, ops, sizeof(ops) / sizeof(resource_bind_op));\n}\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A convenience function for binding many resources at once to the shader. Example usage:\n *\n * ```\n * ngf::cmd_bind_resources(your_compute_encoder,\n *                         ngf::descriptor_set<0>::binding<0>::image(your_image),\n *                         ngf::descriptor_set<0>::binding<1>::sampler(your_sampler),\n *                         ngf::descriptor_set<1>::binding<0>::uniform_buffer(your_buffer));\n * ```\n *\n */\ntemplate<class... Args> void cmd_bind_resources(unowned_compute_encoder enc, const Args&&... 
args) {\n  const resource_bind_op ops[] = {detail::fwd<const Args>(args)...};\n  ngf_cmd_bind_compute_resources(enc, ops, sizeof(ops) / sizeof(resource_bind_op));\n}\n\n/**\n * \\ingroup ngf_wrappers\n *\n * A convenience class for dynamically updated structured uniform data.\n */\ntemplate<typename T> class uniform_multibuffer {\n  public:\n  uniform_multibuffer() = default;\n  uniform_multibuffer(uniform_multibuffer&& other) {\n    *this = detail::move(other);\n  }\n  uniform_multibuffer(const uniform_multibuffer&) = delete;\n\n  uniform_multibuffer& operator=(uniform_multibuffer&& other) = default;\n  uniform_multibuffer& operator=(const uniform_multibuffer&)  = delete;\n\n  ngf_error initialize(const uint32_t frames) {\n    const size_t alignment    = ngf_get_device_capabilities()->uniform_buffer_offset_alignment;\n    const size_t aligned_size = ngf_util_align_size(sizeof(T), alignment);\n    NGF_RETURN_IF_ERROR(buf_.initialize(buffer_info {\n        aligned_size * frames,\n        NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n        NGF_BUFFER_USAGE_UNIFORM_BUFFER}));\n    nframes_                = frames;\n    aligned_per_frame_size_ = aligned_size;\n    return NGF_ERROR_OK;\n  }\n\n  void write(const T& data) {\n    current_offset_  = (frame_)*aligned_per_frame_size_;\n    void* mapped_buf = ngf_buffer_map_range(buf_.get(), current_offset_, aligned_per_frame_size_);\n    memcpy(mapped_buf, (void*)&data, sizeof(T));\n    ngf_buffer_flush_range(buf_.get(), 0, aligned_per_frame_size_);\n    ngf_buffer_unmap(buf_.get());\n    frame_ = (frame_ + 1u) % nframes_;\n  }\n\n  resource_bind_op bind_op_at_current_offset(\n      uint32_t set,\n      uint32_t binding,\n      size_t   additional_offset = 0,\n      size_t   range             = 0) const {\n    resource_bind_op op {};\n    op.type               = NGF_DESCRIPTOR_UNIFORM_BUFFER;\n    op.target_binding     = binding;\n    op.target_set         = set;\n    op.info.buffer.buffer = buf_.get();\n    op.info.buffer.offset = 
current_offset_ + additional_offset;\n    op.info.buffer.range  = (range == 0) ? aligned_per_frame_size_ : range;\n    return op;\n  }\n\n  private:\n  buffer   buf_;\n  uint32_t frame_                  = 0;\n  size_t   current_offset_         = 0;\n  size_t   aligned_per_frame_size_ = 0;\n  uint32_t nframes_                = 0;\n};\n\n}  // namespace ngf\n"
  },
  {
    "path": "include/nicegraf.h",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/**\n * @file\n * @brief nicegraf declarations.\n *\n * This file contains the core nicegraf API declarations.\n */\n\n/**\n * \\mainpage Reference Documentation\n *\n * These pages contain documentation automatically generated from nicegraf's\n * source code comments. 
The text's purpose is to concisely describe the intended\n * behavior and failure modes of the API.\n *\n * If viewing this document in a web browser or a PDF viewer, click one of the\n * following links to proceed to the documentation for the corresponding module.\n *\n *  - \\ref ngf\n *  - \\ref ngf_util\n *  - \\ref ngf_wrappers\n */\n\n/**\n * \\defgroup ngf Core C API\n * This section contains documentation for the core nicegraf routines,\n * structures and enumerations.\n *\n * \\subsection core-remarks General Remarks\n *\n * - The library is currently not intended to be linked dynamically.\n *\n * - When nicegraf's C headers are included from C++, all global functions\n *   within them are automatically declared to have C linkage. Additionally,\n *   they are declared to be noexcept.\n *\n * \\subsection object-model Objects\n *\n * nicegraf objects, such as images, buffers, render targets, etc., are\n * represented using opaque handles. The objects are constructed and destroyed\n * explicitly by the application, and it is the responsibility of the\n * application to ensure that the order of destruction is correct.\n * For applications written in C++, a set of wrappers that automate object\n * lifetime management is available. See \\ref ngf_wrappers for details.\n *\n * \\subsection error-reporting Error Reporting\n *\n * Most nicegraf routines report their completion status by returning an\n * \\ref ngf_error, and write their results to out-parameters. The returned value\n * is a generic error code. Detailed, human-readable information about errors\n * may vary from platform to platform; nicegraf reports it by invoking a\n * user-provided callback function (see \\ref ngf_diagnostic_info). 
The callback\n * function must accept the diagnostic message type (see\n * \\ref ngf_diagnostic_message_type), an arbitrary void pointer (the value of\n * which the user may specify when providing the callback), a printf-style\n * format string, and an arbitrary number of arguments specifying the data for\n * the format-string.\n *\n * \\subsection host-memory-management Host Memory Management\n *\n * By default, nicegraf uses the standard malloc/free to manage host memory for\n * internal purposes. The client may override this behavior by supplying custom\n * memory allocation callbacks (see \\ref ngf_allocation_callbacks).\n *\n * \\subsection gpu-memory-management GPU Memory Management\n *\n * nicegraf internally manages GPU memory for all backends. It is currently not\n * possible for clients to override this behavior and do their own GPU memory\n * management.\n *\n */\n\n#pragma once\n\n#include <stddef.h>\n#include <stdint.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#define NGF_NOEXCEPT noexcept\n#else\n#include <stdbool.h>\n#define NGF_NOEXCEPT\n#endif\n\n#define NGF_VER_MAJ 0\n#define NGF_VER_MIN 0\n\n#ifdef _MSC_VER\n#pragma region ngf_type_declarations\n#endif\n\n/**\n * @enum ngf_diagnostic_log_verbosity\n * \\ingroup ngf\n * Verbosity levels for the diagnostic message log.\n */\ntypedef enum ngf_diagnostic_log_verbosity {\n  /**\n   * \\ingroup ngf\n   * Normal level, reports only severe errors. */\n  NGF_DIAGNOSTICS_VERBOSITY_DEFAULT,\n\n  /**\n   * \\ingroup ngf\n   * Recommended for debug builds, may induce performance overhead. */\n  NGF_DIAGNOSTICS_VERBOSITY_DETAILED\n} ngf_diagnostic_log_verbosity;\n\n/**\n * @enum ngf_diagnostic_message_type\n * \\ingroup ngf\n * Type of a diagnostic log entry.\n */\ntypedef enum ngf_diagnostic_message_type {\n  /**\n   * \\ingroup ngf\n   * Informational message, not actionable. 
*/\n  NGF_DIAGNOSTIC_INFO,\n\n  /**\n   * \\ingroup ngf\n   * Message warns of a potential issue with an API call.*/\n  NGF_DIAGNOSTIC_WARNING,\n\n  /**\n   * \\ingroup ngf\n   * Message provides details of an API call failure or a severe performance issue. */\n  NGF_DIAGNOSTIC_ERROR\n} ngf_diagnostic_message_type;\n\n/**\n * @struct ngf_renderdoc_info\n *\n * Information for initializing the RenderDoc API.\n */\ntypedef struct ngf_renderdoc_info {\n  /**\n   * Relative (to process) or absolute path to RenderDoc library. If this string is NULL,\n   * RenderDoc will not be initialized.\n   */\n  const char* renderdoc_lib_path;\n\n  /**\n   * Template for how RenderDoc captures are saved. If template is \"example/capture\", captures will\n   * be saved as \"example/capture_1234.rdc\".\n   */\n  const char* renderdoc_destination_template;\n} ngf_renderdoc_info;\n\n/**\n * The diagnostic callback function type.\n */\ntypedef void (*ngf_diagnostic_callback)(ngf_diagnostic_message_type, void*, const char*, ...);\n\n/**\n * @struct ngf_diagnostic_info\n * \\ingroup ngf\n * Diagnostic configuration.\n */\ntypedef struct ngf_diagnostic_info {\n  ngf_diagnostic_log_verbosity verbosity; /**< Diagnostic log verbosity. */\n  void*                        userdata;  /**< Arbitrary pointer that will\n                                               be passed as-is to the\n                                               callback. 
*/\n  ngf_diagnostic_callback callback;       /**< Pointer to the diagnostic\n                                               message callback function.*/\n  bool enable_debug_groups; /**< Indicates whether to enable debug group functionality.\n                                 See \\ref ngf_cmd_begin_debug_group for details.*/\n} ngf_diagnostic_info;\n\n/**\n * @struct ngf_allocation_callbacks\n * \\ingroup ngf\n * Specifies host memory allocation callbacks for the library's internal needs.\n */\ntypedef struct ngf_allocation_callbacks {\n  /**\n   * This callback shall allocate a region of memory that is able to fit `nobjs` objects\n   * of size `obj_size`, and return a pointer to the allocated region.\n   * The starting address of the allocated region shall have the largest alignment for the\n   * target platform.\n   */\n  void* (*allocate)(size_t obj_size, size_t nobjs, void* userdata);\n\n  /**\n   * This callback shall free a region allocated by the custom allocator. The count\n   * and size of objects in the region are supplied as additional parameters.\n   */\n  void (*free)(void* ptr, size_t obj_size, size_t nobjs, void* userdata);\n\n  /**\n   * An arbitrary pointer that will be passed as-is to the allocate and free callbacks.\n   */\n  void* userdata;\n} ngf_allocation_callbacks;\n\n/**\n * @typedef ngf_device_handle\n * \\ingroup ngf\n *\n * A handle that uniquely identifies a rendering device.\n *\n * Note that the value of the handle corresponding to the same exact physical device may be\n * different across different instances of the same client. In other words, if the client\n * application shuts down, then starts up again, it may get different values for device handles than\n * it did before. Therefore, device handles should not be persisted. 
\\ingroup ngf\n */\ntypedef uint32_t ngf_device_handle;\n\n/**\n * @enum ngf_device_performance_tier\n * Enumerates different types of rendering devices.\n * \\ingroup ngf\n */\ntypedef enum ngf_device_performance_tier {\n  /** \\ingroup ngf\n   * For high-performance devices, such as discrete GPU. */\n  NGF_DEVICE_PERFORMANCE_TIER_HIGH = 0,\n\n  /** \\ingroup ngf\n   * For low-power integrated GPUs, software rendering, etc.  */\n  NGF_DEVICE_PERFORMANCE_TIER_LOW,\n\n  /** \\ingroup ngf\n   * The specific performance profile is unknown. */\n  NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN,\n\n  NGF_DEVICE_PERFORMANCE_TIER_COUNT\n} ngf_device_performance_tier;\n\n/**\n * @struct ngf_init_info\n * nicegraf initialization parameters.\n * See also: \\ref ngf_initialize.\n */\ntypedef struct ngf_init_info {\n  /**\n   * Pointer to a structure containing a diagnostic log configuration.\n   * If this pointer is set to `NULL`, no diagnostic callback shall be invoked.\n   */\n  const ngf_diagnostic_info* diag_info;\n\n  /**\n   * Pointer to a structure specifying custom allocation callbacks, which the library\n   * shall use to manage CPU memory for internal use.\n   * If this pointer is set to `NULL`, standard malloc and free are used.\n   */\n  const ngf_allocation_callbacks* allocation_callbacks;\n\n  /**\n   * Handle for the rendering device that nicegraf shall execute rendering commands on.\n   * A list of available devices and their handles can be obtained with \\ref ngf_enumerate_devices.\n   */\n  ngf_device_handle device;\n\n  /**\n   * Pointer to a structure containing RenderDoc API configuration.\n   * If this pointer is set to `NULL`, the RenderDoc API will not be initialized.\n   */\n  const ngf_renderdoc_info* renderdoc_info;\n\n} ngf_init_info;\n\n/**\n * @enum ngf_error\n * \\ingroup ngf\n * Enumerates the error codes that nicegraf routines may return.\n * See also \\ref error-reporting.\n */\ntypedef enum ngf_error {\n  /** \\ingroup ngf\n   * No error, operation 
finished successfully. */\n  NGF_ERROR_OK = 0,\n\n  /** \\ingroup ngf\n   * Host memory allocation failed. */\n  NGF_ERROR_OUT_OF_MEM,\n\n  /** \\ingroup ngf\n   * A call to the backend API that was\n   * supposed to create an object failed.*/\n  NGF_ERROR_OBJECT_CREATION_FAILED,\n\n  /** \\ingroup ngf\n   * The operation would have resulted in an out of\n   * bounds access. */\n  NGF_ERROR_OUT_OF_BOUNDS,\n\n  /** \\ingroup ngf\n   * A format enumerator provided as part of an argument to the call is not valid in that context.\n   */\n  NGF_ERROR_INVALID_FORMAT,\n\n  /** \\ingroup ngf\n   * A size passed as part of an argument to the call is either too large or too small.*/\n  NGF_ERROR_INVALID_SIZE,\n\n  /** \\ingroup ngf\n   * An enumerator passed as part of an argument to the call is not valid in that context.*/\n  NGF_ERROR_INVALID_ENUM,\n\n  /**\n   * \\ingroup ngf\n   */\n  NGF_ERROR_INVALID_OPERATION,\n \n  /** \\ingroup ngf\n   * The routine did not complete successfully. */\n  NGF_ERROR_OPERATION_FAILED,\n  /*..add new errors above this line */\n} ngf_error;\n\n/**\n * @struct ngf_irect2d\n * \\ingroup ngf\n * Represents a rectangular, axis-aligned 2D region with integer coordinates.\n */\ntypedef struct ngf_irect2d {\n  int32_t  x;      /**< X coord of lower-left corner. */\n  int32_t  y;      /**< Y coord of lower-left corner. */\n  uint32_t width;  /**< The size of the rectangle along the x-axis. */\n  uint32_t height; /**< The size of the rectangle along the y-axis. */\n} ngf_irect2d;\n\n/**\n * @struct ngf_extent3d\n * \\ingroup ngf\n * Represents a rectangular, axis-aligned 3D volume.\n */\ntypedef struct ngf_extent3d {\n  uint32_t width;  /**< The size of the volume along the x-axis. */\n  uint32_t height; /**< The size of the volume along the y-axis. */\n  uint32_t depth;  /**< The size of the volume along the z-axis. 
*/\n} ngf_extent3d;\n\n/**\n * @struct ngf_offset3d\n * \\ingroup ngf\n * Three-dimensional offset.\n */\ntypedef struct ngf_offset3d {\n  int32_t x; /**< Offset along the x-axis. */\n  int32_t y; /**< Offset along the y-axis. */\n  int32_t z; /**< Offset along the z-axis. */\n} ngf_offset3d;\n\n/**\n * @enum ngf_stage_type\n * \\ingroup ngf\n * Shader stage types.\n * Note that some back-ends might not support all of these.\n */\ntypedef enum ngf_stage_type {\n  /** \\ingroup ngf\n   * Indicates the vertex processing stage. */\n  NGF_STAGE_VERTEX = 0,\n\n  /** \\ingroup ngf\n   * Indicates the fragment processing stage. */\n  NGF_STAGE_FRAGMENT,\n\n  /** \\ingroup ngf\n   * Indicates the compute stage.\n   */\n  NGF_STAGE_COMPUTE,\n\n  NGF_STAGE_COUNT\n} ngf_stage_type;\n\n/**\n * @struct ngf_shader_stage_info\n * \\ingroup ngf\n *\n * Describes a programmable shader stage.\n */\ntypedef struct ngf_shader_stage_info {\n  ngf_stage_type type; /**< Stage type (vert/frag/etc.) */\n\n  /**\n   * This shall be a pointer to a memory buffer containing the code for\n   * the shader stage.\n   *\n   * The specific contents of the buffer depend on which backend nicegraf\n   * is being used with:\n   *  - for the Vulkan backend, nicegraf expects the SPIR-V bytecode for the shader stage.\n   *  - for the Metal backend, nicegraf expects the source code for the shader stage in the Metal\n   * Shading Language.\n   *\n   * Additionally, the Metal backend expects the code to contain a special comment, mapping all\n   * <descriptor set, binding> pairs to the native Metal argument table slots. 
The comment shall\n   * be a C-style block comment - beginning with a forward slash, followed by an asterisk -\n   * containing the following word:\n   *\n   * ```\n   * NGF_NATIVE_BINDING_MAP\n   * ```\n   *\n   * followed by a newline character.\n   *\n   * Each of the following lines until the end of the comment shall have the following format:\n   *\n   * ```\n   * (s b) : m\n   * ```\n   *\n   * where `s` is the set number, `b` is the binding number within the set, and `m` is the index\n   * of the corresponding resource in Metal's argument table.\n   *\n   * For example, let's say the Metal shader refers to index 3 in the texture argument table.\n   * Adding the following line to the binding map comment\n   *\n   * ```\n   * (0 1) : 3\n   * ```\n   *\n   * would tell the nicegraf metal backend to use the third slot of the texture argument table when\n   * an image is bound to set 0, binding 1 using \\ref ngf_cmd_bind_resources.\n   *\n   * When compiling HLSL shaders using nicegraf-shaderc, the comment with the binding map is\n   * generated automatically.\n   */\n  const void* content;\n\n  /** The number of bytes in the \\ref ngf_shader_stage_info::content buffer. */\n  uint32_t    content_length;\n  const char* debug_name;       /**< Optional name, will appear in debug logs, may be NULL.*/\n  const char* entry_point_name; /**< Entry point name for this shader stage. 
*/\n} ngf_shader_stage_info;\n\n/**\n * @struct ngf_shader_stage\n * \\ingroup ngf\n *\n * An opaque handle to a programmable stage of the rendering pipeline.\n *\n * Programmable stages are specified using backend-specific blobs of\n * data, as described in the documentation for \\ref ngf_shader_stage_info::content.\n *\n * On platforms that require a compilation step at runtime, details about\n * compile errors are reported via the debug callback mechanism.\n *\n * Shader stage objects are necessary for creating \\ref ngf_graphics_pipeline objects, but once\n * the pipelines have been created, the shader stages that had been used to create\n * them can safely be disposed of.\n *\n * See also: \\ref ngf_shader_stage_info, \\ref ngf_create_shader_stage, \\ref\n * ngf_destroy_shader_stage.\n */\ntypedef struct ngf_shader_stage_t* ngf_shader_stage;\n\n/**\n * @enum ngf_polygon_mode\n * \\ingroup ngf\n *\n * Enumerates ways to draw polygons.\n * See also \\ref ngf_rasterization_info.\n */\ntypedef enum ngf_polygon_mode {\n  /** \\ingroup ngf\n   * Fill the entire polygon.*/\n  NGF_POLYGON_MODE_FILL = 0,\n\n  /** \\ingroup ngf\n   * Outline only.*/\n  NGF_POLYGON_MODE_LINE,\n\n  /** \\ingroup ngf\n   * Vertices only.*/\n  NGF_POLYGON_MODE_POINT,\n  NGF_POLYGON_MODE_COUNT\n} ngf_polygon_mode;\n\n/**\n * @enum ngf_cull_mode\n * \\ingroup ngf\n *\n * Enumerates polygon culling strategies.\n * See also \\ref ngf_rasterization_info.\n */\ntypedef enum ngf_cull_mode {\n  /** \\ingroup ngf\n   * Cull back-facing polygons. */\n  NGF_CULL_MODE_BACK = 0,\n\n  /** \\ingroup ngf\n   * Cull front-facing polygons. */\n  NGF_CULL_MODE_FRONT,\n\n  /** \\ingroup ngf\n   * Cull all polygons. */\n  NGF_CULL_MODE_FRONT_AND_BACK,\n\n  /** \\ingroup ngf\n   * Do not cull anything. 
*/\n  NGF_CULL_MODE_NONE,\n  NGF_CULL_MODE_COUNT\n} ngf_cull_mode;\n\n/**\n * @enum ngf_front_face_mode\n * \\ingroup ngf\n * Enumerates possible vertex winding orders, which are used to decide which\n * polygons are front- or back-facing.\n * See also \\ref ngf_rasterization_info.\n */\ntypedef enum ngf_front_face_mode {\n  /** \\ingroup ngf\n   * Polygons with vertices in counter-clockwise order are considered front-facing. */\n  NGF_FRONT_FACE_COUNTER_CLOCKWISE = 0,\n\n  /** \\ingroup ngf\n   * Polygons with vertices in clockwise order are considered front-facing. */\n  NGF_FRONT_FACE_CLOCKWISE,\n\n  NGF_FRONT_FACE_COUNT\n} ngf_front_face_mode;\n\n/**\n * @struct ngf_rasterization_info\n * \\ingroup ngf\n * Rasterization stage parameters.\n */\ntypedef struct ngf_rasterization_info {\n  bool discard; /**< Enable/disable rasterizer discard. Use this in pipelines that\n                     don't write any fragment data.*/\n  ngf_polygon_mode    polygon_mode;      /**< How to draw polygons.*/\n  ngf_cull_mode       cull_mode;         /**< Which polygons to cull.*/\n  ngf_front_face_mode front_face;        /**< Which winding counts as front-facing.*/\n  bool                enable_depth_bias; /**< Controls whether to enable depth bias. See also: \\ref\n                                            ngf_cmd_set_depth_bias */\n} ngf_rasterization_info;\n\n/**\n * @enum ngf_compare_op\n * \\ingroup ngf\n * Compare operations used in depth and stencil tests.\n */\ntypedef enum ngf_compare_op {\n  /** \\ingroup ngf\n   * Comparison test never succeeds. */\n  NGF_COMPARE_OP_NEVER = 0,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A < B. */\n  NGF_COMPARE_OP_LESS,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A <= B. */\n  NGF_COMPARE_OP_LEQUAL,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A == B. */\n  NGF_COMPARE_OP_EQUAL,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A >= B. 
*/\n  NGF_COMPARE_OP_GEQUAL,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A > B. */\n  NGF_COMPARE_OP_GREATER,\n\n  /** \\ingroup ngf\n   * Comparison test succeeds if A != B. */\n  NGF_COMPARE_OP_NEQUAL,\n\n  /** \\ingroup ngf\n   * Comparison test always succeeds. */\n  NGF_COMPARE_OP_ALWAYS,\n\n  NGF_COMPARE_OP_COUNT\n} ngf_compare_op;\n\n/**\n * @enum ngf_stencil_op\n * \\ingroup ngf\n * Operations that can be performed on stencil buffer.\n */\ntypedef enum ngf_stencil_op {\n  /** \\ingroup ngf\n   * Don't touch. */\n  NGF_STENCIL_OP_KEEP = 0,\n\n  /** \\ingroup ngf\n   * Set to 0. */\n  NGF_STENCIL_OP_ZERO,\n\n  /** \\ingroup ngf\n   * Replace with reference value. */\n  NGF_STENCIL_OP_REPLACE,\n\n  /** \\ingroup ngf\n   * Increment, clamping to max value. */\n  NGF_STENCIL_OP_INCR_CLAMP,\n\n  /** \\ingroup ngf\n   * Increment, wrapping to 0. */\n  NGF_STENCIL_OP_INCR_WRAP,\n\n  /** \\ingroup ngf\n   * Decrement, clamping to 0. */\n  NGF_STENCIL_OP_DECR_CLAMP,\n\n  /** \\ingroup ngf\n   * Decrement, wrapping to max value. */\n  NGF_STENCIL_OP_DECR_WRAP,\n\n  /** \\ingroup ngf\n   * Bitwise invert. 
*/\n  NGF_STENCIL_OP_INVERT,\n\n  NGF_STENCIL_OP_COUNT\n} ngf_stencil_op;\n\n/**\n * @struct ngf_stencil_info\n * \\ingroup ngf\n * Stencil operation description.\n */\ntypedef struct ngf_stencil_info {\n  ngf_stencil_op fail_op;       /**< What to do on stencil test fail.*/\n  ngf_stencil_op pass_op;       /**< What to do on pass.*/\n  ngf_stencil_op depth_fail_op; /**< What to do when depth test fails but stencil test passes.*/\n  ngf_compare_op compare_op;    /**< Stencil comparison function.*/\n  uint32_t       compare_mask;  /**< Compare mask.*/\n  uint32_t       write_mask;    /**< Write mask.*/\n  uint32_t       reference;     /**< Reference value (used for \\ref NGF_STENCIL_OP_REPLACE).*/\n} ngf_stencil_info;\n\n/**\n * @struct ngf_depth_stencil_info\n * \\ingroup ngf\n * A graphics pipeline's depth/stencil state description.\n */\ntypedef struct ngf_depth_stencil_info {\n  /**\n   * Stencil test and actions for front-facing polys.\n   * This is ignored when stencil testing is disabled.\n   */\n  ngf_stencil_info front_stencil;\n\n  /**\n   * Stencil test and actions for back-facing polys.\n   * This is ignored when stencil testing is disabled.\n   */\n  ngf_stencil_info back_stencil;\n\n  /**\n   * The comparison function to use when performing the depth test.\n   * This is ignored when depth testing is disabled.\n   */\n  ngf_compare_op depth_compare;\n\n  /**\n   * Whether to enable stencil testing.\n   * The exact procedure for the stencil test, and the actions to\n   * perform on success or failure can be specified separately\n   * for front- and back-facing polygons (see \\ref ngf_depth_stencil_info::front_stencil and\n   * \\ref ngf_depth_stencil_info::back_stencil).\n   */\n  bool stencil_test;\n\n  /**\n   * Whether to enable depth test.\n   * When this is enabled, fragments that fail the test specified in\n   * \\ref ngf_depth_stencil_info::depth_compare, get discarded.\n   */\n  bool depth_test;\n\n  /**\n   * Whether to enable writing to the 
depth buffer.
   * When this is enabled, fragments that pass the depth test have their
   * depth written into the depth buffer.
   */
  bool depth_write;

} ngf_depth_stencil_info;

/**
 * @enum ngf_blend_factor
 * \ingroup ngf
 * Factors that can be used for source and destination values during the blend operation.
 * The factor can be thought of as a multiplier that is applied to a value before the blend
 * operation is performed.
 * See \ref ngf_blend_info for details.
 */
typedef enum ngf_blend_factor {
  /**
   * \ingroup ngf
   * - If used as a blend factor for color: sets each color component to 0;
   * - if used as a blend factor for alpha: sets alpha to 0.
   */
  NGF_BLEND_FACTOR_ZERO = 0,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: leaves the color unchanged;
   * - if used as a blend factor for alpha: leaves the alpha value unchanged.
   */
  NGF_BLEND_FACTOR_ONE,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by the corresponding
   * component of the "source" color value;
   * - if used as a blend factor for alpha: multiplies the alpha value by the "source" alpha value.
   */
  NGF_BLEND_FACTOR_SRC_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by one minus the
   * corresponding component of the "source" color value;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "source"
   * alpha value.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by the corresponding
   * component of the "destination" color value;
   * - if used as a blend factor for alpha: multiplies the alpha value by the "destination" alpha
   * value.
   */
  NGF_BLEND_FACTOR_DST_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by one minus the
   * corresponding component of the "destination" color value;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "destination"
   * alpha value.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_DST_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by the "source" alpha
   * value;
   * - if used as a blend factor for alpha: multiplies the alpha value by the "source" alpha value.
   */
  NGF_BLEND_FACTOR_SRC_ALPHA,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by one minus the
   * "source" alpha value;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "source"
   * alpha value.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by the "destination"
   * alpha value;
   * - if used as a blend factor for alpha: multiplies the alpha value by the "destination" alpha
   * value.
   */
  NGF_BLEND_FACTOR_DST_ALPHA,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies each color component by one minus the
   * "destination" alpha value;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "destination"
   * alpha value.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies the red, green and blue components of the
   * color by the 1st, 2nd and 3rd elements of \ref ngf_graphics_pipeline_info::blend_consts
   * respectively;
   * - if used as a blend factor for alpha: multiplies the alpha value by the 4th component of \ref
   * ngf_graphics_pipeline_info::blend_consts.
   */
  NGF_BLEND_FACTOR_CONSTANT_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies the red, green and blue components of the
   * color by one minus the 1st, 2nd and 3rd elements of \ref
   * ngf_graphics_pipeline_info::blend_consts respectively;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the 4th
   * component of \ref ngf_graphics_pipeline_info::blend_consts.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies the components of the color by the 4th
   * element of \ref ngf_graphics_pipeline_info::blend_consts;
   * - if used as a blend factor for alpha: multiplies the alpha value by the 4th component of \ref
   * ngf_graphics_pipeline_info::blend_consts.
   */
  NGF_BLEND_FACTOR_CONSTANT_ALPHA,

  /**
   * \ingroup ngf
   * - If used as a blend factor for color: multiplies the components of the color by one minus the
   * 4th element of \ref ngf_graphics_pipeline_info::blend_consts;
   * - if used as a blend factor for alpha: multiplies the alpha value by one minus the 4th
   * component of \ref ngf_graphics_pipeline_info::blend_consts.
   */
  NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,

  NGF_BLEND_FACTOR_COUNT
} ngf_blend_factor;

/**
 * @enum ngf_blend_op
 * \ingroup ngf
 * Operations that can be performed to blend the values computed by the fragment stage
 * (source values, denoted `S` in the member documentation) with values already present
 * in the target color attachment of the framebuffer (destination values, denoted `D` in
 * the member documentation).
 *
 * The factors (\ref ngf_blend_factor) for the source and destination values are denoted
 * as `Fs` and `Fd` respectively in the member documentation below.
 *
 */
typedef enum ngf_blend_op {
  /** \ingroup ngf
   * The result of the blend operation shall be `S*Fs + D*Fd` */
  NGF_BLEND_OP_ADD,

  /** \ingroup ngf
   * The result of the blend operation shall be `S*Fs - D*Fd` */
  NGF_BLEND_OP_SUB,

  /** \ingroup ngf
   * The result of the blend operation shall be `D*Fd - S*Fs` */
  NGF_BLEND_OP_REV_SUB,

  /** \ingroup ngf
   * The result of the blend operation shall be `min(S, D)`   */
  NGF_BLEND_OP_MIN,

  /** \ingroup ngf
   * The result of the blend operation shall be `max(S, D)`   */
  NGF_BLEND_OP_MAX,

  NGF_BLEND_OP_COUNT
} ngf_blend_op;

/**
 * Identifies a color channel for color write mask. See \ref ngf_blend_info::color_write_mask for
 * details.
 */
typedef enum ngf_color_write_mask_bit {
  NGF_COLOR_MASK_WRITE_BIT_R = 0x01,
  NGF_COLOR_MASK_WRITE_BIT_G = 0x02,
  NGF_COLOR_MASK_WRITE_BIT_B = 0x04,
  NGF_COLOR_MASK_WRITE_BIT_A = 0x08
} ngf_color_write_mask_bit;

/**
 * @struct ngf_blend_info
 * \ingroup ngf
 * Describes how blending should be handled by the pipeline.
 * If blending is disabled, the resulting color and alpha values are directly assigned
 * the color and alpha values computed at the fragment stage.
 *
 * When blending is enabled, the resulting color and alpha values are computed using the
 * corresponding blend operations and factors (specified separately for color and alpha).
 * Note that if the render target attachment from which the destination values are read
 * uses an sRGB format, the destination color values are linearized prior to being used
 * in a blend operation.
 *
 * If the render target attachment uses an sRGB format, the resulting color value
 * is converted to an sRGB representation prior to being finally written to the attachment.
 */
typedef struct ngf_blend_info {
  ngf_blend_op     blend_op_color;         /**< The blend operation to perform for color. */
  ngf_blend_op     blend_op_alpha;         /**< The blend operation to perform for alpha. */
  ngf_blend_factor src_color_blend_factor; /**< The source blend factor for color. */
  ngf_blend_factor dst_color_blend_factor; /**< The destination blend factor for color. */
  ngf_blend_factor src_alpha_blend_factor; /**< The source blend factor for alpha. */
  ngf_blend_factor dst_alpha_blend_factor; /**< The destination blend factor for alpha. */
  uint32_t         color_write_mask; /**< A combination of \ref ngf_color_write_mask_bit flags that
                                        specifies which color channels actually get written out for the
                                        attachment corresponding to this blend state. */
  bool enable;                       /**< Specifies whether blending is enabled.*/
} ngf_blend_info;

/**
 * @enum ngf_type
 * \ingroup ngf
 * Enumerates the available vertex attribute component types.
 */
typedef enum ngf_type {
  /** \ingroup ngf
   * Signed 8-bit integer. */
  NGF_TYPE_INT8 = 0,

  /** \ingroup ngf
   * Unsigned 8-bit integer. */
  NGF_TYPE_UINT8,

  /** \ingroup ngf
   * Signed 16-bit integer. */
  NGF_TYPE_INT16,

  /** \ingroup ngf
   * Unsigned 16-bit integer. */
  NGF_TYPE_UINT16,

  /** \ingroup ngf
   * Signed 32-bit integer. */
  NGF_TYPE_INT32,

  /** \ingroup ngf
   * Unsigned 32-bit integer. */
  NGF_TYPE_UINT32,

  /** \ingroup ngf
   * 32-bit floating point number. */
  NGF_TYPE_FLOAT,

  /** \ingroup ngf
   * 16-bit floating point number. */
  NGF_TYPE_HALF_FLOAT,

  /** \ingroup ngf
   * Double-precision floating point number.
*/
  NGF_TYPE_DOUBLE,

  NGF_TYPE_COUNT
} ngf_type;

/**
 * @enum ngf_vertex_input_rate
 * \ingroup ngf
 * The vertex input rate specifies whether a new set of attributes is read from a buffer per each
 * vertex or per each instance.
 */
typedef enum ngf_vertex_input_rate {
  /**
   * \ingroup ngf
   *
   * Attributes are read per-vertex.
   * With this vertex input rate, each vertex receives its own set of attributes.
   */
  NGF_INPUT_RATE_VERTEX = 0,

  /**
   * \ingroup ngf
   *
   * Attributes are read per-instance.
   * With this vertex input rate, all vertices within the same instance share the same
   * attribute values.
   */
  NGF_INPUT_RATE_INSTANCE,
  NGF_VERTEX_INPUT_RATE_COUNT
} ngf_vertex_input_rate;

/**
 * @struct ngf_vertex_buf_binding_desc
 * \ingroup ngf
 * Specifies a vertex buffer binding.
 * A _vertex buffer binding_ may be thought of as a slot to which a vertex attribute buffer can be
 * bound. An \ref ngf_graphics_pipeline may have several such slots, which are addressed by their
 * indices. Vertex attribute buffers can be bound to these slots with \ref
 * ngf_cmd_bind_attrib_buffer. The binding also partly defines how the contents of the bound buffer
 * is interpreted - via \ref ngf_vertex_buf_binding_desc::stride and \ref
 * ngf_vertex_buf_binding_desc::input_rate
 */
typedef struct ngf_vertex_buf_binding_desc {
  uint32_t binding; /**< Index of the binding that this structure describes.*/

  /**
   * Specifies the distance (in bytes) between the starting bytes of two consecutive attribute
   * values.
   *
   * As an example, assume the buffer contains data for a single attribute, such as the position of
   * a vertex in three-dimensional space. Each component of the position is a 32-bit floating point
   * number. The values are laid out in memory one after another:
   *
   * ```
   *  ________ ________ ________ ________ ________ ________ ____
   * |        |        |        |        |        |        |
   * | pos0.x | pos0.y | pos0.z | pos1.x | pos1.y | pos1.z | ...
   * |________|________|________|________|________|________|____
   *
   * ```
   * In this case, the stride is 3*4 = 12 bytes - the distance from the beginning of the first
   * attribute to the beginning of the next attribute is equal to the size of one attribute value.
   *
   * Now consider a different case, where we have two attributes: a three-dimensional position and
   * an RGB color, and the buffer first lists all the attribute values for the first vertex,
   * then all attribute values for the second vertex and so on:
   *
   * ```
   *  ________ ________ ________ ________ ________ ________ ________ _____
   * |        |        |        |        |        |        |        |
   * | pos0.x | pos0.y | pos0.z | col0.x | col0.y | col0.z | pos1.x | ...
   * |________|________|________|________|________|________|________|_____
   *
   * ```
   *
   * In this case, the position of the next vertex does not immediately follow the position of the
   * previous one - there is the value of the color attribute in between. In this case, assuming the
   * attribute components use a 32-bit floating point, the stride would have to be
   * `3 * 4 + 3 * 4 = 24` bytes.
   */
  uint32_t stride;

  /**
   * Specifies whether attributes are read from the bound buffer
   * per-vertex or per-instance.
   */
  ngf_vertex_input_rate input_rate;
} ngf_vertex_buf_binding_desc;

/**
 * @struct ngf_vertex_attrib_desc
 * \ingroup ngf
 * Specifies information about a vertex attribute.
 */
typedef struct ngf_vertex_attrib_desc {
  uint32_t location; /**< Attribute index. */
  uint32_t binding;  /**< The index of the vertex attribute buffer binding to use.*/
  uint32_t offset;   /**< Offset in the buffer at which attribute data starts.*/
  ngf_type type;     /**< Type of attribute component.*/
  uint32_t size;     /**< Number of attribute components. This value has to be between 1 and 4
                        (inclusive). */

  /**
   * Whether the vertex stage sees the raw or normalized values for the attribute components.
   * Only attribute components of types \ref NGF_TYPE_INT8, \ref NGF_TYPE_UINT8, \ref
   * NGF_TYPE_INT16 and \ref NGF_TYPE_UINT16 can be normalized. For signed types, the values are
   * scaled to the [-1; 1] floating point range, for unsigned types they are scaled to [0; 1].
   */
  bool normalized;
} ngf_vertex_attrib_desc;

/**
 * @struct ngf_vertex_input_info
 * \ingroup ngf
 * Specifies information about the pipeline's vertex input.
 */
typedef struct ngf_vertex_input_info {
  uint32_t nattribs;           /**< Number of attribute descriptions.*/
  uint32_t nvert_buf_bindings; /**< Number of vertex buffer binding descriptions.*/

  /**
   * Pointer to an array of structures describing vertex attribute buffer
   * bindings.
   */
  const ngf_vertex_buf_binding_desc* vert_buf_bindings;

  /**
   * Pointer to an array of structures describing the vertex attributes.
   */
  const ngf_vertex_attrib_desc* attribs;
} ngf_vertex_input_info;

/**
 * @enum ngf_sample_count
 * \ingroup ngf
 * Specifies the number of MSAA samples.
 */
typedef enum ngf_sample_count {
  NGF_SAMPLE_COUNT_1  = 1,
  NGF_SAMPLE_COUNT_2  = 2,
  NGF_SAMPLE_COUNT_4  = 4,
  NGF_SAMPLE_COUNT_8  = 8,
  NGF_SAMPLE_COUNT_16 = 16,
  NGF_SAMPLE_COUNT_32 = 32,
  NGF_SAMPLE_COUNT_64 = 64,
} ngf_sample_count;

/**
 * @struct ngf_multisample_info
 * \ingroup ngf
 *
 * Specifies the state of multisampling.
 */
typedef struct ngf_multisample_info {
  ngf_sample_count sample_count;      /**<
MSAA sample count. */
  bool             alpha_to_coverage; /**< Whether alpha-to-coverage is enabled.*/
} ngf_multisample_info;

/**
 * @enum ngf_image_format
 * \ingroup ngf
 *
 * Image formats.
 *
 * Some backends may not support all of those.
 * Using an sRGB format in a color attachment or swapchain image means that all
 * color values output by the fragment stage are interpreted as being in linear
 * color space, and an appropriate transfer function is applied to them to
 * convert them to the sRGB colorspace before writing them to the target.
 * Using an sRGB format in a sampled image means that all color values stored
 * in the image are interpreted to be in the sRGB color space, and all read
 * operations automatically apply a transfer function to convert the values
 * from sRGB to linear color space.
 */
typedef enum ngf_image_format {
  NGF_IMAGE_FORMAT_R8 = 0,
  NGF_IMAGE_FORMAT_RG8,
  NGF_IMAGE_FORMAT_RG8_SNORM,
  NGF_IMAGE_FORMAT_RGB8,
  NGF_IMAGE_FORMAT_RGBA8,
  NGF_IMAGE_FORMAT_SRGB8,
  NGF_IMAGE_FORMAT_SRGBA8,
  NGF_IMAGE_FORMAT_BGR8,
  NGF_IMAGE_FORMAT_BGRA8,
  NGF_IMAGE_FORMAT_BGR8_SRGB,
  NGF_IMAGE_FORMAT_BGRA8_SRGB,
  NGF_IMAGE_FORMAT_RGB10A2,
  NGF_IMAGE_FORMAT_R32F,
  NGF_IMAGE_FORMAT_RG32F,
  NGF_IMAGE_FORMAT_RGB32F,
  NGF_IMAGE_FORMAT_RGBA32F,
  NGF_IMAGE_FORMAT_R16F,
  NGF_IMAGE_FORMAT_RG16F,
  NGF_IMAGE_FORMAT_RGB16F,
  NGF_IMAGE_FORMAT_RGBA16F,
  NGF_IMAGE_FORMAT_RG11B10F,
  NGF_IMAGE_FORMAT_RGB9E5,
  NGF_IMAGE_FORMAT_R16_UNORM,
  NGF_IMAGE_FORMAT_R16_SNORM,
  NGF_IMAGE_FORMAT_RG16_UNORM,
  NGF_IMAGE_FORMAT_RG16_SNORM,
  NGF_IMAGE_FORMAT_RGBA16_UNORM,
  NGF_IMAGE_FORMAT_RGBA16_SNORM,
  NGF_IMAGE_FORMAT_R8U,
  NGF_IMAGE_FORMAT_R8S,
  NGF_IMAGE_FORMAT_R16U,
  NGF_IMAGE_FORMAT_R16S,
  NGF_IMAGE_FORMAT_RG16U,
  NGF_IMAGE_FORMAT_RGB16U,
  NGF_IMAGE_FORMAT_RGBA16U,
  NGF_IMAGE_FORMAT_R32U,
  NGF_IMAGE_FORMAT_RG32U,
  NGF_IMAGE_FORMAT_RGB32U,
  NGF_IMAGE_FORMAT_RGBA32U,
  NGF_IMAGE_FORMAT_BC7,
  NGF_IMAGE_FORMAT_BC7_SRGB,
  NGF_IMAGE_FORMAT_BC6H_SFLOAT,
  NGF_IMAGE_FORMAT_BC6H_UFLOAT,
  NGF_IMAGE_FORMAT_BC5_UNORM,
  NGF_IMAGE_FORMAT_BC5_SNORM,
  NGF_IMAGE_FORMAT_ASTC_4x4,
  NGF_IMAGE_FORMAT_ASTC_4x4_SRGB,
  NGF_IMAGE_FORMAT_ASTC_5x4,
  NGF_IMAGE_FORMAT_ASTC_5x4_SRGB,
  NGF_IMAGE_FORMAT_ASTC_5x5,
  NGF_IMAGE_FORMAT_ASTC_5x5_SRGB,
  NGF_IMAGE_FORMAT_ASTC_6x5,
  NGF_IMAGE_FORMAT_ASTC_6x5_SRGB,
  NGF_IMAGE_FORMAT_ASTC_6x6,
  NGF_IMAGE_FORMAT_ASTC_6x6_SRGB,
  NGF_IMAGE_FORMAT_ASTC_8x5,
  NGF_IMAGE_FORMAT_ASTC_8x5_SRGB,
  NGF_IMAGE_FORMAT_ASTC_8x6,
  NGF_IMAGE_FORMAT_ASTC_8x6_SRGB,
  NGF_IMAGE_FORMAT_ASTC_8x8,
  NGF_IMAGE_FORMAT_ASTC_8x8_SRGB,
  NGF_IMAGE_FORMAT_ASTC_10x5,
  NGF_IMAGE_FORMAT_ASTC_10x5_SRGB,
  NGF_IMAGE_FORMAT_ASTC_10x6,
  NGF_IMAGE_FORMAT_ASTC_10x6_SRGB,
  NGF_IMAGE_FORMAT_ASTC_10x8,
  NGF_IMAGE_FORMAT_ASTC_10x8_SRGB,
  NGF_IMAGE_FORMAT_ASTC_10x10,
  NGF_IMAGE_FORMAT_ASTC_10x10_SRGB,
  NGF_IMAGE_FORMAT_ASTC_12x10,
  NGF_IMAGE_FORMAT_ASTC_12x10_SRGB,
  NGF_IMAGE_FORMAT_ASTC_12x12,
  NGF_IMAGE_FORMAT_ASTC_12x12_SRGB,
  NGF_IMAGE_FORMAT_DEPTH32,
  NGF_IMAGE_FORMAT_DEPTH16,
  NGF_IMAGE_FORMAT_DEPTH24_STENCIL8,
  NGF_IMAGE_FORMAT_UNDEFINED,
  NGF_IMAGE_FORMAT_COUNT
} ngf_image_format;

/**
 * @enum ngf_attachment_type
 * \ingroup ngf
 * Enumerates render target attachment types.
 */
typedef enum ngf_attachment_type {
  /** \ingroup ngf
   * For attachments containing color data. */
  NGF_ATTACHMENT_COLOR = 0,

  /** \ingroup ngf
   * For attachments containing depth data. */
  NGF_ATTACHMENT_DEPTH,

  /** \ingroup ngf
   * For attachments containing combined depth and stencil data.
*/
  NGF_ATTACHMENT_DEPTH_STENCIL
} ngf_attachment_type;

/**
 * @struct ngf_attachment_description
 * \ingroup ngf
 * Describes the type and format of a render target attachment.
 */
typedef struct ngf_attachment_description {
  ngf_attachment_type type; /**< What the attachment shall be used for. */
  ngf_image_format format;  /**< Format of the associated image. Note that it must be valid for the
                               given attachment type. */
  ngf_sample_count sample_count; /**< Number of samples per pixel in the associated image. */
  bool is_resolve; /**< Whether the image associated with this attachment is used as an MSAA resolve
                      target. */
} ngf_attachment_description;

/**
 * @struct ngf_attachment_descriptions
 * \ingroup ngf
 * A list of attachment descriptions.
 */
typedef struct ngf_attachment_descriptions {
  /** Pointer to a continuous array of \ref ngf_attachment_descriptions::ndescs \ref
   * ngf_attachment_description objects.
   */
  const ngf_attachment_description* descs;

  uint32_t ndescs; /**< The number of attachment descriptions in the list. */
} ngf_attachment_descriptions;

/**
 * @enum ngf_primitive_topology
 * \ingroup ngf
 *
 * Enumerates the available primitive topologies (ways to group vertices into primitives).
 */
typedef enum ngf_primitive_topology {
  /**
   * \ingroup ngf
   * A list of separate triangles - each three vertices define a separate triangle.
   */
  NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 0,

  /**
   * \ingroup ngf
   * A list of connected triangles, with consecutive triangles sharing an edge like so:
   * ```
   *  o---------o-----------o
   *   \       /  \       /
   *     \   /      \   / ...
   *       o----------o
   *
   * ```
   */
  NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,

  /**
   * \ingroup ngf
   * A list of separate lines. Each two vertices define a separate line.
   */
  NGF_PRIMITIVE_TOPOLOGY_LINE_LIST,

  /**
   * \ingroup ngf
   * A list of connected lines. The end of a line is the beginning of the next line in the list.
   */
  NGF_PRIMITIVE_TOPOLOGY_LINE_STRIP,

  NGF_PRIMITIVE_TOPOLOGY_COUNT
} ngf_primitive_topology;

/**
 * @struct ngf_constant_specialization
 * \ingroup ngf
 *
 * A constant specialization entry; sets the value for a single specialization constant.
 */
typedef struct ngf_constant_specialization {
  uint32_t constant_id; /**< ID of the specialization constant used in the shader stage. */
  uint32_t offset;      /**< Offset at which the user-provided value is stored in the specialization
                           buffer. */
  ngf_type type;        /**< Type of the specialization constant. */
} ngf_constant_specialization;

/**
 * @struct ngf_specialization_info
 * \ingroup ngf
 * Sets specialization constant values for a pipeline.
 * Specialization constants are a kind of shader constant whose values can be set at pipeline
 * creation time. The shaders that run as part of said pipeline will then see the provided values
 * during execution.
 */
typedef struct ngf_specialization_info {
  const ngf_constant_specialization* specializations;  /**< List of specialization entries. */
  uint32_t                           nspecializations; /**< Number of specialization entries. */
  const void* value_buffer; /**< Pointer to a buffer containing the values for the
                           specialization constants.
*/
} ngf_specialization_info;

/**
 * @struct ngf_input_assembly_info
 * \ingroup ngf
 * Specifies how primitives are assembled from vertices.
 */
typedef struct ngf_input_assembly_info {
  ngf_primitive_topology primitive_topology;       /**< The primitive topology (see \ref
                                                      ngf_primitive_topology). */
  bool                   enable_primitive_restart; /**< Whether primitive restart is enabled.
                                                      NOTE(review): exact restart semantics assumed
                                                      from the field name - confirm. */
} ngf_input_assembly_info;

/**
 * @struct ngf_graphics_pipeline_info
 * \ingroup ngf
 *
 * Contains all information necessary for creating a graphics pipeline object.
 */
typedef struct ngf_graphics_pipeline_info {
  ngf_shader_stage              shader_stages[5]; /**< The programmable stages for this pipeline. */
  uint32_t                      nshader_stages; /**< The number of programmable stages involved. */
  const ngf_rasterization_info* rasterization;  /**< Specifies the parameters for the rasterizer. */
  const ngf_multisample_info*   multisample;    /**< Specifies the parameters for multisampling. */

  /**
   * Specifies the parameters for depth and stencil testing.
   */
  const ngf_depth_stencil_info* depth_stencil;

  /**
   * Specifies vertex attributes and vertex attribute buffer bindings.
   */
  const ngf_vertex_input_info* input_info;

  /**
   * Specifies how primitives are assembled from vertices.
   */
  const ngf_input_assembly_info* input_assembly_info;

  const ngf_specialization_info* spec_info; /**< Specifies the values for specialization constants
                                               (if any) used by the programmable stages. */

  /**
   * Describes which render targets are compatible with this pipeline.
   * A compatible render target must have the same number of attachments as specified in the list,
   * with matching type, format and sample count.
   */
  const ngf_attachment_descriptions* compatible_rt_attachment_descs;

  /**
   * A pointer to an array of \ref ngf_blend_info structures specifying the parameters for blending.
   * The array must contain exactly the same number of elements as there are color attachments
   * specified in \ref ngf_graphics_pipeline_info::compatible_rt_attachment_descs.
   * If set to NULL, all color attachments will have blending disabled and fully enabled color write
   * mask.
   */
  const ngf_blend_info* color_attachment_blend_states;

  float blend_consts[4]; /**< Blend constants used by \ref NGF_BLEND_FACTOR_CONSTANT_COLOR, \ref
                            NGF_BLEND_FACTOR_CONSTANT_ALPHA, \ref
                            NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR and \ref
                            NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA . */

  const char* debug_name; /**< Optional name for this pipeline, for debugging purposes. */
} ngf_graphics_pipeline_info;

/**
 * @struct ngf_graphics_pipeline
 * \ingroup ngf
 *
 * An opaque handle to a graphics pipeline object.
 *
 * See also: \ref ngf_graphics_pipeline_info, \ref ngf_create_graphics_pipeline and \ref
 * ngf_destroy_graphics_pipeline.
 */
typedef struct ngf_graphics_pipeline_t* ngf_graphics_pipeline;

/**
 * @struct ngf_compute_pipeline_info
 * \ingroup ngf
 *
 * Contains all information necessary for creating a compute pipeline object.
 */
typedef struct ngf_compute_pipeline_info {
  ngf_shader_stage shader_stage; /**< The (only) stage for this pipeline. */
  const ngf_specialization_info*
      spec_info; /**< Specifies the values of specialization constants used by this pipeline.
*/
  const char* debug_name; /**< Optional name for this pipeline, for debugging purposes. */
} ngf_compute_pipeline_info;

/**
 * @struct ngf_compute_pipeline
 * \ingroup ngf
 *
 * An opaque handle to a compute pipeline object.
 *
 * See also: \ref ngf_compute_pipeline_info, \ref ngf_create_compute_pipeline and \ref
 * ngf_destroy_compute_pipeline.
 */
typedef struct ngf_compute_pipeline_t* ngf_compute_pipeline;

/**
 * @enum ngf_descriptor_type
 * \ingroup ngf
 *
 * Available descriptor types.
 * Note that some back-ends may not support all of the listed descriptor types.
 */
typedef enum ngf_descriptor_type {
  /**
   * \ingroup ngf
   *
   * A uniform buffer, also known as a constant buffer, can be used to pass
   * a small to medium sized chunk of data to the shader in a structured way.
   */
  NGF_DESCRIPTOR_UNIFORM_BUFFER = 0,

  /**
   * \ingroup ngf
   *
   * An \ref ngf_image.
   */
  NGF_DESCRIPTOR_IMAGE,

  /**
   * \ingroup ngf
   *
   * An \ref ngf_sampler.
   */
  NGF_DESCRIPTOR_SAMPLER,

  /**
   * \ingroup ngf
   *
   * A combination of an image and sampler in a single object.
   */
  NGF_DESCRIPTOR_IMAGE_AND_SAMPLER,

  /**
   * \ingroup ngf
   *
   * A texel buffer can be used to pass a large amount of unstructured data
   * (e.g. a big array of `float4`s) to the shader.
   */
  NGF_DESCRIPTOR_TEXEL_BUFFER,

  /**
   * \ingroup ngf
   *
   * A storage buffer is a large buffer that can be both read and written in shaders.
   */
  NGF_DESCRIPTOR_STORAGE_BUFFER,

  /**
   * An image that can be both read and written to in a shader.
   */
  NGF_DESCRIPTOR_STORAGE_IMAGE,

  NGF_DESCRIPTOR_ACCELERATION_STRUCTURE,

  NGF_DESCRIPTOR_TYPE_COUNT
} ngf_descriptor_type;

/**
 * @enum ngf_sampler_filter
 * \ingroup ngf
 *
 * Enumerates filters for texture lookups.
 */
typedef enum ngf_sampler_filter {
  /**
   * \ingroup ngf
   *
   * When used as the minification (\ref ngf_sampler_info::min_filter) or magnification (\ref
   * ngf_sampler_info::mag_filter) filter, the result of the filtering operation shall be the
   * value of the texel whose center is nearest to the sample.
   *
   * When used as \ref ngf_sampler_info::mip_filter, makes the selected mip level snap to the one
   * that is closest to the requested mip level value.
   */
  NGF_FILTER_NEAREST = 0,

  /**
   * \ingroup ngf
   *
   * When used as the minification (\ref ngf_sampler_info::min_filter) or magnification (\ref
   * ngf_sampler_info::mag_filter) filter, the result of the filtering operation shall be linearly
   * interpolated from the values of 4 (in case of 2D images) or 8 (in case of 3D images) texels
   * whose centers are nearest to the sample.
   *
   * When used as \ref ngf_sampler_info::mip_filter, linearly blends the values from two mip levels
   * closest to the requested mip level value.
   */
  NGF_FILTER_LINEAR,

  NGF_FILTER_COUNT
} ngf_sampler_filter;

/**
 * @enum ngf_sampler_wrap_mode
 * \ingroup ngf
 *
 * Enumerates strategies for dealing with sampling an image out-of-bounds.
 */
typedef enum ngf_sampler_wrap_mode {
  /** \ingroup ngf
   * Clamp the texel value to what's at the edge of the image.
*/
  NGF_WRAP_MODE_CLAMP_TO_EDGE = 0,

  /** \ingroup ngf
   * Repeat the image contents. */
  NGF_WRAP_MODE_REPEAT,

  /** \ingroup ngf
   * Repeat the image contents, mirrored. */
  NGF_WRAP_MODE_MIRRORED_REPEAT,

  NGF_WRAP_MODE_COUNT
} ngf_sampler_wrap_mode;

/**
 * @struct ngf_sampler_info
 * \ingroup ngf
 *
 * Information for creating an \ref ngf_sampler object.
 */
typedef struct ngf_sampler_info {
  ngf_sampler_filter    min_filter; /**< The filter to apply when the sampled image is minified. */
  ngf_sampler_filter    mag_filter; /**< The filter to apply when the sampled image is magnified. */
  ngf_sampler_filter    mip_filter; /**< The filter to use when transitioning between mip levels. */
  ngf_sampler_wrap_mode wrap_u;     /**< Wrap mode for the U coordinate. */
  ngf_sampler_wrap_mode wrap_v;     /**< Wrap mode for the V coordinate. */
  ngf_sampler_wrap_mode wrap_w;     /**< Wrap mode for the W coordinate. */
  float lod_max;  /**< Maximum mip level that shall be used during the filtering operation.
                   *  Note that this refers to the _level itself_ and not the dimensions of data
                   *  residing in that level, e.g. level 0 (the lowest possible level) has
                   *  the largest dimensions.
                   */
  float lod_min;  /**< Minimum mip level that shall be used during the filtering operation.
                   *  Note that this refers to the _level itself_ and not the dimensions of data
                   *  residing in that level, e.g. level 0 (the lowest possible level) has
                   *  the largest dimensions.
                   */
  float lod_bias; /**< A bias to add to the mip level calculated during the sample operation. */
  float max_anisotropy;             /**< Max allowed degree of anisotropy. Ignored if \ref
                                     * ngf_sampler_info::enable_anisotropy is false.
                                     */
  bool           enable_anisotropy; /**< Whether to allow anisotropic filtering. */
  ngf_compare_op compare_op; /**< The comparison to use when comparing depth texture samples to a
                              * reference value. Set to \ref ngf_compare_op::NGF_COMPARE_OP_NEVER to
                              * disable comparison for the sampler. */
} ngf_sampler_info;

/**
 * @struct ngf_sampler
 * \ingroup ngf
 *
 * An opaque handle for a sampler object.
 *
 * Samplers encapsulate how to filter an image - what happens when an image is minified or
 * magnified, whether anisotropic filtering is enabled, etc. See \ref ngf_sampler_info for more
 * details.
 *
 * Samplers can be bound separately from images - in which case the shader code sees them as two
 * distinct objects, and the same sampler can be used to sample two different images. They can also
 * be combined into a single descriptor (see \ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER), in which case
 * the shader code sees only a single image object, which can be sampled in only one certain way.
 */
typedef struct ngf_sampler_t* ngf_sampler;

/**
 * @enum ngf_image_usage
 * \ingroup ngf
 *
 * Image usage flags.
 *
 * A valid image usage mask may be formed by combining one or more of these
 * values with a bitwise OR operator.
 */
typedef enum ngf_image_usage {
  /** \ingroup ngf
   * The image may be read from in a shader.*/
  NGF_IMAGE_USAGE_SAMPLE_FROM = 0x01,

  /** \ingroup ngf
   * The image may be used as an attachment for a render target.*/
  NGF_IMAGE_USAGE_ATTACHMENT = 0x02,

  /** \ingroup ngf
   * The image may be used as a destination for a transfer operation. */
  NGF_IMAGE_USAGE_XFER_DST = 0x04,

  /** \ingroup ngf
   * Mipmaps may be generated for the image with \ref ngf_cmd_generate_mipmaps.
*/
  NGF_IMAGE_USAGE_MIPMAP_GENERATION = 0x08,

  /** \ingroup ngf
   * The image may be read or written to by a shader. */
  NGF_IMAGE_USAGE_STORAGE = 0x10,

  /** \ingroup ngf
   * The image may be used as a source for a transfer operation. */
  NGF_IMAGE_USAGE_XFER_SRC = 0x20
} ngf_image_usage;

/**
 * @enum ngf_image_type
 * \ingroup ngf
 *
 * Enumerates the possible image types.
 */
typedef enum ngf_image_type {
  /** \ingroup ngf
   * Two-dimensional image. */
  NGF_IMAGE_TYPE_IMAGE_2D = 0,

  /** \ingroup ngf
   * Three-dimensional image. */
  NGF_IMAGE_TYPE_IMAGE_3D,

  /** \ingroup ngf
   * Cubemap. */
  NGF_IMAGE_TYPE_CUBE,

  NGF_IMAGE_TYPE_COUNT
} ngf_image_type;

/**
 * @struct ngf_image_info
 * \ingroup ngf
 *
 * Information required to create an \ref ngf_image object.
 */
typedef struct ngf_image_info {
  ngf_image_type type;    /**< The image type. */
  ngf_extent3d   extent;  /**< The width, height and depth. Note that dimensions irrelevant for the
                             specified image type are ignored.*/
  uint32_t         nmips; /**< The number of mip levels in the image.*/
  uint32_t         nlayers;      /**< Number of layers within the image. */
  ngf_image_format format;       /**< Internal format.*/
  ngf_sample_count sample_count; /**< The number of samples per pixel in the image. */
  uint32_t         usage_hint;   /**< Specifies how the client intends to use the image. Must be a
                                      combination of \ref ngf_image_usage flags.*/
} ngf_image_info;

/**
 * @struct ngf_image
 * \ingroup ngf
 *
 * An opaque handle to an image object.
 *
 * Images are multidimensional arrays of data that can be sampled from in shaders, or rendered into.
 * The individual elements of such arrays shall be referred to as "texels". An \ref ngf_image_format
 * describes the specific type and layout of data elements within a single texel. Note that
 * compressed image formats typically don't store values of texels directly, rather they store
 * enough information that the texel values can be reconstructed (perhaps lossily) by the rendering
 * device.
 *
 * Images can be one of the following types (see \ref ngf_image_type):
 *  - a two-dimensional image, identified by \ref NGF_IMAGE_TYPE_IMAGE_2D and representing a
 *    two-dimensional array of texels;
 *  - a three-dimensional image, identified by \ref NGF_IMAGE_TYPE_IMAGE_3D and representing a
 *    three-dimensional array of texels;
 *  - a cubemap, identified by \ref NGF_IMAGE_TYPE_CUBE and representing a collection of six
 *    two-dimensional texel arrays, each corresponding to a face of a cube.
 *
 * An image object may actually contain several images of the same type, format and dimensions.
 * Those are referred to as "layers" and images containing more than a single layer are called
 * "layered", or "image arrays". Note that a multi-layered 2D image is different from a
 * single-layered 3D image, because filtering is not performed across layers when sampling it. Also
 * note that layered cubemaps are not supported by all hardware - see \ref
 * ngf_device_capabilities::cubemap_arrays_supported.
 *
 * Each image layer may contain mip levels.
Mip level 0 is the layer itself, and each subsequent\n * level (1, 2 and so on) is 2x smaller in dimensions, and usually contains the downscaled version\n * of the preceding level for the purposes of filtering, although the application is free to upload\n * arbitrary data into any mip level, as long as dimension requirements are respected.\n */\ntypedef struct ngf_image_t* ngf_image;\n\n/**\n * @enum ngf_cubemap_face\n * \\ingroup ngf\n *\n * Members of this enumeration are used to refer to the different faces of a cubemap.\n */\ntypedef enum ngf_cubemap_face {\n  NGF_CUBEMAP_FACE_POSITIVE_X,\n  NGF_CUBEMAP_FACE_NEGATIVE_X,\n  NGF_CUBEMAP_FACE_POSITIVE_Y,\n  NGF_CUBEMAP_FACE_NEGATIVE_Y,\n  NGF_CUBEMAP_FACE_POSITIVE_Z,\n  NGF_CUBEMAP_FACE_NEGATIVE_Z,\n  NGF_CUBEMAP_FACE_COUNT\n} ngf_cubemap_face;\n\n/**\n * @struct ngf_image_ref\n * \\ingroup ngf\n *\n * A reference to a part of an image.\n */\ntypedef struct ngf_image_ref {\n  ngf_image        image;        /**< The image being referred to.*/\n  uint32_t         mip_level;    /**< The mip level within the image.*/\n  uint32_t         layer;        /**< The layer within the image.*/\n  ngf_cubemap_face cubemap_face; /**< The face of the cubemap for cubemaps, ignored for\n                                      non-cubemap images.*/\n} ngf_image_ref;\n\n/**\n * @struct ngf_image_view_info\n * \\ingroup ngf\n *\n * Information required to create an \\ref ngf_image_view.\n * Contains the definition of the sub-resource represented by the view as well as\n * the view's corresponding type and format.\n */\ntypedef struct ngf_image_view_info {\n  ngf_image      src_image;      /**< References the source image. */\n  uint32_t       base_mip_level; /**< Specifies the first mip level represented in the view. */\n  uint32_t       nmips;          /**< Specifies the number of mip levels represented in the view. */\n  uint32_t       base_layer;     /**< Specifies the first image layer represented in the view. 
*/\n  uint32_t       nlayers;        /**< Specifies the number of layers represented in the view. */\n  ngf_image_type view_type;      /**< The type to reinterpret the source image as.\n                                       Must be compatible with the source image type.*/\n  ngf_image_format view_format;  /**< The format to reinterpret the source image as.\n                                       Must be compatible with the source image format.*/\n} ngf_image_view_info;\n\n/**\n * @struct ngf_image_view\n * \\ingroup ngf\n *\n * An opaque handle to an image view object.\n *\n * Image views provide a way to reinterpret different sub-parts of a source image as having a\n * particular type and/or format. They can be bound and used in GPU programs just like regular\n * images. Image views are backed by the memory of their corresponding source images and do not\n * incur additional GPU allocations. They become invalid if their source image is destroyed.\n *\n * Image views can use a different type than the source image, however this is subject to\n * compatibility rules defined in the table below:\n *\n * Source image type              | Compatible view types\n * ------------------------------ | ----------------------------------------------------------\n * \\ref NGF_IMAGE_TYPE_IMAGE_2D   | \\ref NGF_IMAGE_TYPE_IMAGE_2D, \\ref NGF_IMAGE_TYPE_IMAGE_3D\n * \\ref NGF_IMAGE_TYPE_CUBE       | \\ref NGF_IMAGE_TYPE_IMAGE_2D\n * \\ref NGF_IMAGE_TYPE_IMAGE_3D   | \\ref NGF_IMAGE_TYPE_IMAGE_3D\n *\n * Attempting to create a view with a type that is not compatible with the source image type will\n * result in an error.\n *\n * Image views can use a different pixel format from the source image (thus \"type punning\" or\n * reinterpreting pixel data). However, the format must be compatible with the source image format.\n * Format compatibility is platform-dependent. 
Attempting to create a view with a format that is not\n * compatible with the source image format will result in an error.\n */\ntypedef struct ngf_image_view_t* ngf_image_view;\n\n/**\n * @struct ngf_render_target_info\n * \ingroup ngf\n * Information required to create a render target object.\n */\ntypedef struct ngf_render_target_info {\n  /** List of attachment descriptions. */\n  const ngf_attachment_descriptions* attachment_descriptions;\n\n  /** Image references, describing what is bound to each attachment. */\n  const ngf_image_ref* attachment_image_refs;\n} ngf_render_target_info;\n\n/**\n * @struct ngf_render_target\n * \ingroup ngf\n *\n * An opaque handle to a render target object.\n *\n * Render targets are collections of images that can be rendered into. Each image in the collection\n * is referred to as an \"attachment\". Some attachments have special meaning, for example the depth\n * or the combined depth+stencil attachment, the contents of which are used in depth/stencil tests.\n * A render target is not allowed to have multiple depth or depth+stencil attachments, however it is\n * allowed to have multiple color attachments (up to a certain limit).\n */\ntypedef struct ngf_render_target_t* ngf_render_target;\n\n/**\n * @struct ngf_clear_info\n * \ingroup ngf\n *\n * Specifies a render target clear operation.\n */\ntypedef union ngf_clear_info {\n  /**\n   * The color to clear to. Each element corresponds to the red, green, blue and alpha channel\n   * respectively, and is a floating point value within the [0; 1] range, with 0.0 corresponding to\n   * none and 1.0 corresponding to full intensity. If the format of the render target image does not\n   * have a corresponding channel, the value is ignored.\n   * This field is used for color attachments only.\n   */\n  float clear_color[4];\n\n  /**\n   * The depth and stencil values to clear to. 
This field is used for depth or combined\n   * depth/stencil attachments only.\n   */\n  struct {\n    float    clear_depth;   /**< The depth value to clear to. */\n    uint32_t clear_stencil; /**< The stencil value to clear to. */\n  } clear_depth_stencil;\n} ngf_clear;\n\n/**\n * @enum ngf_attachment_load_op\n * \\ingroup ngf\n * Enumerates actions that can be performed on attachment \"load\" (at the start of a render pass).\n */\ntypedef enum ngf_attachment_load_op {\n  /** \\ingroup ngf\n   * Don't care what happens. */\n  NGF_LOAD_OP_DONTCARE = 0,\n\n  /** \\ingroup ngf\n   * Preserve the prior contents of the attachment. */\n  NGF_LOAD_OP_KEEP,\n\n  /** \\ingroup ngf\n   * Clear the attachment. */\n  NGF_LOAD_OP_CLEAR,\n  NGF_LOAD_OP_COUNT\n} ngf_attachment_load_op;\n\n/**\n * @enum ngf_attachment_store_op\n * \\ingroup ngf\n * Enumerates actions that can be performed on attachment \"store\" (at the end of a render pass).\n */\ntypedef enum ngf_attachment_store_op {\n  /**\n   * \\ingroup ngf\n   *\n   * Don't care what happens. Use this if you don't plan on reading back the\n   * contents of the attachment in any shaders, or presenting it to screen.\n   */\n  NGF_STORE_OP_DONTCARE = 0,\n\n  /**\n   * \\ingroup ngf\n   *\n   * Use this if you plan on reading the contents of the attachment in any shaders or\n   * presenting it to screen. 
The contents of the attachment shall be written out to system memory.\n   */\n  NGF_STORE_OP_STORE,\n\n  /**\n   * \ingroup ngf\n   *\n   * Use this to resolve a multisampled color attachment to a corresponding resolve attachment.\n   */\n  NGF_STORE_OP_RESOLVE,\n\n  NGF_STORE_OP_COUNT\n} ngf_attachment_store_op;\n\nstruct ngfi_private_encoder_data {\n  uintptr_t d0;\n  uintptr_t d1;\n};\n\n/**\n * @struct ngf_render_encoder\n * \ingroup ngf\n *\n * A render encoder records rendering commands (such as draw calls) into its\n * corresponding command buffer.\n */\ntypedef struct ngf_render_encoder {\n  struct ngfi_private_encoder_data pvt_data_donotuse;\n} ngf_render_encoder;\n\n/**\n * @struct ngf_xfer_encoder\n * \ingroup ngf\n *\n * A transfer encoder records transfer commands (i.e. copying buffer contents)\n * into its corresponding command buffer.\n */\ntypedef struct ngf_xfer_encoder {\n  struct ngfi_private_encoder_data pvt_data_donotuse;\n} ngf_xfer_encoder;\n\n/**\n * @struct ngf_compute_encoder\n * \ingroup ngf\n *\n * A compute encoder records compute dispatches into its corresponding command buffer.\n */\ntypedef struct ngf_compute_encoder {\n  struct ngfi_private_encoder_data pvt_data_donotuse;\n} ngf_compute_encoder;\n\n/**\n * @struct ngf_render_pass_info\n * \ingroup ngf\n * Information required to begin a render pass.\n */\ntypedef struct ngf_render_pass_info {\n  /**\n   * A render target that shall be rendered to during this pass.\n   */\n  ngf_render_target render_target;\n\n  /**\n   * A pointer to a buffer of \ref ngf_attachment_load_op enumerators specifying the operation to perform at\n   * the start of the render pass for each attachment of \ref ngf_render_pass_info::render_target.\n   * The buffer must have at least the same number of elements as there are attachments in the\n   * render target. 
The `i`th element of the buffer corresponds to the `i`th attachment.\n   */\n  const ngf_attachment_load_op* load_ops;\n\n  /**\n   * A pointer to a buffer of \ref ngf_attachment_store_op enumerators specifying the operation to perform at\n   * the end of the render pass for each attachment of \ref ngf_render_pass_info::render_target. The\n   * buffer must have at least the same number of elements as there are attachments in the render\n   * target. The `i`th element of the buffer corresponds to the `i`th attachment.\n   */\n  const ngf_attachment_store_op* store_ops;\n\n  /**\n   * If no attachment has a clear as its load op, this field may be NULL.\n   * Otherwise, it shall be a pointer to a buffer of \ref ngf_clear objects. The buffer must contain\n   * at least as many elements as there are attachments in the render target. The `i`th element of\n   * the buffer corresponds to the `i`th attachment. For attachments that are to be cleared at the\n   * beginning of the pass, the clear values from the corresponding element of the buffer are used.\n   * The rest of the buffer's elements are ignored.\n   */\n  const ngf_clear* clears;\n} ngf_render_pass_info;\n\n/**\n * @struct ngf_xfer_pass_info\n * \ingroup ngf\n *\n * Information required to begin a transfer pass.\n */\n\ntypedef struct ngf_xfer_pass_info {\n  void* reserved;\n} ngf_xfer_pass_info;\n\n/**\n * @struct ngf_compute_pass_info\n * \ingroup ngf\n *\n * Information required to begin a compute pass.\n */\ntypedef struct ngf_compute_pass_info {\n  void* reserved;\n} ngf_compute_pass_info;\n\n/**\n * @enum ngf_buffer_storage_type\n * \ingroup ngf\n * Enumerates types of memory backing a buffer object.\n */\ntypedef enum ngf_buffer_storage_type {\n  /**\n   * \ingroup ngf\n   * Memory that can be read by the host.\n   */\n  NGF_BUFFER_STORAGE_HOST_READABLE,\n\n  /**\n   * \ingroup ngf\n   * Memory that can be written to by the host.\n   */\n  NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n\n  /**\n   * \ingroup ngf\n   * 
Memory that can be both read from and written to by the\n   * host.\n   */\n  NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE,\n\n  /**\n   * \ingroup ngf\n   *\n   * Memory that is local to the device (GPU). Normally, this type of storage\n   * isn't accessible directly from the host and the contents of a\n   * buffer backed by this type of memory can only be modified by executing a\n   * \ref ngf_cmd_copy_buffer.\n   */\n  NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n\n  /**\n   * \ingroup ngf\n   *\n   * Memory that is both local to the device (GPU) and mappable/writeable directly\n   * from host. This type of storage is available only when the capability\n   * \ref ngf_device_capabilities::device_local_memory_is_host_visible is supported.\n   * Examples of systems that may support this type of storage are iGPUs or discrete\n   * GPUs with ReBAR enabled.\n   * Using this type of backing storage allows the host to write bytes directly into\n   * the mapped memory, obviating the need for staging buffers in some cases.\n   */\n  NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE,\n\n  /**\n   * \ingroup ngf\n   *\n   * Same as \ref NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE, but additionally allows\n   * the host to read directly from mapped memory.\n   */\n  NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE\n} ngf_buffer_storage_type;\n\n/**\n * @enum ngf_buffer_usage\n * \ingroup ngf\n * Enumerates the buffer usage flags. A valid buffer usage mask may be formed by combining a subset\n * of these values with a bitwise OR operator.\n */\ntypedef enum ngf_buffer_usage {\n  /** \ingroup ngf\n   * The buffer may be used as a source for transfer operations. */\n  NGF_BUFFER_USAGE_XFER_SRC = 0x01,\n\n  /** \ingroup ngf\n   * The buffer may be used as a destination for transfer operations. */\n  NGF_BUFFER_USAGE_XFER_DST = 0x02,\n\n  /** \ingroup ngf\n   * The buffer may be bound as a uniform buffer. 
*/\n  NGF_BUFFER_USAGE_UNIFORM_BUFFER = 0x04,\n\n  /** \\ingroup ngf\n   * The buffer may be used as the source of index data for indexed draws. */\n  NGF_BUFFER_USAGE_INDEX_BUFFER = 0x08,\n\n  /** \\ingroup ngf\n   * The buffer may be used as a source of vertex attribute data. */\n  NGF_BUFFER_USAGE_VERTEX_BUFFER = 0x10,\n\n  /** \\ingroup ngf\n   * The buffer may be bound as a uniform texel buffer. */\n  NGF_BUFFER_USAGE_TEXEL_BUFFER = 0x20,\n\n  /**\n   * \\ingroup ngf\n   * The buffer may be bound as a storage buffer. */\n  NGF_BUFFER_USAGE_STORAGE_BUFFER = 0x40,\n\n  NGF_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x80,\n  NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT = 0x100,\n  NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT = 0x200\n\n} ngf_buffer_usage;\n\n/**\n * @struct ngf_buffer_info\n * \\ingroup ngf\n * Information required to create a buffer object.\n */\ntypedef struct ngf_buffer_info {\n  size_t                  size;         /**< The size of the buffer in bytes. */\n  ngf_buffer_storage_type storage_type; /**< Flags specifying the preferred storage type.*/\n  uint32_t                buffer_usage; /**< Flags specifying the intended usage.*/\n} ngf_buffer_info;\n\n/**\n * @struct ngf_buffer\n * \\ingroup ngf\n *\n * An opaque handle to a buffer object.\n */\ntypedef struct ngf_buffer_t* ngf_buffer;\n\n/**\n * @struct ngf_buffer_slice\n * \\ingroup ngf\n *\n * A reference to a subregion of a buffer.\n */\ntypedef struct ngf_buffer_slice {\n  ngf_buffer buffer; /**< The handle of the buffer being referred to. */\n  size_t     offset; /**< Starting offset of the subregion. */\n  size_t     range;  /**< Size of the subregion. 
*/\n} ngf_buffer_slice;\n\n/**\n * @struct ngf_texel_buffer_view\n * \ingroup ngf\n *\n * GPU programs have to access texel buffers through special \"texel buffer view\" objects which\n * specify the exact format of the data stored in the buffer.\n * See also: \ref ngf_texel_buffer_view_info, \ref ngf_create_texel_buffer_view.\n */\ntypedef struct ngf_texel_buffer_view_t* ngf_texel_buffer_view;\n\n/**\n * @struct ngf_texel_buffer_view_info\n *\n * Information required to create a texel buffer view object.\n */\ntypedef struct ngf_texel_buffer_view_info {\n  ngf_buffer       buffer;       /**< The buffer that the view covers. */\n  size_t           offset;       /**< Offset within the buffer (in bytes) that the view covers. */\n  size_t           size;         /**< The size of the range (in bytes) that the view covers. */\n  ngf_image_format texel_format; /**< The texel format to interpret the buffer contents as. */\n} ngf_texel_buffer_view_info;\n\n/**\n * @struct ngf_buffer_bind_info\n * \ingroup ngf\n * Specifies a buffer resource bind operation.\n */\ntypedef struct ngf_buffer_bind_info {\n  ngf_buffer buffer; /**< Which buffer to bind.*/\n  size_t     offset; /**< Offset at which to bind the buffer.*/\n  size_t     range;  /**< Bound range.*/\n} ngf_buffer_bind_info;\n\n/**\n * @struct ngf_image_sampler_bind_info\n * \ingroup ngf\n * Specifies an image and/or sampler resource bind operation. To bind a combined image sampler, both\n * fields have to be set.\n */\ntypedef struct ngf_image_sampler_bind_info {\n  bool is_image_view;\n  union {\n    ngf_image      image;\n    ngf_image_view view;\n  } resource; /**< The image OR image view to bind. Can be NULL if binding just a sampler. */\n  ngf_sampler sampler; /**< The sampler to bind. Can be NULL if binding just an image. 
*/\n} ngf_image_sampler_bind_info;\n\n/**\n * @struct ngf_resource_bind_op\n * \ingroup ngf\n *\n * Specifies a resource binding operation.\n *\n * The resource binding model in nicegraf is similar to that of Vulkan. Shaders group their\n * resources into \"sets\", and individual slots within those sets are referred to as \"bindings\".\n * The main difference in nicegraf is that one does not have to explicitly allocate descriptor pools\n * like in Vulkan. Instead, the application code simply says which set and binding to assign a\n * particular resource to. Internally, some optimization may be performed to avoid redundant binds.\n * For backends that don't have a similar resource binding model (e.g. Metal), a special comment\n * must be added to the shader code that maps the backend's \"native\" binding model onto this one.\n * See \ref ngf_shader_stage_info::content for more details on that.\n */\ntypedef struct ngf_resource_bind_op {\n  uint32_t            target_set;     /**< Target set ID. */\n  uint32_t            target_binding; /**< Target binding ID. */\n  ngf_descriptor_type type;           /**< The type of the resource being bound. */\n  union {\n    ngf_buffer_bind_info        buffer;\n    ngf_texel_buffer_view       texel_buffer_view;\n    ngf_image_sampler_bind_info image_sampler;\n    uintptr_t                   acceleration_structure; /**< The opaque handle to the acceleration structure. */\n  } info;               /**< The details about the resource being bound, depending on type. */\n  uint32_t array_index; /**< Specifies the destination array index for bindings that are arrays. */\n} ngf_resource_bind_op;\n\n/**\n * @enum ngf_present_mode\n * \ingroup ngf\n * Enumerates possible presentation modes.\n * \"Presentation mode\" refers to the particular way the CPU,\n * GPU and the presentation engine interact. Some of the listed presentation modes\n * may not be supported on various backend, hardware or OS combinations. 
If an\n * unsupported mode is requested, nicegraf silently falls back onto \\ref NGF_PRESENTATION_MODE_FIFO.\n */\ntypedef enum ngf_present_mode {\n  /**\n   * \\ingroup ngf\n   *\n   * This is the only presentation mode that is guaranteed to be supported.\n   * In this mode, the presentation requests are queued internally, and the\n   * presentation engine waits for the vertical blanking signal to present\n   * the image at the front of the queue. This mode guarantees no\n   * frame tearing.\n   */\n  NGF_PRESENTATION_MODE_FIFO,\n\n  /**\n   * \\ingroup ngf\n   *\n   * In this mode, the presentation engine does not wait for the vertical blanking signal, instead\n   * presenting an image immediately. This mode results in lower latency but may induce frame\n   * tearing. It is not recommended to use this mode on mobile targets.\n   */\n  NGF_PRESENTATION_MODE_IMMEDIATE\n} ngf_present_mode;\n\n/**\n * Enumerates color spaces for swapchain images.\n * Check \\ref ngf_device_capabilities::colorspace_support to determine whether a particular color\n * space is supported.\n */\ntypedef enum ngf_colorspace {\n  NGF_COLORSPACE_SRGB_NONLINEAR = 0u,\n  NGF_COLORSPACE_EXTENDED_SRGB_NONLINEAR,\n  NGF_COLORSPACE_EXTENDED_SRGB_LINEAR,\n  NGF_COLORSPACE_DISPLAY_P3,\n  NGF_COLORSPACE_DISPLAY_P3_LINEAR,\n  NGF_COLORSPACE_DCI_P3,\n  NGF_COLORSPACE_ITUR_BT2020,\n  NGF_COLORSPACE_ITUR_BT2100_PQ,\n  NGF_COLORSPACE_COUNT\n} ngf_colorspace;\n\n/**\n * @struct ngf_swapchain_info\n * \\ingroup ngf\n * Swapchain configuration.\n */\ntypedef struct ngf_swapchain_info {\n  ngf_image_format color_format;  /**< Swapchain image format. */\n  ngf_colorspace   colorspace;    /**< Colorspace that the swapchain image uses. */\n  ngf_image_format depth_format;  /**< Format to use for the depth buffer, if set to\n                                     NGF_IMAGE_FORMAT_UNDEFINED, no depth buffer will be created. 
*/\n  ngf_sample_count sample_count;  /**< Number of samples per pixel (0 for non-multisampled) */\n  uint32_t         capacity_hint; /**< Number of images in swapchain (may be ignored)*/\n  uint32_t         width;         /**< Width of swapchain images in pixels. */\n  uint32_t         height;        /**< Height of swapchain images in pixels. */\n  ngf_present_mode present_mode;  /**< Desired present mode. */\n  uintptr_t        native_handle; /**< HWND, ANativeWindow, NSWindow, etc. */\n  bool             enable_compute_access; /**< Whether to enable access to swapchain images from compute stage. */\n} ngf_swapchain_info;\n\n/**\n * @struct ngf_context\n * \ingroup ngf\n * An opaque handle to a nicegraf rendering context.\n *\n * A context represents the internal state of the library that is required for\n * performing most of the library's functionality. This includes, but is not\n * limited to: presenting rendered content in a window; creating and managing\n * resources, such as images, buffers and command buffers; recording and\n * submitting command buffers.\n *\n * Most operations, with the exception of `ngf_init` and context management\n * functions themselves, require a context to be \"current\" on the calling\n * thread.\n *\n * Invoking `ngf_set_context` will make a context current on the calling\n * thread. Once a context is made current on a thread, it cannot be migrated to\n * another thread.\n *\n * The results of using resources created within one context, in another\n * context are undefined, unless the two contexts are explicitly configured to\n * share data. When contexts are configured as shared, resources created in one\n * can be used in the other, and vice versa. 
Notably, command buffers created\n * and recorded in one context, can be submitted in another, shared context.\n *\n * A context maintains exclusive ownership of its swapchain (if it has one),\n * and even shared contexts cannot acquire, present or render to images from\n * that swapchain.\n *\n * See also: \ref ngf_context_info and \ref ngf_create_context.\n */\ntypedef struct ngf_context_t* ngf_context;\n\n/**\n * @struct ngf_context_info\n * \ingroup ngf\n * Configures a nicegraf rendering context.\n */\ntypedef struct ngf_context_info {\n  /**\n   * Configures the swapchain that the context will be presenting to. This\n   * can be NULL if all rendering is done off-screen and the context never\n   * presents to a window.\n   */\n  const ngf_swapchain_info* swapchain_info;\n\n  /**\n   * A reference to another context; the newly created context shall be able to use the resources\n   * (such as buffers and images) created within the given context, and vice versa. Can be NULL.\n   */\n  const ngf_context shared_context;\n} ngf_context_info;\n\n/**\n * @struct ngf_cmd_buffer_info\n * \ingroup ngf\n * Information about a command buffer.\n */\ntypedef struct ngf_cmd_buffer_info {\n  uint32_t reserved;\n} ngf_cmd_buffer_info;\n\n/**\n * @struct ngf_cmd_buffer\n * \ingroup ngf\n * Encodes a series of rendering commands.\n *\n * Internally, a command buffer may be in any of the following five states:\n *   - new;\n *   - ready;\n *   - recording;\n *   - awaiting submission;\n *   - submitted.\n *\n * Every newly created command buffer is in the \"new\" state. It can be\n * transitioned to the \"ready\" state by calling \ref ngf_start_cmd_buffer on it.\n *\n * When a command buffer is in the \"ready\" state, you may begin recording a new\n * series of rendering commands into it.\n *\n * Recording commands into a command buffer is performed using command\n * encoders. 
There are a few different types of encoders, supporting different\n * types of commands.\n *\n * A new encoder may be created for a command buffer only if the command buffer\n * is in either the \"ready\" or the \"awaiting submission\" state.\n *\n * Creating a new encoder for a command buffer transitions that command buffer\n * to the \"recording\" state.\n *\n * Finishing and disposing of an active encoder transitions its corresponding\n * command buffer into the \"awaiting submission\" state.\n *\n * The three rules above mean that a command buffer may not have more than\n * one encoder active at a given time.\n *\n * Once all of the desired commands have been recorded, and the command buffer\n * is in the \"awaiting submission\" state, the command buffer may be submitted\n * for execution via a call to \\ref ngf_submit_cmd_buffers, which transitions it\n * into the \"submitted\" state.\n *\n * Submission may only be performed on command buffers that are in the\n * \"awaiting submission\" state.\n *\n * Once a command buffer is in the \"submitted\" state, it is\n * impossible to append any new commands to it.\n * It is, however, possible to begin recording a new, completely separate batch\n * of commands by calling \\ref ngf_start_cmd_buffer which implicitly\n * transitions the buffer to the \"ready\" state if it is already \"submitted\".\n * This does not affect any previously submitted commands.\n *\n * Calling a command buffer function on a buffer that is in a state not\n * expected by that function will result in an error. For example, calling\n * \\ref ngf_submit_cmd_buffers would produce an error on a buffer that is in\n * the \"ready\" state, since, according to the rules outlined above,\n * \\ref ngf_submit_cmd_buffers expects command buffers to be in the \"awaiting\n * submission\" state.\n *\n */\ntypedef struct ngf_cmd_buffer_t* ngf_cmd_buffer;\n\n/**\n * @typedef ngf_frame_token\n * \\ingroup ngf\n * A token identifying a frame of rendering. 
See \\ref ngf_begin_frame and \\ref ngf_end_frame for\n * details.\n */\ntypedef uintptr_t ngf_frame_token;\n\n/**\n * This is a special value used within the \\ref ngf_device_capabilities structure\n * to indicate that a limit value (i.e. max texture size) is not known or not\n * relevant for the current backend.\n */\n#define NGF_DEVICE_LIMIT_UNKNOWN (~0u)\n\n/**\n * @struct ngf_device_capabilities\n * \\ingroup ngf\n * Contains information about various device features, limits, etc. Clients\n * shouldn't instantiate this structure. See \\ref ngf_get_device_capabilities.\n */\ntypedef struct ngf_device_capabilities {\n  /**\n   * When binding uniform buffers, the specified offset must be\n   * a multiple of this number.\n   */\n  size_t uniform_buffer_offset_alignment;\n\n  /**\n   * When binding storage buffers, the specified offset must be a multiple of this number.\n   */\n  size_t storage_buffer_offset_alignment;\n\n  /**\n   * When binding a uniform buffer, the specified range must not exceed\n   * this value.\n   */\n  size_t max_uniform_buffer_range;\n\n  /**\n   * When binding texel buffers, the specified offset must be\n   * a multiple of this number.\n   */\n  size_t texel_buffer_offset_alignment;\n\n  /**\n   * The maximum allowed number of vertex attributes per pipeline.\n   */\n  size_t max_vertex_input_attributes_per_pipeline;\n\n  /**\n   * The maximum allowed number of sampled images (textures) per single\n   * shader stage. 
Descriptors with type \\ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER\n   * and \\ref NGF_DESCRIPTOR_TEXEL_BUFFER do count against this limit.\n   */\n  size_t max_sampled_images_per_stage;\n\n  /**\n   * The maximum allowed number of sampler objects per single shader stage.\n   * Descriptors with type \\ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER do count against\n   * this limit.\n   */\n  size_t max_samplers_per_stage;\n\n  /**\n   * The maximum allowed number of uniform buffers per single shader stage.\n   */\n  size_t max_uniform_buffers_per_stage;\n\n  /**\n   * This is the maximum number of _components_, across all inputs, for the fragment\n   * stage. \"Input component\" refers to the individual components of an input vector.\n   * For example, if the fragment stage has a single float4 input (vector of 4 floats),\n   * then it has 4 input components.\n   */\n  size_t max_fragment_input_components;\n\n  /**\n   * This is the maximum number of inputs for the fragment stage.\n   */\n  size_t max_fragment_inputs;\n\n  /**\n   * Maximum allowed width of a 1D image.\n   */\n  size_t max_1d_image_dimension;\n\n  /**\n   * Maximum allowed width, or height of a 2D image.\n   */\n  size_t max_2d_image_dimension;\n\n  /**\n   * Maximum allowed width, height, or depth of a 3D image.\n   */\n\n  size_t max_3d_image_dimension;\n\n  /**\n   * Maximum allowed width, or height of a cubemap.\n   */\n  size_t max_cube_image_dimension;\n\n  /**\n   * Maximum allowed number of layers in an image.\n   */\n  size_t max_image_layers;\n\n  /**\n   * Maximum number of color attachments that can be written to\n   * during a render pass.\n   */\n  size_t max_color_attachments_per_pass;\n\n  /**\n   * The maximum degree of sampler anisotropy.\n   */\n  float max_sampler_anisotropy;\n\n  /**\n   * This flag is set to `true` if the platform supports [0; 1]\n   * range for the clip-space z coordinate. nicegraf enforces clip-space\n   * z to be in this range on all backends that support it. 
This ensures\n   * better precision for near-field objects.\n   * See the following for an in-depth explanation:\n   * http://web.archive.org/web/20210829130722/https://developer.nvidia.com/content/depth-precision-visualized\n   */\n  bool clipspace_z_zero_to_one;\n\n  /**\n   * This flag is set to true if the device supports cubemap arrays.\n   */\n  bool cubemap_arrays_supported;\n\n  /**\n   * Bitmap representing multisample count support for framebuffer color attachments\n   * For example, (framebuffer_color_sample_counts & 16) indicates support for 16 samples\n   */\n  size_t framebuffer_color_sample_counts;\n\n  /**\n   * The highest supported sample count for framebuffer color attachments.\n   * This value is derived from \\ref framebuffer_color_sample_counts.\n   */\n  ngf_sample_count max_supported_framebuffer_color_sample_count;\n\n  /**\n   * Bitmap representing multisample count support for framebuffer depth attachments\n   * For example, (framebuffer_depth_sample_counts & 16) indicates support for 16 samples\n   */\n  size_t framebuffer_depth_sample_counts;\n\n  /**\n   * The highest supported sample count for framebuffer depth attachments.\n   * This value is derived from \\ref framebuffer_depth_sample_counts.\n   */\n  ngf_sample_count max_supported_framebuffer_depth_sample_count;\n\n  /**\n   * Bitmap representing multisample count support for color textures\n   * For example, (texture_color_sample_counts & 16) indicates support for 16 samples\n   */\n  size_t texture_color_sample_counts;\n\n  /**\n   * The highest supported sample count for color textures.\n   * This value is derived from \\ref texture_color_sample_counts.\n   */\n  ngf_sample_count max_supported_texture_color_sample_count;\n\n  /**\n   * Bitmap representing multisample count support for depth textures\n   * For example, (texture_depth_sample_counts & 16) indicates support for 16 samples\n   */\n  size_t texture_depth_sample_counts;\n\n  /**\n   * The highest supported sample count 
for depth textures.\n   * This value is derived from \ref texture_depth_sample_counts.\n   */\n  ngf_sample_count max_supported_texture_depth_sample_count;\n\n  /**\n   * Indicates whether the device-local storage is also host visible.\n   * Examples of cases where this may be supported are iGPU systems with unified memory,\n   * or discrete GPUs with ReBAR enabled.\n   * On systems with this capability, device-local storage can be mapped directly into\n   * the host address space, removing the need for host-visible staging buffers in certain\n   * cases.\n   */\n  bool device_local_memory_is_host_visible;\n\n  /**\n   * Indicates whether the device is capable of inline raytracing.\n   */\n  bool supports_inline_raytracing;\n\n} ngf_device_capabilities;\n\n/**\n * Maximum length of a device's name.\n * \ingroup ngf\n */\n#define NGF_DEVICE_NAME_MAX_LENGTH (256u)\n\n/**\n * @struct ngf_device\n * Information about a rendering device.\n * See also: \ref ngf_get_device_list\n * \ingroup ngf\n */\ntypedef struct ngf_device {\n  ngf_device_performance_tier performance_tier; /**< Device's performance tier. */\n  ngf_device_handle           handle; /**< A handle to be passed to \ref ngf_initialize. */\n\n  /**\n   * A string associated with the device. This is _not_ guaranteed to be unique per device.\n   */\n  char name[NGF_DEVICE_NAME_MAX_LENGTH];\n\n  ngf_device_capabilities capabilities; /**< Device capabilities and limits. */\n} ngf_device;\n\n/**\n * @struct ngf_image_write\n * Specifies an operation writing data from a source buffer into a mip level of an image.\n *\n * See \ref ngf_cmd_write_image.\n */\ntypedef struct ngf_image_write {\n  size_t       src_offset;     /**< Data offset in the source buffer. */\n  ngf_offset3d dst_offset;     /**< Offset in texels of the subregion to write. */\n  ngf_extent3d extent;         /**< Size in texels of the subregion to write. */\n  uint32_t     dst_level;      /**< Destination mip level. 
*/\n  uint32_t     dst_base_layer; /**< Starting destination layer. */\n  uint32_t     nlayers;        /**< Number of layers to copy for the specified mip level. */\n} ngf_image_write;\n\n#ifdef _MSC_VER\n#pragma endregion\n\n#pragma region ngf_function_declarations\n#endif\n\n/**\n * \ingroup ngf\n *\n * Obtains a list of rendering devices available to nicegraf.\n *\n * This function is not thread-safe.\n * The devices are not returned in any particular order, and the order is not guaranteed to be the\n * same every time the function is called.\n * @param devices pointer to a pointer to `const` \ref ngf_device. If not `NULL`, this will be\n * populated with a pointer to an array of \ref ngf_device instances, each containing data about a\n * rendering device available to the system. Callers should not attempt to free the returned\n * pointer.\n * @param ndevices pointer to a `uint32_t`. If not NULL, the number of available rendering devices\n * shall be written to the memory pointed to by this parameter.\n */\nngf_error ngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Initializes nicegraf.\n *\n * The client should call this function only once during the\n * entire lifetime of the application. This function is not thread safe.\n * @param init_info Initialization parameters.\n */\nngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * De-initializes nicegraf.\n *\n * The client should call this function only once during the\n * entire lifetime of the application. 
Must be called after\n * \\ref ngf_initialize and after \\ref ngf_destroy_context has\n * been called on every initialized \\ref ngf_context.\n */\nvoid ngf_shutdown() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new \\ref ngf_context.\n *\n * @param info The context configuration.\n */\nngf_error ngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given \\ref ngf_context.\n *\n * @param ctx The context to destroy.\n */\nvoid ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Adjust the images associated with the given context's swapchain.\n\n * This function must be called every time that the window the context's presenting to is resized.\n * It is up to the client application to detect the resize events and call this function.\n * Not calling this function on resize results in undefined behavior.\n *\n * @param ctx The context to operate on\n * @param new_width New window client area width in pixels\n * @param new_height New window client area height in pixels\n */\nngf_error ngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Set the given nicegraf context as current for the calling thread.\n *\n * All subsequent rendering operations invoked from the calling thread shall affect\n * the given context.\n *\n * Once a context has been set as current on a thread, it cannot be migrated to\n * another thread.\n *\n * @param ctx The context to set as current.\n */\nngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Get the active nicegraf context associated with the calling thread.\n *\n * Returns NULL if no context associated with the calling thread exists.\n */\nngf_context ngf_get_context() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Begin a frame of rendering.\n *\n * This function starts a frame of rendering in the calling thread's 
current context.\n * It generates a special token associated with the frame, which is required for recording\n * command buffers (see \\ref ngf_start_cmd_buffer).\n * @param token A pointer to a \\ref ngf_frame_token. The generated frame token shall be returned\n * here.\n */\nngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * End the current frame of rendering on the calling thread's context.\n *\n * @param token The frame token generated by the corresponding preceding call to \\ref\n * ngf_begin_frame.\n */\nngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Obtain a handle to the current swapchain image.\n *\n * The obtained handle should not be destroyed, or persisted across frames by the calling code.\n * Only use it to bind the current swapchain image as a resource accessed from the compute stage.\n *\n * @param token The frame token generated by the last call to \\ref ngf_begin_frame.\n * @param result The pointer to the swapchain image handle shall be written here.\n */\nngf_error ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * @return A pointer to an \\ref ngf_device_capabilities instance, or NULL, if no context is present\n *         on the calling thread.\n */\nconst ngf_device_capabilities* ngf_get_device_capabilities(void) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new shader stage object.\n *\n * @param stages Information required to construct the shader stage object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error\nngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given shader stage.\n *\n * @param stage The handle to the shader stage object to be destroyed.\n */\nvoid ngf_destroy_shader_stage(ngf_shader_stage stage) 
NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new graphics pipeline object.\n *\n * @param info Information required to construct the graphics pipeline object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error ngf_create_graphics_pipeline(\n    const ngf_graphics_pipeline_info* info,\n    ngf_graphics_pipeline*            result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given graphics pipeline object.\n *\n * @param pipeline The handle to the pipeline object to be destroyed.\n */\nvoid ngf_destroy_graphics_pipeline(ngf_graphics_pipeline pipeline) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new compute pipeline object.\n *\n * @param info Information required to construct the compute pipeline object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error ngf_create_compute_pipeline(\n    const ngf_compute_pipeline_info* info,\n    ngf_compute_pipeline*            result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given compute pipeline object.\n *\n * @param pipeline The handle to the pipeline object to be destroyed.\n */\nvoid ngf_destroy_compute_pipeline(ngf_compute_pipeline pipeline) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new image object.\n *\n * @param info Information required to construct the image object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given image object.\n *\n * @param image The handle to the image object to be destroyed.\n */\nvoid ngf_destroy_image(ngf_image image) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new image view object.\n *\n * @param info Information required to construct the image view object.\n * @param result Pointer to where the handle to the 
newly created object will be returned.\n */\nngf_error\nngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Destroys the given image view object.\n *\n * @param image_view The handle to the image view object to be destroyed.\n */\nvoid ngf_destroy_image_view(ngf_image_view image_view) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Creates a new sampler object.\n *\n * @param info Information required to construct the sampler object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error ngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Destroys the given sampler object.\n *\n * @param sampler The handle to the sampler object to be destroyed.\n */\nvoid ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Create a new rendertarget object.\n *\n * @param info Information required to construct the rendertarget object.\n * @param result Pointer to where the handle to the newly created object will be returned.\n */\nngf_error ngf_create_render_target(const ngf_render_target_info* info, ngf_render_target* result)\n    NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Destroys the given render target.\n *\n * @param rendertarget The handle to the rendertarget object to be destroyed.\n */\nvoid ngf_destroy_render_target(ngf_render_target rendertarget) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Returns the handle to the \ref ngf_render_target associated with the current context's\n * swapchain (aka the default render target). If the current context does not have a swapchain, the\n * result shall be null. Otherwise, it shall be a render target that has a color attachment\n * associated with the context's swapchain. 
If the swapchain was created with an accompanying depth\n * buffer, the render target shall have an attachment for that as well.\n *\n * The caller should not attempt to destroy the returned render target. It shall\n * be destroyed automatically, together with the parent context.\n */\nngf_render_target ngf_default_render_target() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Returns the attachment descriptions for the default render target. The caller should not attempt\n * to free the returned pointer or modify the contents of the memory it points to.\n */\nconst ngf_attachment_descriptions* ngf_default_render_target_attachment_descs() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new buffer object.\n *\n * @param info Information required to construct the buffer object.\n * @param result Pointer to where the handle to the newly created object will be written to.\n */\nngf_error ngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given buffer object.\n *\n * @param buffer The handle to the buffer object to be destroyed.\n */\nvoid ngf_destroy_buffer(ngf_buffer buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Maps a region of a given buffer to host memory.\n *\n * It is an error to bind a buffer that is currently mapped using any command. If a buffer that\n * needs to be bound is mapped, first call \\ref ngf_buffer_flush_range to ensure any new data in the\n * mapped range becomes visible to the subsequent commands, then call \\ref ngf_buffer_unmap. Writing\n * into any region that could be in use by previously submitted commands results in undefined\n * behavior.\n *\n * @param buf The handle to the buffer to be mapped.\n * @param offset The offset at which the mapped region starts, in bytes. It must\n *               satisfy platform-specific alignment requirements. 
See, for example, \\ref\n *               ngf_device_capabilities::uniform_buffer_offset_alignment and \\ref\n *               ngf_device_capabilities::texel_buffer_offet_alignment.\n * @param size  The size of the mapped region, in bytes.\n * @param flags A combination of flags from \\ref ngf_buffer_map_flags.\n * @return A pointer to the mapped memory, or NULL if the buffer could not be mapped.\n */\nvoid* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Ensures that any writes performed by the CPU into the mapped range are be visible to subsequently\n * submitted rendering commands executed by the rendering device.\n * @param ptr The handle to the buffer that needs to be flushed.\n * @param offset The offset, relative to the start of the mapped range, at which\n *               the flushed region starts, in bytes.\n * @param size  The size of the flushed region, in bytes.\n */\nvoid ngf_buffer_flush_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Unmaps a previously mapped buffer.\n *\n * If multiple regions were mapped, all of them are unmapped. 
Any pointers returned by prior calls\n * to \\ref ngf_buffer_map_range are invalidated.\n *\n * @param buf The buffer that needs to be unmapped.\n */\nvoid ngf_buffer_unmap(ngf_buffer buf) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Creates a new texel buffer view object.\n *\n * @param info Information required to construct the texel buffer view object.\n * @param result Pointer to where the handle to the newly created object will be written to.\n */\nngf_error ngf_create_texel_buffer_view(\n    const ngf_texel_buffer_view_info* info,\n    ngf_texel_buffer_view*            result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given texel buffer view object.\n *\n * @param buffer The handle to the texel buffer view object to be destroyed.\n */\nvoid ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Waits for all pending rendering commands to complete.\n *\n * Do not use this function lightly. It is expensive because it introduces a sync point between the\n * CPU and the rendering device.\n */\nvoid ngf_finish(void) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Maximum size, in bytes, of the inline data block that may be set on an encoder via\n * \\ref ngf_set_bytes / \\ref ngf_set_compute_bytes. Matches Vulkan's portability floor for\n * push constants. Every pipeline created by nicegraf reserves a push-constant range of\n * this size, free for any shader to consume via `[[vk::push_constant]]`.\n */\n#define NGF_MAX_ENCODER_INLINE_BYTES 128u\n\n/**\n * \\ingroup ngf\n * Sets a small inline data block visible to subsequent draws in the underlying\n * command buffer. 
May be called before or after binding a pipeline; pushed\n * values persist across pipeline binds within the same encoder.\n *\n * `size_bytes` must be <= \\ref NGF_MAX_ENCODER_INLINE_BYTES and a multiple of 4.\n * Returns \\ref NGF_ERROR_INVALID_SIZE if either constraint is violated.\n * `data == NULL` or `size_bytes == 0` is a silent no-op.\n */\nngf_error ngf_set_bytes(ngf_render_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Compute counterpart of \\ref ngf_set_bytes.\n */\nngf_error ngf_set_compute_bytes(ngf_compute_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Marks the given resources as \"read-only\". Once a resource has been marked as read-only,\n * nicegraf's internal hazard-tracking operations may be omitted for it, improving CPU\n * performance. Performing any modifying operations on a resource that had previously been\n * marked as \"read-only\" results in undefined behaviour.\n * \n * @param img A pointer to an array of handles to images, which are to be marked as read-only.\n * @param nimgs The number of images to be marked as read-only.\n * @param bufs A pointer to an array of handles to buffers, which are to be marked as read-only.\n * @param nbufs The number of buffers to be marked as read-only.\n */\nvoid ngf_mark_read_only(ngf_image* imgs, uint32_t nimgs, ngf_buffer* bufs, uint32_t nbufs) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Creates a new command buffer.\n *\n * @param info The information required to create the new command buffer.\n * @param result Pointer to where the handle to the newly created command buffer will be returned.\n */\nngf_error\nngf_create_cmd_buffer(const ngf_cmd_buffer_info* info, ngf_cmd_buffer* result) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Destroys the given command buffer.\n *\n * If there is any work submitted via the given command buffer still pending on the rendering\n * device, it shall be executed 
asynchronously. Therefore, application code doesn't need to wait for\n * the commands associated with the command buffer to finish before it can safely dispose of the\n * command buffer.\n *\n * @param buffer The handle to the command buffer object to be destroyed.\n */\nvoid ngf_destroy_cmd_buffer(ngf_cmd_buffer buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Resets the command buffer.\n *\n * Erases all the commands previously recorded into the given command buffer,\n * and prepares it for recording commands to be submitted within the frame\n * identified by the specified token.\n *\n * The command buffer is required to be in the \"ready\" state.\n *\n * @param buf The handle to the command buffer to operate on\n * @param token The token for the frame within which the recorded commands are going to be\n *              submitted.\n */\nngf_error ngf_start_cmd_buffer(ngf_cmd_buffer buf, ngf_frame_token token) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Submits the commands recorded in the given command buffers for execution.\n * All command buffers must be in the \"awaiting submission\" state, and shall be transitioned to the\n * \"submitted\" state.\n *\n * @param nbuffers The number of command buffers being submitted for execution.\n * @param bufs A pointer to a contiguous array of \\ref nbuffers handles to command buffer objects to\n *             be submitted for execution.\n */\nngf_error ngf_submit_cmd_buffers(uint32_t nbuffers, ngf_cmd_buffer* bufs) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Begins a new render pass.\n\n * A render pass can be thought of as a sequence of rendering commands associated with a particular\n * render target. At the start of the pass, an \"load operation\" is performed for each attachment.\n The application code\n * may specify exactly what load operations to perform for each individual attachment. After that,\n all the\n * rendering commands are executed. 
Finally, at the end of the pass, a \"store operation\" is\n performed for each attachment.\n * Again, the application code may specify exactly which store operations to perform for each\n individual attachment.\n * @param buf The command buffer to operate on. Must be in the \"ready\" state, shall be transitioned\n *            to the \"recording\" state.\n * @param pass_info Specifies the renderpass parameters, such as load and store operations.\n * @param enc Pointer to memory into which a handle to a render encoder will be returned. All the\n *            commands associated with the renderpass must be recorded using that encoder.\n */\nngf_error ngf_cmd_begin_render_pass(\n    ngf_cmd_buffer              buf,\n    const ngf_render_pass_info* pass_info,\n    ngf_render_encoder*         enc) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Similar to \ref ngf_cmd_begin_render_pass, but with some choices pre-made:\n *   - All color attachments of the render target are cleared to the specified color.\n *   - Depth and stencil attachments are cleared to the specified respective values (if they are\n * present).\n *   - The store action for any attachment that is not marked as \"sampled from\" (see \ref\n *     ngf_attachment_description::is_sampled), is set to NGF_STORE_OP_DONTCARE.\n *   - The store action for attachments marked as \"sampled from\", is set to NGF_STORE_OP_STORE.\n * @param buf The command buffer to operate on.\n * @param rt The handle to the render target to use for the pass.\n * @param clear_color_r The red component of the clear color to be used on color attachments.\n *                      Ignored for attachments that don't have that channel.\n * @param clear_color_g The green component of the clear color to be used on color attachments.\n *                      Ignored for attachments that don't have that channel.\n * @param clear_color_b The blue component of the clear color to be used on color attachments.\n *                      Ignored for 
attachments that don't have that channel.\n * @param clear_color_a The alpha component of the clear color to be used on color attachments.\n *                      Ignored for attachments that don't have that channel.\n * @param clear_depth The value to clear the depth attachment to (if it is present).\n * @param clear_stencil The value to clear the stencil attachment to (if it is present).\n */\nngf_error ngf_cmd_begin_render_pass_simple(\n    ngf_cmd_buffer      buf,\n    ngf_render_target   rt,\n    float               clear_color_r,\n    float               clear_color_g,\n    float               clear_color_b,\n    float               clear_color_a,\n    float               clear_depth,\n    uint32_t            clear_stencil,\n    ngf_render_encoder* enc) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Ends a render pass.\n *\n * Disposes of the given render command encoder, transitioning its corresponding\n * command buffer to the \"ready\" state.\n */\nngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Begins a transfer pass.\n *\n * A transfer pass is a sequence of commands that copy data.\n *\n * @param buf The handle to the command buffer to operate on. Must be in the \"ready\"\n *            state, will be transitioned to the \"recording\" state.\n * @param pass_info Pointer to \ref ngf_xfer_pass_info specifying details about this transfer pass.\n * @param enc Pointer to memory where a handle to a transfer encoder shall be returned. 
All commands\n *            associated with the transfer pass must be recorded using that encoder.\n */\nngf_error ngf_cmd_begin_xfer_pass(\n    ngf_cmd_buffer            buf,\n    const ngf_xfer_pass_info* pass_info,\n    ngf_xfer_encoder*         enc) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Ends a transfer pass.\n *\n * Disposes of the given transfer cmd encoder, transitioning its corresponding\n * command buffer to the \"ready\" state.\n */\nngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Begins a compute pass.\n *\n * @param buf The handle of the command buffer to operate on. Must be in the \"ready\"\n *             state, will be transitioned to the \"recording\" state.\n * @param pass_info A pointer to \\ref ngf_compute_pass_info specifying details about this compute\n * pass.\n * @param enc Pointer to memory where a handle to a transfer encoder shall be returned. All commands\n *            associated with the transfer pass must be recorded using that encoder.\n */\nngf_error ngf_cmd_begin_compute_pass(\n    ngf_cmd_buffer               buf,\n    const ngf_compute_pass_info* pass_info,\n    ngf_compute_encoder*         enc) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Ends a compute pass.\n *\n * Disposes of the given compute cmd encoder, transitioning its corresponding\n * command buffer to the \"ready\" state.\n */\nngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Binds a graphics pipeline.\n */\nvoid ngf_cmd_bind_gfx_pipeline(ngf_render_encoder buf, ngf_graphics_pipeline pipeline) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Binds a compute pipeline.\n */\nvoid ngf_cmd_bind_compute_pipeline(ngf_compute_encoder buf, ngf_compute_pipeline pipeline)\n    NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the viewport to be used in subsequent rendering commands.\n * The viewport defines a region of the destination framebuffer that the 
resulting rendering\n * is scaled to fit into.\n */\nvoid ngf_cmd_viewport(ngf_render_encoder buf, const ngf_irect2d* r) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the scissor region to be used in the subsequent rendering commands.\n * The scissor defines a region of the framebuffer that can be affected by the rendering commands.\n * Any pixels outside of that region are not written to.\n */\nvoid ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the reference value to be used in stencil tests.\n */\nvoid ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Set the compare mask to be used in stencil tests.\n */\nvoid ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back)\n    NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Sets the stencil write mask.\n */\nvoid ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Configures a bias value to be added to the depth of each rasterized fragment.\n * Unclamped bias `b` is computed as follows:\n *\n *  `b = const_scale * r + max_slope * slope_scale`\n *\n * where:\n *  - `r` is a constant value dependent on the format of the depth buffer and other factors,\n * representing the minimum absolute difference between two rasterized depth values.\n *  - `max_slope` is ideally the length of the depth function's gradient vector at the point\n * corresponding to the fragment (but can be approximated by `max(|dZ/dx|, |dZ/dy|)`.\n *\n * The final bias `B`, which is added to the fragment depth, is computed as follows:\n *\n * `B = clamp > 0.0f ? min(clamp, b) : (clamp < 0.0f ? 
max(clamp, b) : b)`\n *\n * Requires the bound pipeline to have depth bias enabled to have effect.\n * See \\ref ngf_rasterization_info::enable_depth_bias.\n */\nvoid ngf_cmd_set_depth_bias(\n    ngf_render_encoder enc,\n    float              const_scale,\n    float              slope_scale,\n    float              clamp) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Bind resources for shaders to access. See ngf_resource_bind_op for more information.\n *\n * @param enc The handle to the render encoder object to record the command into.\n * @param bind_operations A pointer to a contiguous array of \\ref ngf_resource_bind_op objects.\n * @param nbinds The number of elements in the array pointed to by \\ref bind_operations.\n */\nvoid ngf_cmd_bind_resources(\n    ngf_render_encoder          enc,\n    const ngf_resource_bind_op* bind_operations,\n    uint32_t                    nbind_operations) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Bind resources for shaders to access. See ngf_resource_bind_op for more information.\n *\n * @param enc The handle to the render encoder object to record the command into.\n * @param bind_operations A pointer to a contiguous array of \\ref ngf_resource_bind_op objects.\n * @param nbinds The number of elements in the array pointed to by \\ref bind_operations.\n */\nvoid ngf_cmd_bind_compute_resources(\n    ngf_compute_encoder         enc,\n    const ngf_resource_bind_op* bind_operations,\n    uint32_t                    nbind_operations) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Binds a vertex attribute buffer to be used in the next draw.\n *\n * @param enc The handle to the render encoder.\n * @param vbuf The handle to the vertex buffer to bind.\n * @param binding The vertex buffer binding ID to bind the buffer to.\n * @param offset The offset (in bytes) to bind at.\n */\nvoid ngf_cmd_bind_attrib_buffer(\n    ngf_render_encoder enc,\n    ngf_buffer         vbuf,\n    uint32_t           binding,\n    size_t             
offset) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Binds an index buffer to be used in the next draw.\n *\n * @param enc The handle to the render encoder.\n * @param idxbuf The handle to the index buffer to bind.\n * @param offset The offset at which to bind the buffer (in bytes).\n * @param index_type The type of values that are stored in the index buffer. Can be either \\ref\n *                   NGF_TYPE_UINT32 or \\ref NGF_TYPE_UINT16.\n */\nvoid ngf_cmd_bind_index_buffer(\n    ngf_render_encoder enc,\n    ngf_buffer         idxbuf,\n    size_t             offset,\n    ngf_type           index_type) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Executes a draw.\n * This command is not supported by compute-type command buffers.\n *\n * @param enc The render encoder to record the command into.\n * @param indexed Indicates whether the draw uses an index buffer or not.\n * @param first_element Offset of the first vertex.\n * @param nelements Number of vertices to process.\n * @param ninstance Number of instances (use `1` for regular non-instanced draws).\n */\nvoid ngf_cmd_draw(\n    ngf_render_encoder enc,\n    bool               indexed,\n    uint32_t           first_element,\n    uint32_t           nelements,\n    uint32_t           ninstances) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Encodes a compute shader dispatch.\n * This command is not supported by draw-type buffers.\n *\n * @param enc The encoder to record the command into.\n * @param x_threadgroups Number of threadgroups along the X dimension of the grid.\n * @param y_threadgroups Number of threadgroups along the Y dimension of the grid.\n * @param z_threadgroups Number of threadgroups along the Z dimension of the grid.\n */\nvoid ngf_cmd_dispatch(\n    ngf_compute_encoder enc,\n    uint32_t            x_threadgroups,\n    uint32_t            y_threadgroups,\n    uint32_t            z_threadgroups) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Copies data between buffers.\n *\n * @param enc 
The handle to the transfer encoder object to record the command into.\n * @param src The handle to the buffer object to be copied from.\n * @param dst The handle to the buffer object to be copied into.\n * @param size The size of the copied region, in bytes.\n * @param src_offset The offset in the source buffer to copy from.\n * @param dst_offset The offset in the destination buffer to copy into.\n */\nvoid ngf_cmd_copy_buffer(\n    ngf_xfer_encoder enc,\n    ngf_buffer       src,\n    ngf_buffer       dst,\n    size_t           size,\n    size_t           src_offset,\n    size_t           dst_offset) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Copies data from a buffer into an image.\n *\n * For non-compressed formats, the source data is assumed to be arranged in a simple linear layout.\n * Cubemap faces and layers are assumed to be stored successively in the source buffer, from first\n * to last. For each layer, the first texel corresponds to the lower left corner of the image, and\n * the subsequent texels progress from left to right, through the remainder of the bottom row, and\n * from then on, through higher rows.\n *\n * @param enc The handle to the transfer encoder object to record the command into.\n * @param src The handle to the buffer object to be copied from.\n * @param dst The image that the data from the buffer shall be written into.\n * @param writes A pointer to an array of \\ref ngf_image_write objects, each describing a write to a\n * mip level of the image to be written.\n * @param nwrites Number of objects in the `writes` array.\n */\nvoid ngf_cmd_write_image(\n    ngf_xfer_encoder       enc,\n    ngf_buffer             src,\n    ngf_image              dst,\n    const ngf_image_write* writes,\n    uint32_t               nwrites) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Copies data from an image to a buffer.\n *\n * @param enc The handle to the transfer encoder object to record the command into.\n * @param src Reference to the image 
region that shall be copied from.\n * @param src_offset The offset in the source image from which to start copying.\n * @param src_extent The size of the region in the source mip level being copied.\n * @param nlayers The number of layers to be copied.\n * @param dst Reference to the image region that shall be written to.\n * @param dst_offset Offset within the target mip level to write to (in texels).\n */\nvoid ngf_cmd_copy_image_to_buffer(\n    ngf_xfer_encoder    enc,\n    const ngf_image_ref src,\n    ngf_offset3d        src_offset,\n    ngf_extent3d        src_extent,\n    uint32_t            nlayers,\n    ngf_buffer          dst,\n    size_t              dst_offset) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Generates mipmaps automatically.\n *\n * Mipmaps are generated for all layers of the given image, from level 1 to the maximum level\n * specified when creating the image, using the data from the preceding level as the source. Level 0\n * of each layer is expected to be populated by the application code prior to calling this function.\n *\n * @param xfenc A transfer command encoder.\n * @param img The handle to the image to operate on.\n */\nngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT;\n\n/**\n * \ingroup ngf\n *\n * Records the beginning of a \"debug group\" into the given command buffer.\n *\n * Debug groups are a way to group together related commands for easier viewing in graphics\n * debugging tools such as RenderDoc. They do not have any other functional impact. Debug groups\n * have to be enabled during initialization. 
See \\ref ngf_diagnostic_info.\n *\n * This command records a marker into the given command buffer indicating that the subsequent\n * commands recorded into the buffer pertain to a certain debug group.\n *\n * @param cmd_buffer the command buffer to record the debug group start marker into.\n * @param name The name of the debug group that will appear in debugging tools.\n */\nvoid ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buffer, const char* name) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n *\n * Records the end of a \"debug group\" into the given command buffer.\n *\n * This command records a marker into the given command buffer that terminates the current debug\n * group if there is one. Subsequent commands recorded into the buffer shall not pertain to any\n * debug group until a new one is started.\n *\n * @param cmd_buffer The command buffer to record the debug group end marker into.\n */\nvoid ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Triggers RenderDoc Capture.\n *\n * Captures the next frame from the active window in the current context.\n * If called, subsequent calls to \\ref ngf_renderdoc_capture_begin and \\ref\n * ngf_renderdoc_capture_end will do nothing until after the next frame that\n * ngf_renderdoc_capture_next_frame was called (i.e. you cannot do nested captures).\n */\nvoid ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Begins RenderDoc Capture.\n *\n * Begins frame capture for the active window in the current context.\n * Ended by \\ref ngf_renderdoc_capture_end.\n */\nvoid ngf_renderdoc_capture_begin() NGF_NOEXCEPT;\n\n/**\n * \\ingroup ngf\n * Triggers RenderDoc Capture.\n *\n * Ends frame capture for the active window in the current context.\n */\nvoid ngf_renderdoc_capture_end() NGF_NOEXCEPT;\n\n#ifdef _MSC_VER\n#pragma endregion\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "misc/common/CMakeLists.txt",
    "content": "#[[\nCopyright (c) 2023 nicegraf contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the “Software”), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n]]\n\ncmake_minimum_required(VERSION 3.23.3)\nproject(nicegraf-misc-common)\n\n\nset(CMAKE_C_STANDARD 99)\nset(CMAKE_CXX_STANDARD 20)\n\ninclude(\"${CMAKE_CURRENT_LIST_DIR}/../../build-utils.cmake\")\n\nif (WIN32)\n    set(NICESHADE_PLATFORM win PARENT_SCOPE)\n    set(NGF_BACKEND nicegraf-vk)\nelseif(APPLE)\n    set(NICESHADE_PLATFORM macos PARENT_SCOPE)\n    if (NGF_USE_MVK STREQUAL \"yes\")\n      set(NGF_BACKEND nicegraf-vk)\n    else()\n      set(NGF_BACKEND nicegraf-mtl)\n    endif()\nelseif(UNIX AND NOT APPLE)\n    set(NICESHADE_PLATFORM linux PARENT_SCOPE)\n    set(NGF_BACKEND nicegraf-vk)\nelse()\n    message(FATAL_ERROR \"Your platform is not currently supported by nicegraf.\")\nendif()\n\nnmk_header_library(NAME nicegraf\n                   PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/../../include\n                   PUB_DEPS ${NGF_BACKEND})\n\n\nstring(TOUPPER 
\"NGF_BACKEND_${NGF_BACKEND}\" NGF_BACKEND_DEFINE)\nstring(REPLACE \"-\" \"_\" NGF_BACKEND_DEFINE ${NGF_BACKEND_DEFINE})\n\nif (NGF_BUILD_SAMPLES STREQUAL \"yes\")\n  nmk_static_library(NAME nicegraf-misc-common\n                     SRCS  ${CMAKE_CURRENT_LIST_DIR}/shader-loader.h\n                           ${CMAKE_CURRENT_LIST_DIR}/shader-loader.cpp\n                           ${CMAKE_CURRENT_LIST_DIR}/file-utils.h\n                           ${CMAKE_CURRENT_LIST_DIR}/file-utils.cpp\n                           ${CMAKE_CURRENT_LIST_DIR}/mesh-loader.cpp\n                           ${CMAKE_CURRENT_LIST_DIR}/mesh-loader.h\n                           ${CMAKE_CURRENT_LIST_DIR}/targa-loader.cpp\n                           ${CMAKE_CURRENT_LIST_DIR}/targa-loader.h\n                           ${CMAKE_CURRENT_LIST_DIR}/logging.h\n                     DEPS  nicegraf\n                     PVT_DEFINES  ${NGF_BACKEND_DEFINE}\n                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/)\nendif()\n"
  },
  {
    "path": "misc/common/check.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"logging.h\"\n#include <nicegraf-util.h>\n\n#include <stdio.h>\n#include <stdlib.h>\n\n#pragma warning(disable:26812)\n\nnamespace ngf_misc {\n\n#define NGF_MISC_CHECK_NGF_ERROR(expr)                         \\\n  {                                                               \\\n    const ngf_error err = (expr);                                 \\\n    if (err != NGF_ERROR_OK) {                                    \\\n      ::ngf_misc::loge(\"nicegraf error %d (file %s line %d), aborting.\\n\", err, __FILE__, __LINE__); \\\n      fflush(stderr);                                             \\\n      abort();                                                    \\\n    }                                                             \\\n  }\n\n#define NGF_MISC_ASSERT(expr)                            
                                     \\\n  {                                                                                              \\\n    if (!(expr)) {                                                                               \\\n      ::ngf_misc::loge(\"assertion %s failed (file %s line %d)\\n\", #expr, __FILE__, __LINE__); \\\n      fflush(stderr);                                                                            \\\n      abort();                                                                                   \\\n    }                                                                                            \\\n  }\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/file-utils.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"file-utils.h\"\n\n#include <fstream>\n#include <stdexcept>\n\nnamespace ngf_misc {\n\nstd::vector<char> load_file(const char* file_name) {\n  std::basic_ifstream<char> fs(file_name, std::ios::binary | std::ios::in);\n  if (!fs.is_open()) {\n    throw std::runtime_error{ file_name };\n  }\n  return std::vector<char> { std::istreambuf_iterator<char>(fs),\n                             std::istreambuf_iterator<char>() };\n}\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/file-utils.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <stdint.h>\n#include <vector>\n\n#if defined(_WIN64) || defined(_WIN32)\n#define NGF_MISC_PATH_SEPARATOR \"\\\\\"\n#else\n#define NGF_MISC_PATH_SEPARATOR \"/\"\n#endif\n\nnamespace ngf_misc {\n\nstd::vector<char> load_file(const char* file_name);\n\n}\n"
  },
  {
    "path": "misc/common/logging.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <stdarg.h>\n#include <stdio.h>\n\nnamespace ngf_misc {\n\ninline void vlog_msg(char prefix, const char* fmt, va_list args) {\n  auto file = prefix == 'E' ? stderr : stdout;\n  fprintf(file, \"\\n[%c] \", prefix);\n  vfprintf(file, fmt, args);\n  fprintf(file, \"\\n\");\n}\n\ninline void vloge(const char* fmt, va_list args) {\n  vlog_msg('E', fmt, args);\n}\n\ninline void vlogi(const char* fmt, va_list args) {\n  vlog_msg('I', fmt, args);\n}\n\ninline void vlogd(const char* fmt, va_list args) {\n  vlog_msg('D', fmt, args);\n}\n\ninline void loge(const char* fmt, ...) {\n  va_list args;\n  va_start(args, fmt);\n  vloge(fmt, args);\n  va_end(args);\n}\n\ninline void logi(const char* fmt, ...) 
{\n  va_list args;\n  va_start(args, fmt);\n  vlogi(fmt, args);\n  va_end(args);\n}\n\ninline void logd(const char* fmt, ...) {\n#if !defined(NDEBUG)\n  va_list args;\n  va_start(args, fmt);\n  vlogd(fmt, args);\n  va_end(args);\n#else\n  (void)fmt;\n#endif\n}\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/mesh-loader.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n#define _CRT_SECURE_NO_WARNINGS\n#include \"mesh-loader.h\"\n\n#include \"check.h\"\n\n#include <stdint.h>\n#include <stdio.h>\n\nnamespace ngf_misc {\n\nstatic void read_into_mapped_buffer(FILE* f, ngf_buffer buf, size_t data_size) {\n  void* mapped_buffer_mem = ngf_buffer_map_range(buf, 0u, data_size);\n  const size_t read_elements =\n      fread(mapped_buffer_mem, sizeof(char), data_size, f);\n  NGF_MISC_ASSERT(read_elements == data_size);\n  ngf_buffer_flush_range(buf, 0, data_size);\n  ngf_buffer_unmap(buf);\n}\n\nmesh load_mesh_from_file(const char* mesh_file_name, ngf_xfer_encoder xfenc) {\n  mesh  result;\n  FILE* mesh_file = fopen(mesh_file_name, \"rb\");\n  NGF_MISC_ASSERT(mesh_file != NULL);\n\n  /* Indicates to skip staging buffers and copy directly to device-local memory if possible. 
*/\n  const bool skip_staging = ngf_get_device_capabilities()->device_local_memory_is_host_visible;\n\n  /**\n   * Read the \"file header\" - 4-byte field with the lowest bit indicating\n   * the presence of normals, and the second-lowest bit indicating the\n   * presence of UV coordinates (position attribute is always assumed).\n   */\n  uint32_t header        = 0u;\n  size_t   read_elements = 0u;\n  read_elements          = fread(&header, sizeof(header), 1u, mesh_file);\n  NGF_MISC_ASSERT(read_elements == 1u);\n  result.have_normals = header & 1;\n  result.have_uvs     = header & 2;\n\n  /**\n   * Read the total size of the vertex data. Depending on device capabilities,\n   * read it all directly into the GPU buffer, or read into a staging buffer.\n   */\n  uint32_t vertex_data_size = 0u;\n  read_elements             = fread(&vertex_data_size, sizeof(vertex_data_size), 1u, mesh_file);\n  NGF_MISC_ASSERT(read_elements == 1u);\n  const ngf_buffer_info vertex_data_staging_buffer_info = {\n      .size         = vertex_data_size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC,\n  };\n  ngf::buffer vertex_data_staging_buffer;\n  if (!skip_staging) {\n    NGF_MISC_CHECK_NGF_ERROR(\n        vertex_data_staging_buffer.initialize(vertex_data_staging_buffer_info));\n  }\n  const ngf_buffer_info vertex_data_buffer_info = {\n      .size         = vertex_data_staging_buffer_info.size,\n      .storage_type = skip_staging ? NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE : NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST,\n  };\n  NGF_MISC_CHECK_NGF_ERROR(result.vertex_data.initialize(vertex_data_buffer_info));\n\n  read_into_mapped_buffer(\n      mesh_file,\n      skip_staging ? result.vertex_data.get() : vertex_data_staging_buffer.get(),\n      vertex_data_size);\n\n  /**\n   * Read the number of indices in the mesh. 
If number of indices is 0, the\n   * mesh is considered to not have an index buffer, and a non-indexed draw call\n   * should be used to render it.\n   */\n  read_elements = fread(&result.num_indices, sizeof(uint32_t), 1, mesh_file);\n  NGF_MISC_ASSERT(read_elements == 1u);\n\n  /**\n   * Allocate buffer(s) for the index data, and read the index data.\n   * As before, we try to read directly into the GPU buffer if the device allows it.\n   */\n  ngf::buffer index_data_staging_buffer;\n  const ngf_buffer_info index_data_staging_buffer_info = {\n      .size         = sizeof(uint32_t) * result.num_indices,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC,\n  };\n  const ngf_buffer_info index_data_buffer_info = {\n      .size         = sizeof(uint32_t) * result.num_indices,\n      .storage_type = skip_staging ? NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE\n                                   : NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_INDEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST,\n  };\n  if (result.num_indices > 0) {\n    NGF_MISC_CHECK_NGF_ERROR(result.index_data.initialize(index_data_buffer_info));\n    if (!skip_staging) {\n      NGF_MISC_CHECK_NGF_ERROR(\n          index_data_staging_buffer.initialize(index_data_staging_buffer_info));\n    }\n    read_into_mapped_buffer(\n        mesh_file,\n        skip_staging ? 
result.index_data.get() : index_data_staging_buffer.get(),\n        index_data_staging_buffer_info.size);\n  }\n\n  /**\n   * Record commands to upload staging data if we have to.\n   */\n  if (!skip_staging) {\n    ngf_cmd_copy_buffer(\n        xfenc,\n        vertex_data_staging_buffer.get(),\n        result.vertex_data.get(),\n        vertex_data_buffer_info.size,\n        0u,\n        0u);\n    if (result.num_indices > 0) {\n      ngf_cmd_copy_buffer(\n          xfenc,\n          index_data_staging_buffer.get(),\n          result.index_data.get(),\n          index_data_staging_buffer_info.size,\n          0u,\n          0u);\n    }\n  }\n\n  return result;\n}\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/mesh-loader.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include <nicegraf-wrappers.h>\n\nnamespace ngf_misc {\n\nstruct mesh {\n  ngf::buffer vertex_data;\n  ngf::buffer index_data;\n  uint32_t    num_indices;\n  bool        have_normals;\n  bool        have_uvs;\n};\n\nmesh load_mesh_from_file(const char* file_name, ngf_xfer_encoder xfenc);\n\n}"
  },
  {
    "path": "misc/common/shader-loader.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"shader-loader.h\"\n\n#include \"file-utils.h\"\n#include \"check.h\"\n\n#include <fstream>\n#include <string>\n\nnamespace ngf_misc {\n\n#if defined(NGF_BACKEND_NICEGRAF_VK)\n#define SHADER_EXTENSION \".spv\"\n#elif defined(NGF_BACKEND_NICEGRAF_MTL) || defined(NGF_BACKEND_NICEGRAF_MTL_CPP)\n#define SHADER_EXTENSION \".21.msl\"\n#else\n#error \"build system needs to define samples backend\"\n#endif\n\nngf::shader_stage\nload_shader_stage(const char* shader_file_name, const char* entry_point_name, ngf_stage_type type) {\n  constexpr const char* shaders_root_dir        = \"shaders\" NGF_MISC_PATH_SEPARATOR;\n  constexpr const char* stage_to_file_ext_map[] = {\"vs\", \"ps\", \"cs\"};\n\n  const std::string file_name = shaders_root_dir + std::string(shader_file_name) + \".\" +\n                                
stage_to_file_ext_map[type] + SHADER_EXTENSION;\n  const std::vector<char> content    = load_file(file_name.c_str());\n  ngf_shader_stage_info      stage_info = {\n      .type             = type,\n      .content          = reinterpret_cast<const uint8_t*>(content.data()),\n      .content_length   = (uint32_t)content.size(),\n      .debug_name       = \"\",\n      .entry_point_name = entry_point_name};\n\n  ngf::shader_stage stage;\n  NGF_MISC_CHECK_NGF_ERROR(stage.initialize(stage_info));\n\n  return stage;\n}\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/shader-loader.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <nicegraf-wrappers.h>\n\nnamespace ngf_misc {\n\nngf::shader_stage\nload_shader_stage(const char* shader_file_name, const char* entry_point_name, ngf_stage_type type);\n\n}"
  },
  {
    "path": "misc/common/targa-loader.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"targa-loader.h\"\n\n#include <algorithm>\n#include <math.h>\n#include <stdexcept>\n\nnamespace ngf_misc {\nnamespace tga {\n/* image type constants */\nenum class img_type : uint8_t {\n  none                = 0,\n  color_mapped        = 1,\n  true_color          = 2,\n  black_and_white     = 3,\n  color_mapped_rle    = 9,\n  true_color_rle      = 10,\n  black_and_white_rle = 11\n};\n\n/* targa structures */\n#pragma pack(push, 1)\nstruct cmap {\n  uint16_t first_entry_idx;\n  uint16_t num_entries;\n  uint8_t  bits_per_entry;\n};\n\nstruct image {\n  uint16_t x_origin;\n  uint16_t y_origin;\n  uint16_t width;\n  uint16_t height;\n  uint8_t  bitsperpel;\n  uint8_t  descriptor;\n};\n\nstruct header {\n  uint8_t  id_length;\n  uint8_t  has_cmap;\n  img_type type;\n  cmap     cmap_entry;\n  image    
img;\n};\n\nstruct footer {\n  uint32_t ext_offset;\n  uint32_t dev_offset;\n  char     sig[18];\n};\n#pragma pack(pop)\n\n}  // namespace tga\n\nnamespace {\n\nfloat srgb_to_linear(uint8_t srgb_value) {\n  const float srgb_valuef = (float)srgb_value / 255.0f;\n  return srgb_valuef <= 0.04045f ? (srgb_valuef / 12.92f)\n                                 : powf(((srgb_valuef + 0.055f) / 1.055f), 2.4f);\n}\n\nuint8_t linear_to_srgb(float linear_value) {\n  const float srgb_valuef = linear_value <= 0.0031308f\n                                ? (12.92f * linear_value)\n                                : (1.055f * powf(linear_value, 1.0f / 2.4f) - 0.055f);\n  return (uint8_t)(std::min(1.0f, srgb_valuef) * 255.0f);\n}\n\n}  // namespace\n\nvoid load_targa(\n    const void* in_buf,\n    size_t      in_buf_size,\n    void*       out_buf,\n    size_t      out_buf_size,\n    uint32_t*   width_px,\n    uint32_t*   height_px) {\n  auto in_bytes  = (const char*)in_buf;\n  auto out_bytes = (char*)out_buf;\n\n  /* obtain header and footer data. */\n  auto hdr = (const tga::header*)in_buf;\n  auto ftr = (const tga::footer*)&in_bytes[in_buf_size - sizeof(tga::footer)];\n\n  /* write width and height outputs. */\n  *width_px  = hdr->img.width;\n  *height_px = hdr->img.height;\n\n  /* if the output buffer pointer is null, we're done. */\n  if (out_buf == nullptr) { return; }\n\n  /* compute expected output size and check if it fits into the provided\n     output buffer. */\n  const size_t expected_output_size = 4u * hdr->img.width * hdr->img.height;\n  if (expected_output_size > out_buf_size) { throw std::runtime_error(\"buffer overflow\"); }\n\n  /* verify that footer is valid. */\n  const char* expected_sig = \"TRUEVISION-XFILE.\";\n  for (size_t si = 0; si < sizeof(ftr->sig); ++si) {\n    if (ftr->sig[si] != expected_sig[si]) { throw std::runtime_error(\"tga signature not found\"); }\n  }\n\n  /* only rle-encoded true-color images are allowed. 
*/\n  if (hdr->type != tga::img_type::true_color_rle) {\n    throw std::runtime_error(\"unsupported tga feature detected\");\n  }\n  const bool has_alpha = (hdr->img.descriptor & 0x08) != 0;\n\n  /* obtain extension data offset. */\n  const size_t ext_offset = ftr->ext_offset;\n\n  /* read 'attributes type' field to determine whether alpha is\n     premultiplied.\n     if no extension section is present, assume non-premultiplied\n     alpha. */\n  const char attr_type = !has_alpha || ext_offset == 0 ? 3 : in_bytes[ext_offset + 494];\n  if (attr_type != 3 && attr_type != 4) { throw std::runtime_error(\"invalid attribute type\"); }\n  const bool is_premul_alpha = attr_type == 4;\n\n  /* read and decode image data, writing result to output. */\n  const char*  img_data       = in_bytes + sizeof(tga::header) + hdr->id_length;\n  size_t written_pixels = 0;\n  const size_t bytes_per_pel  = has_alpha ? 4 : 3;\n  while (written_pixels < hdr->img.width * hdr->img.height &&\n         img_data - in_bytes < (ptrdiff_t)in_buf_size) {\n    const char   packet_hdr    = *img_data;\n    const bool   is_rle_packet = packet_hdr & 0x80;\n    const size_t packet_length = 1u + (packet_hdr & 0x7f);\n    ++img_data; /* advance img. data to point to start of packet data. */\n    for (size_t p = 0u; p < packet_length; ++p) {\n      /* pixel data is stored as BGRA. */\n      const uint8_t  a     = has_alpha ? (uint8_t)img_data[3] : 0xff;\n      const float   af     = (float)a / 255.0f;\n      auto        premul = [&](uint8_t v) {\n        if (is_premul_alpha || !has_alpha)\n          return v;\n        else {\n          /* need to convert from sRGB to linear, premultiply then convert back. 
*/\n          const float linear        = srgb_to_linear(v);\n          const float linear_premul = linear * af;\n          return linear_to_srgb(linear_premul);\n        }\n      };\n      const uint8_t b = premul((uint8_t)img_data[0]), g = premul((uint8_t)img_data[1]), r = premul((uint8_t)img_data[2]);\n      out_bytes[written_pixels * 4u + 0] = (char)r;\n      out_bytes[written_pixels * 4u + 1] = (char)g;\n      out_bytes[written_pixels * 4u + 2] = (char)b;\n      out_bytes[written_pixels * 4u + 3] = (char)a;\n      ++written_pixels;\n      if (!is_rle_packet) img_data += bytes_per_pel;\n    }\n    if (is_rle_packet) img_data += bytes_per_pel;\n  }\n\n  if (img_data - in_bytes >= (ptrdiff_t)in_buf_size) {\n    throw std::runtime_error(\"buffer overflow\");\n  }\n}\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/common/targa-loader.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <stdint.h>\n#include <stddef.h>\n\nnamespace ngf_misc {\n\n/**\n * Decodes an RLE-encoded true color targa file with an optional\n * alpha channel into the target buffer.\n * Assumes the source file uses sRGB color space.\n * If `out_buf` is non-NULL, raw RGBA values, in sRGB, with\n * premultiplied alpha, will be written to it. The width and\n * height of the image are returned in the output parameters.\n * If `out_buf` is NULL, no decoding is performed, however\n * the width and height of the image are still returned.\n */\nvoid load_targa(\n    const void* in_buf,\n    size_t      in_buf_size,\n    void*       out_buf,\n    size_t      out_buf_size,\n    uint32_t*   width_px,\n    uint32_t*   height_px);\n\n}  // namespace ngf_misc\n"
  },
  {
    "path": "misc/shaders.cmake",
    "content": "function (ngf_shaders_target)\n   cmake_parse_arguments(SHADERS_TARGET \"\" \"NAME;OUTPUT_DIR;NICESHADE_PATH\" \"SRCS\" ${ARGN})\n   foreach(source_path ${SHADERS_TARGET_SRCS})\n      file(STRINGS ${source_path} tech_lines REGEX \"// *T *: *([a-zA-Z0-9_]+)\")\n      if (tech_lines)\n        set(tech_names \"\")\n        foreach(tech_line ${tech_lines})\n          string(REPLACE \":\" \";\" tmp ${tech_line})\n          list(GET tmp 1 tmp)\n          string(STRIP \"${tmp}\" tmp)\n          string(REGEX REPLACE \" +\" \";\" tmp ${tmp})\n          list(GET tmp 0 tech_name)\n          list(APPEND tech_names \"${tech_name}\")\n        endforeach(tech_line)\n        set(output_files_list \"\")\n        get_filename_component(header_file_name ${source_path} NAME_WE)\n        if(NOT ${header_file_name} MATCHES \"compute-*\")\n          foreach(tech ${tech_names})\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.vs.21.msl\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.ps.21.msl\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.vs.spv\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.ps.spv\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.pipeline\")\n          endforeach(tech)\n         else()\n          foreach(tech ${tech_names})\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.cs.21.msl\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.cs.spv\")\n            list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.pipeline\")\n          endforeach(tech)\n        endif()\n        list(APPEND output_files_list \"${SHADERS_TARGET_OUTPUT_DIR}/${header_file_name}_binding_consts.h\")\n        add_custom_command(OUTPUT ${output_files_list}\n                           MAIN_DEPENDENCY ${source_path}\n             
              COMMAND ${SHADERS_TARGET_NICESHADE_PATH}/niceshade ARGS ${source_path} \"-t\" \"msl21\" \"-t\" \"spv\" \"-O\" \"${SHADERS_TARGET_OUTPUT_DIR}\" \"-h\" \"${header_file_name}_binding_consts.h\")\n                           #WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/samples/shaders)\n        set(generated_shaders_list \"${output_files_list};${generated_shaders_list}\")\n      endif()\n    endforeach(source_path)\n    add_custom_target(${SHADERS_TARGET_NAME} DEPENDS ${generated_shaders_list})\nendfunction()"
  },
  {
    "path": "samples/00-template/sample-impl.cpp",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"sample-interface.h\"\n\n#include <stdio.h>\n\nnamespace ngf_samples {\n\nstruct sample_data {\n  uint32_t magic_number = 0xdeadbeef;\n};\n\nvoid* sample_initialize(uint32_t , uint32_t ) {\n  printf(\"sample initializing.\\n\");\n  auto d = new sample_data{};\n  d->magic_number = 0xbadf00d;\n  printf(\"sample initialization complete.\\n\");\n  return static_cast<void*>(d);\n}\n\nvoid sample_draw_frame(\n    ngf_frame_token ,\n    uint32_t        ,\n    uint32_t        ,\n    float           ,\n    void*           ) {\n  //auto data = static_cast<sample_data*>(userdata);\n  //printf(\"drawing frame %d (w %d h %d) at time %f magic number 0x%x\\n\", frame_token, width, height, time, data->magic_number);\n}\n\nvoid sample_draw_ui(void*) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  auto data = 
static_cast<sample_data*>(userdata);\n  delete data;\n  printf(\"shutting down\\n\");\n}\n\n}\n"
  },
  {
    "path": "samples/01-fullscreen-triangle/fullscreen-triangle.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"check.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace fullscreen_triangle {\nstruct state {\n  ngf::graphics_pipeline pipeline;\n};\n}  // namespace fullscreen_triangle\n\nvoid* sample_initialize(\n    uint32_t,\n    uint32_t,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder /*xfer_encoder*/) {\n  auto state = new fullscreen_triangle::state {};\n\n  /**\n   * Load the shader stages.\n   * Note that these are only necessary when creating pipeline objects.\n   * After the pipeline objects have been created, the shader stage objects\n   * can be safely discarded.\n   */\n  const ngf::shader_stage 
vertex_shader_stage =\n      load_shader_stage(\"fullscreen-triangle\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"fullscreen-triangle\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Initialize the pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = static_cast<fullscreen_triangle::state*>(userdata);\n\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline.get());\n  const ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n\n  /**\n   * Make a drawcall.\n   */\n  ngf_cmd_draw(main_render_pass, false, 0, 3, 1);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void*) {\n}\n\nvoid sample_shutdown(void* 
userdata) {\n  auto state = static_cast<fullscreen_triangle::state*>(userdata);\n  delete state;\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/02-render-to-texture/render-to-texture.cpp",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"sample-interface.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicegraf-util.h\"\n#include \"shader-loader.h\"\n#include \"check.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nstruct render_to_texture_data {\n  ngf::render_target default_rt;\n  ngf::render_target offscreen_rt;\n  ngf::graphics_pipeline blit_pipeline;\n  ngf::graphics_pipeline offscreen_pipeline;\n  ngf::image rt_texture;\n  ngf::sampler sampler;\n};\n\nvoid* sample_initialize(\n    uint32_t,\n    uint32_t,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder /*xfer_encoder*/) {\n  auto state = new render_to_texture_data{};\n  \n  /* Create the image to render to. 
*/\n  const ngf_extent3d img_size { 512u, 512u, 1u };\n  const ngf_image_info img_info {\n    NGF_IMAGE_TYPE_IMAGE_2D,\n    img_size,\n    1u,\n    1u,\n    NGF_IMAGE_FORMAT_BGRA8_SRGB,\n    NGF_SAMPLE_COUNT_1,\n    NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_ATTACHMENT\n  };\n  NGF_MISC_CHECK_NGF_ERROR(state->rt_texture.initialize(img_info));\n \n  /* Create the offscreen render target.*/\n  const ngf_attachment_description offscreen_color_attachment_description {\n    .type = NGF_ATTACHMENT_COLOR,\n    .format = NGF_IMAGE_FORMAT_BGRA8_SRGB,\n    .sample_count = NGF_SAMPLE_COUNT_1,\n  };\n  const ngf_attachment_descriptions attachments_list = {\n    .descs = &offscreen_color_attachment_description,\n    .ndescs = 1u,\n  };\n  const ngf_image_ref img_ref = {\n    .image = state->rt_texture.get(),\n    .mip_level = 0u,\n    .layer = 0u,\n    .cubemap_face = NGF_CUBEMAP_FACE_COUNT\n  };\n  ngf_render_target_info rt_info {\n    &attachments_list,\n    &img_ref\n  };\n  NGF_MISC_CHECK_NGF_ERROR(state->offscreen_rt.initialize(rt_info));\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage blit_vertex_stage = \n    load_shader_stage(\"simple-texture\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage blit_fragment_stage =\n    load_shader_stage(\"simple-texture\", \"PSMain\", NGF_STAGE_FRAGMENT);\n  const ngf::shader_stage offscreen_vertex_stage =\n    load_shader_stage(\"small-triangle\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage offscreen_fragment_stage =\n    load_shader_stage(\"small-triangle\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline for blit.  
\n   */\n  ngf_util_graphics_pipeline_data blit_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data);\n  blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  ngf_graphics_pipeline_info &blit_pipe_info =\n      blit_pipeline_data.pipeline_info;\n  blit_pipe_info.nshader_stages = 2u;\n  blit_pipe_info.shader_stages[0] = blit_vertex_stage.get();\n  blit_pipe_info.shader_stages[1] = blit_fragment_stage.get();\n  blit_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs();\n  NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info));\n\n  /**\n   * Create pipeline for offscreen pass.\n   */\n  ngf_util_graphics_pipeline_data offscreen_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&offscreen_pipeline_data);\n  ngf_graphics_pipeline_info &offscreen_pipe_info =\n      offscreen_pipeline_data.pipeline_info;\n  offscreen_pipe_info.nshader_stages = 2u;\n  offscreen_pipe_info.shader_stages[0] = offscreen_vertex_stage.get();\n  offscreen_pipe_info.shader_stages[1] = offscreen_fragment_stage.get();\n  offscreen_pipe_info.compatible_rt_attachment_descs = &attachments_list;\n  NGF_MISC_CHECK_NGF_ERROR(state->offscreen_pipeline.initialize(offscreen_pipe_info));\n \n  /* Create sampler.*/\n  const ngf_sampler_info samp_info {\n    NGF_FILTER_LINEAR,\n    NGF_FILTER_LINEAR,\n    NGF_FILTER_NEAREST,\n    NGF_WRAP_MODE_CLAMP_TO_EDGE,\n    NGF_WRAP_MODE_CLAMP_TO_EDGE,\n    NGF_WRAP_MODE_CLAMP_TO_EDGE,\n    0.0f,\n    0.0f,\n    0.0f,\n    1.0f,\n    false\n  };\n  NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /* time_delta */,\n    ngf_frame_token frame_token,\n    uint32_t        w,\n    uint32_t        h,\n    float           ,\n    void* userdata) {\n  auto state = 
reinterpret_cast<render_to_texture_data*>(userdata);\n  ngf_irect2d         offsc_viewport {0, 0, 512, 512};\n  ngf_irect2d         onsc_viewport {0, 0, w, h};\n  ngf_cmd_buffer      offscr_cmd_buf = nullptr;\n  ngf_cmd_buffer_info cmd_info       = {};\n  ngf_create_cmd_buffer(&cmd_info, &offscr_cmd_buf);\n  ngf_start_cmd_buffer(offscr_cmd_buf, frame_token);\n  {\n    ngf::render_encoder renc {offscr_cmd_buf, state->offscreen_rt, .0f, 0.0f, 0.0f, 0.0f, 1.0, 0u};\n    ngf_cmd_bind_gfx_pipeline(renc, state->offscreen_pipeline);\n    ngf_cmd_viewport(renc, &offsc_viewport);\n    ngf_cmd_scissor(renc, &offsc_viewport);\n    ngf_cmd_draw(renc, false, 0u, 3u, 1u);\n  }\n  ngf_submit_cmd_buffers(1, &offscr_cmd_buf);\n  ngf_destroy_cmd_buffer(offscr_cmd_buf);\n\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline);\n  ngf_cmd_viewport(main_render_pass, &onsc_viewport);\n  ngf_cmd_scissor(main_render_pass, &onsc_viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      ngf::descriptor_set<0>::binding<1>::texture(state->rt_texture.get()),\n      ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get()));\n  ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_draw_ui(void*) {}\n\nvoid sample_post_submit(void*){}\n\nvoid sample_shutdown(void* userdata) {\n  auto data = static_cast<render_to_texture_data*>(userdata);\n  delete data;\n  printf(\"shutting down\\n\");\n}\n\n}\n"
  },
  {
    "path": "samples/03-uniform-buffers/uniform-buffers.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"check.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <imgui.h>\n#include <math.h>\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace uniform_buffers {\nstruct shader_uniform_values {\n  float scale_a = 0.0f;\n  float scale_b = 0.5f;\n  float time    = 0.0f;\n  float aspect  = 1.0f;\n  float theta   = 0.0f;\n};\n\nstruct state {\n  ngf::graphics_pipeline polygon_pipeline;\n  ngf::buffer            uniform_buffer;\n  size_t                 uniform_buffer_offset     = 0u;\n  size_t                 aligned_uniform_data_size = 0u;\n  shader_uniform_values  uniform_values;\n  int                    n            = 6;\n  float                  growth_speed = 1.f;\n  bool              
     growing      = true;\n};\n}  // namespace uniform_buffers\n\nstatic float theta_for_n(int n) {\n  return 2.0f * 3.1415926f / static_cast<float>(n);\n}\n\nstatic float min_scale_for_ngon(int n) {\n  float a = theta_for_n(n);\n  return (1.0f - sinf(a) * tanf(a / 2.0f));\n}\n\nvoid* sample_initialize(\n    uint32_t,\n    uint32_t,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder /*xfer_encoder*/) {\n  auto state = new uniform_buffers::state {};\n\n  /**\n   * Pre-initialize some uniform variables.\n   */\n  state->uniform_values.scale_a = state->uniform_values.scale_b * min_scale_for_ngon(state->n);\n  state->uniform_values.theta   = theta_for_n(state->n);\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage polygon_vertex_stage =\n      load_shader_stage(\"polygon\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage polygon_fragment_stage =\n      load_shader_stage(\"polygon\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline.\n   */\n  ngf_util_graphics_pipeline_data polygon_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&polygon_pipeline_data);\n  polygon_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  polygon_pipeline_data.rasterization_info.cull_mode  = NGF_CULL_MODE_NONE;\n  ngf_graphics_pipeline_info& polygon_pipe_info       = polygon_pipeline_data.pipeline_info;\n  polygon_pipe_info.nshader_stages                    = 2u;\n  polygon_pipe_info.shader_stages[0]                  = polygon_vertex_stage.get();\n  polygon_pipe_info.shader_stages[1]                  = polygon_fragment_stage.get();\n  polygon_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs();\n  polygon_pipeline_data.input_assembly_info.primitive_topology = NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;\n  NGF_MISC_CHECK_NGF_ERROR(state->polygon_pipeline.initialize(polygon_pipe_info));\n\n  /**\n   * Create the uniform buffer.\n   * We need to write 
to the buffer every frame from the CPU. However, as we're preparing the\n   * data for the next frame, the GPU might still be rendering the current frame. Modifying the\n   * buffer at that time would lead to a data race. To avoid it, we employ a triple buffering\n   * strategy:\n   *  - assume we need N bytes for the uniform buffer\n   *  - allocate 3*N bytes for the buffer;\n   *  - ensure that while GPU reads data at offset i*N, the CPU writes at ((i + 1) mod 3) * N.\n   * This ensures that, as long as the CPU is no more than 2 frames ahead of the GPU, no\n   * data races will happen.\n   *\n   * Note that the offset at which we read/write must have an alignment that is specific to the\n   * GPU. That alignment can be obtained from ngf_get_device_capabilities().\n   */\n  const size_t uniform_buffer_offset_alignment =\n      ngf_get_device_capabilities()->uniform_buffer_offset_alignment;\n  const size_t requested_data_size = sizeof(uniform_buffers::shader_uniform_values);\n  state->aligned_uniform_data_size =\n      requested_data_size +\n      (uniform_buffer_offset_alignment - requested_data_size % uniform_buffer_offset_alignment);\n  const size_t          uniform_buffer_size = 3 * state->aligned_uniform_data_size;\n  const ngf_buffer_info uniform_buffer_info = {\n      .size         = uniform_buffer_size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_UNIFORM_BUFFER};\n  NGF_MISC_CHECK_NGF_ERROR(state->uniform_buffer.initialize(uniform_buffer_info));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float              time_delta,\n    ngf_frame_token /*frame_token*/,\n    uint32_t w,\n    uint32_t h,\n    float,\n    void* userdata) {\n  auto state = reinterpret_cast<uniform_buffers::state*>(userdata);\n\n  /**\n   * Update the values for the uniform buffer.\n   */\n  uniform_buffers::shader_uniform_values& uniforms  = state->uniform_values;\n 
 const float                             max_scale = uniforms.scale_b;\n  float                                   min_scale = max_scale * min_scale_for_ngon(state->n);\n  const bool                              growing   = state->growing;\n  uniforms.aspect                                   = static_cast<float>(w) / static_cast<float>(h);\n  uniforms.time += time_delta;\n  uniforms.scale_a += (growing ? 1.0f : -1.0f) * time_delta * (state->growth_speed);\n  const bool evolve_ngon =\n      (growing && uniforms.scale_a >= max_scale) || (!growing && uniforms.scale_a <= min_scale);\n  constexpr int max_ngon_sides = 96;\n  constexpr int min_ngon_sides = 6;\n  const bool    switch_phase   = evolve_ngon && ((growing && state->n == max_ngon_sides) ||\n                                            (!growing && state->n == min_ngon_sides));\n  if (switch_phase) {\n    state->growing = !state->growing;\n  } else if (evolve_ngon) {\n    state->n       = growing ? (state->n << 1) : (state->n >> 1);\n    uniforms.theta = theta_for_n(state->n);\n    state->growth_speed *= (growing ? 0.5f : 2.0f);\n    uniforms.scale_a = growing ? 
max_scale * min_scale_for_ngon(state->n) : max_scale;\n  }\n\n  /**\n   * Write the updated values to the uniform buffer at current offset.\n   * Map the range, write the data using memcpy, then flush and unmap.\n   */\n  void* mapped_uniform_buffer_offset = ngf_buffer_map_range(\n      state->uniform_buffer,\n      state->uniform_buffer_offset,\n      state->aligned_uniform_data_size);\n  memcpy(mapped_uniform_buffer_offset, &state->uniform_values, sizeof(state->uniform_values));\n  ngf_buffer_flush_range(state->uniform_buffer, 0, state->aligned_uniform_data_size);\n  ngf_buffer_unmap(state->uniform_buffer);\n\n  /**\n   * Record the rendering commands.\n   */\n  ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->polygon_pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      ngf::descriptor_set<0>::binding<0>::uniform_buffer(\n          state->uniform_buffer,\n          state->uniform_buffer_offset,\n          state->aligned_uniform_data_size));\n  ngf_cmd_draw(main_render_pass, false, 0u, (uint32_t)(state->n) * 3, 1u);\n\n  /**\n   * Update the uniform buffer offset so we write there on the next frame.\n   */\n  state->uniform_buffer_offset = (state->uniform_buffer_offset + state->aligned_uniform_data_size) %\n                                 (3 * state->aligned_uniform_data_size);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_draw_ui(void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  auto state = static_cast<uniform_buffers::state*>(userdata);\n  delete state;\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/04-texture-sampling/texture-sampling.cpp",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"check.h\"\n#include \"file-utils.h\"\n#include \"imgui.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n#include \"staging-image.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace texture_sampling {\n\nstruct matrices {\n  struct {\n    nm::float4x4 matrix;\n    char _padding[256 - sizeof(nm::float4x4)];\n  } m[4]{};\n};\n\nstruct state {\n  ngf::graphics_pipeline             pipeline;\n  ngf::image                         texture;\n  ngf::sampler                       samplers[4];\n  ngf::uniform_multibuffer<matrices> uniforms;\n  float                              tilt  = 0.0f;\n  float                              dolly = -5.0f;\n  float                   
           pan   = 0.0f;\n};\n\n}  // namespace texture_sampling\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto s = new texture_sampling::state {};\n\n  /* Prepare a staging buffer for the image. */\n  staging_image texture_staging_image = create_staging_image_from_tga(\"assets/tiles.tga\");\n  \n  /* Create the image object. */\n  ngf_image_info texture_image_info = {\n      .type = NGF_IMAGE_TYPE_IMAGE_2D,\n      .extent =\n          {\n              .width  = texture_staging_image.width_px,\n              .height = texture_staging_image.height_px,\n              .depth  = 1u,\n          },\n      .nmips        = texture_staging_image.nmax_mip_levels,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_SRGBA8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM |\n                      NGF_IMAGE_USAGE_XFER_DST};\n  NGF_MISC_CHECK_NGF_ERROR(s->texture.initialize(texture_image_info));\n\n  /* Upload the data from the staging buffer into the 0th mip level of the texture. */\n  const ngf_image_write img_write = {\n      .src_offset = 0u,\n      .dst_offset = {.x = 0, .y = 0, .z = 0u},\n      .extent =\n          {.width = texture_staging_image.width_px, .height = texture_staging_image.height_px, .depth = 1u},\n      .dst_level      = 0u,\n      .dst_base_layer = 0u,\n      .nlayers        = 1u};\n  ngf_cmd_write_image(\n      xfer_encoder,\n      texture_staging_image.staging_buffer.get(),\n      s->texture.get(),\n      &img_write,\n      1u);\n\n\n  /* Populate the rest of the mip levels automatically. */\n  ngf_cmd_generate_mipmaps(xfer_encoder, s->texture.get());\n\n  /* Create the image sampler objects. 
*/\n\n  /* Note that with the nearest-neighbor sampler, we constrain the min and max LOD to 0,\n     in order to limit ourselves to mip level 0 only and demonstrate the effect of sampling\n     without mips. */\n  NGF_MISC_CHECK_NGF_ERROR(s->samplers[0].initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_NEAREST,\n      .mag_filter        = NGF_FILTER_NEAREST,\n      .mip_filter        = NGF_FILTER_NEAREST,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = 0.0f,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false}));\n\n  /* Same comment as above regarding the min/max LOD applies in case of the bilinear sampler. */\n  NGF_MISC_CHECK_NGF_ERROR(s->samplers[1].initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_NEAREST,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = 0.0f,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false}));\n\n  NGF_MISC_CHECK_NGF_ERROR(s->samplers[2].initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_LINEAR,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = (float)texture_staging_image.nmax_mip_levels,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false}));\n\n  /* note that with 
anisotropic sampling, mipmaps are still needed because the\n     specific (hardware-dependent) implementation may access them. */\n  NGF_MISC_CHECK_NGF_ERROR(s->samplers[3].initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_LINEAR,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = (float)texture_staging_image.nmax_mip_levels,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 16.0f,\n      .enable_anisotropy = true}));\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage vertex_shader_stage =\n      load_shader_stage(\"textured-quad\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"textured-quad\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Initialize the pipeline object.\n   */\n  s->pipeline.initialize(pipeline_data.pipeline_info);\n\n  /**\n   * Create the uniform buffer.\n   */\n  s->uniforms.initialize(3);\n\n  return 
static_cast<void*>(s);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<texture_sampling::state*>(userdata);\n\n  /* Compute the perspective transform for the current frame. */\n  const nm::float4x4 camera_to_clip = nm::perspective(\n      nm::deg2rad(72.0f),\n      static_cast<float>(w) / static_cast<float>(h),\n      0.01f,\n      100.0f);\n\n  /* Build the world-to-camera transform for the current frame. */\n  nm::float4x4 world_to_camera =\n      nm::translation(nm::float3 {state->pan, 0.0f, state->dolly}) * nm::rotation_x(state->tilt);\n\n  /* Build the final transform matrices for this frame. */\n  texture_sampling::matrices uniforms_for_this_frame;\n  for (size_t i = 0; i < sizeof(uniforms_for_this_frame.m) / sizeof(uniforms_for_this_frame.m[0]);\n       ++i) {\n    const nm::float4x4 object_to_world =\n        nm::translation(nm::float3 {-3.0f + (float)i * 2.05f, 0.0f, 0.0f});\n    uniforms_for_this_frame.m[i].matrix = camera_to_clip * world_to_camera * object_to_world;\n  }\n  state->uniforms.write(uniforms_for_this_frame);\n\n  ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  for (uint32_t i = 0; i < sizeof(state->samplers) / sizeof(state->samplers[0]); ++i) {\n    ngf::cmd_bind_resources(\n        main_render_pass,\n        state->uniforms\n            .bind_op_at_current_offset(0, 0, 256 * i, sizeof(nm::float4x4)),\n        ngf::descriptor_set<0>::binding<1>::sampler(state->samplers[i]),\n        ngf::descriptor_set<1>::binding<0>::texture(state->texture));\n    ngf_cmd_draw(main_render_pass, false, 0, 6, 1);\n  }\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid 
sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<texture_sampling::state*>(userdata);\n  ImGui::Begin(\"Camera control\");\n  ImGui::DragFloat(\"dolly\", &data->dolly, 0.01f, -70.0f, 0.11f);\n  ImGui::DragFloat(\"pan\", &data->pan, 0.01f, -70.0f, 70.0f);\n  ImGui::DragFloat(\"tilt\", &data->tilt, 0.01f, -(nm::PI / 2.0f + 0.01f), nm::PI / 2.0f + 0.01f);\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<texture_sampling::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/05-cubemap/cubemap.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"check.h\"\n#include \"file-utils.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n#include \"targa-loader.h\"\n\n#include <stdio.h>\n#include <string>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace cubemap {\n\nstruct uniforms {\n  nm::float4x4 rotation;\n  float        aspect_ratio;\n};\n\nstruct state {\n  ngf::graphics_pipeline             pipeline;\n  ngf::image                         texture;\n  ngf::sampler                       sampler;\n  ngf::uniform_multibuffer<uniforms> uniforms_multibuf;\n  float                              yaw   = 0.0f;\n  float                              pitch = 
0.0f;\n};\n\n}  // namespace cubemap\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new cubemap::state {};\n\n  /* Load contents of cubemap faces into a staging buffer. */\n  uint32_t    face_width = 0, face_height = 0;\n  ngf::buffer staging_buffer;\n  char*       mapped_staging_buffer = nullptr;\n  uint32_t    staging_buffer_size   = 0u;\n  uint32_t    bytes_per_face        = 0u;\n  for (uint32_t face = NGF_CUBEMAP_FACE_POSITIVE_X; face < NGF_CUBEMAP_FACE_COUNT; face++) {\n    const std::string file_name = std::string(\"assets/cube0f\") + std::to_string(face) + \".tga\";\n    std::vector<char> cubemap_face_tga_data = load_file(file_name.c_str());\n    uint32_t          width, height;\n    load_targa(\n        cubemap_face_tga_data.data(),\n        cubemap_face_tga_data.size(),\n        nullptr,\n        0,\n        &width,\n        &height);\n    if (face_width == 0 && face_height == 0) {\n      face_width          = width;\n      face_height         = height;\n      bytes_per_face      = face_width * face_height * 4u;\n      staging_buffer_size = bytes_per_face * NGF_CUBEMAP_FACE_COUNT;\n      staging_buffer.initialize(ngf_buffer_info {\n          .size         = staging_buffer_size,\n          .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n          .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC});\n      mapped_staging_buffer =\n          (char*)ngf_buffer_map_range(staging_buffer.get(), 0, staging_buffer_size);\n    } else if (face_width != width || face_height != height) {\n      loge(\"All faces of the cubemap must have the same dimensions\");\n      return nullptr;\n    }\n    std::vector<char> cubemap_face_rgba_data;\n    cubemap_face_rgba_data.resize(bytes_per_face);\n    load_targa(\n        cubemap_face_tga_data.data(),\n        cubemap_face_tga_data.size(),\n        cubemap_face_rgba_data.data(),\n        
cubemap_face_rgba_data.size(),\n        &width,\n        &height);\n    memcpy(\n        mapped_staging_buffer + face * cubemap_face_rgba_data.size(),\n        cubemap_face_rgba_data.data(),\n        face_width * face_height * 4u);\n  }\n\n  /* Flush and unmap the staging buffer. */\n  ngf_buffer_flush_range(staging_buffer.get(), 0, staging_buffer_size);\n  ngf_buffer_unmap(staging_buffer.get());\n\n  /* Create the cubemap texture. */\n  NGF_MISC_CHECK_NGF_ERROR(state->texture.initialize(ngf_image_info {\n      .type         = NGF_IMAGE_TYPE_CUBE,\n      .extent       = ngf_extent3d {.width = face_width, .height = face_height, .depth = 1},\n      .nmips        = 1u,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_SRGBA8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST}));\n\n  /* Populate the cubemap texture. */\n  const ngf_image_write img_write = {\n      .src_offset     = 0u,\n      .dst_offset     = {0, 0, 0},\n      .extent         = {face_width, face_height, 1u},\n      .dst_level      = 0u,\n      .dst_base_layer = 0u,\n      .nlayers        = NGF_CUBEMAP_FACE_COUNT};\n  ngf_cmd_write_image(xfer_encoder, staging_buffer.get(), state->texture.get(), &img_write, 1u);\n\n  /* Create the image sampler. */\n\n  /* Same comment as above regarding the min/max LOD applies in case of the bilinear sampler. 
*/\n  NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_NEAREST,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = 0.0f,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false, .compare_op = NGF_COMPARE_OP_NEVER}));\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage vertex_shader_stage =\n      load_shader_stage(\"cubemap\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"cubemap\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Initialize the pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Create the uniform buffer.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder 
main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<cubemap::state*>(userdata);\n\n  ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  state->uniforms_multibuf.write(\n      {nm::rotation_y(state->yaw) * nm::rotation_x(state->pitch), (float)w / (float)h});\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->uniforms_multibuf.bind_op_at_current_offset(0, 0),\n      ngf::descriptor_set<0>::binding<1>::texture(state->texture.get()),\n      ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get()));\n  ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto state = reinterpret_cast<cubemap::state*>(userdata);\n  ImGui::Begin(\"Cubemap\", nullptr, ImGuiWindowFlags_AlwaysAutoResize);\n  ImGui::SliderFloat(\"Pitch\", &state->pitch, -nm::PI, nm::PI);\n  ImGui::SliderFloat(\"Yaw\", &state->yaw, -nm::PI, nm::PI);\n  ImGui::Text(\"This sample uses textures by Emil Persson.\\n\"\n              \"Licensed under CC BY 3.0\\n\"\n              \"http://humus.name/index.php?page=Textures\");\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<cubemap::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/06-vertex-attribs/vertex-attribs.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"check.h\"\n#include \"file-utils.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n#include \"targa-loader.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace vertex_attribs {\n\nstruct uniforms {\n  nm::float4x4 world_to_clip;\n  float        timestamp;\n};\n\nstruct state {\n  ngf::graphics_pipeline             pipeline;\n  ngf::image                         object_texture;\n  ngf::sampler                       trilinear_sampler;\n  ngf::uniform_multibuffer<uniforms> uniforms_multibuf;\n  ngf::buffer                        per_instance_data;\n  ngf::texel_buffer_view             
per_instance_data_view;\n  ngf::buffer                        vertex_attrib_buffer;\n  ngf::buffer                        index_buffer;\n  float                              dolly = -130.0f;\n  float                              vfov  = 60.0f;\n};\n\n/**\n * The model instances are arraged in a (slightly perturbed) grid pattern,\n * this constant controls the size of the grid.\n */\nconstexpr int INSTANCES_GRID_SIZE = 128;\nconstexpr size_t INSTANCE_DATA_SIZE = sizeof(float) * 4u * INSTANCES_GRID_SIZE * INSTANCES_GRID_SIZE;\n\n/**\n * The model's raw vertex data (positions and UVs).\n * A dodecahedron.\n */\nfloat vertex_data[] = {\n    //clang-format off\n    0.577350f,  0.577350f,  -0.577350f, 0.727805f,  0.749509f,  0.356822f,  0.000000f,  -0.934172f,\n    0.727805f,  0.868727f,  0.000000f,  0.417775f,  -0.675973f, 0.645760f,  0.809118f,  0.000000f,\n    0.934172f,  -0.356822f, 0.614422f,  0.712668f,  0.934172f,  0.356822f,  0.000000f,  0.797880f,\n    0.653059f,  0.675973f,  0.000000f,  -0.417775f, 0.829219f,  0.749509f,  0.934172f,  -0.356822f,\n    0.000000f,  0.911264f,  0.689899f,  -0.577350f, -0.577350f, 0.577350f,  0.223582f,  0.285757f,\n    -0.934172f, -0.356822f, 0.000000f,  0.336965f,  0.248917f,  -0.417775f, -0.675974f, 0.000000f,\n    0.305627f,  0.345366f,  0.577350f,  -0.577350f, -0.577350f, 0.911264f,  0.809118f,  -0.577350f,\n    0.577350f,  0.577350f,  0.544347f,  0.497000f,  -0.356822f, 0.000000f,  0.934172f,  0.614422f,\n    0.400550f,  0.000000f,  0.417775f,  0.675973f,  0.645760f,  0.497000f,  -0.356822f, 0.000000f,\n    -0.934172f, 0.614422f,  0.905567f,  -0.356822f, 0.000000f,  -0.934172f, 0.520423f,  0.308526f,\n    -0.577350f, -0.577350f, -0.577350f, 0.407040f,  0.345366f,  -0.675974f, 0.000000f,  -0.417775f,\n    0.438378f,  0.248917f,  0.356822f,  0.000000f,  -0.934172f, 0.797880f,  0.845958f,  0.356822f,\n    0.000000f,  -0.934172f, 0.520423f,  0.501425f,  0.577350f,  -0.577350f, -0.577350f, 0.407040f,\n    0.538266f,  0.000000f,  
-0.417775f, -0.675974f, 0.438378f,  0.441816f,  -0.577350f, 0.577350f,\n    -0.577350f, 0.544347f,  0.809118f,  0.417775f,  0.675973f,  0.000000f,  0.696467f,  0.653059f,\n    -0.356822f, 0.000000f,  0.934172f,  0.153507f,  0.189308f,  -0.675974f, 0.000000f,  0.417775f,\n    0.254920f,  0.189308f,  0.577350f,  -0.577350f, 0.577350f,  0.153507f,  0.501425f,  0.000000f,\n    -0.934172f, 0.356822f,  0.223582f,  0.404976f,  0.417775f,  -0.675973f, 0.000000f,  0.254920f,\n    0.501425f,  -0.577350f, 0.577350f,  0.577350f,  0.501039f,  0.556609f,  0.000000f,  0.934172f,\n    0.356822f,  0.614422f,  0.593450f,  -0.417775f, 0.675974f,  0.000000f,  0.532377f,  0.653059f,\n    0.000000f,  -0.417775f, 0.675974f,  0.141537f,  0.345366f,  0.577350f,  0.577350f,  0.577350f,\n    0.727805f,  0.556609f,  0.675974f,  0.000000f,  0.417775f,  0.829219f,  0.556609f,  -0.934172f,\n    0.356822f,  0.000000f,  0.430964f,  0.653059f,  -0.577350f, 0.577350f,  -0.577350f, 0.501039f,\n    0.749508f,  0.000000f,  -0.934172f, -0.356822f, 0.336965f,  0.441816f,  0.356822f,  0.000000f,\n    0.934172f,  0.727805f,  0.437391f,  -0.934172f, 0.356822f,  0.000000f,  0.407040f,  0.152467f,\n    -0.577350f, 0.577350f,  -0.577350f, 0.520423f,  0.189308f,  -0.356822f, 0.000000f,  -0.934172f,\n    0.520423f,  0.382207f,  -0.577350f, 0.577350f,  0.577350f,  0.223582f,  0.092858f,  -0.934172f,\n    0.356822f,  0.000000f,  0.336965f,  0.129698f,  0.577350f,  -0.577350f, -0.577350f, 0.336965f,\n    0.561035f,  0.934172f,  -0.356822f, 0.000000f,  0.223582f,  0.597875f,  0.577350f,  -0.577350f,\n    0.577350f,  0.110198f,  0.441816f,  0.356822f,  0.000000f,  0.934172f,  0.040124f,  0.345366f,\n    -0.356822f, 0.000000f,  0.934172f,  0.110198f,  0.248917f,  0.356822f,  0.000000f,  0.934172f,\n    0.797880f,  0.460159f,  0.577350f,  -0.577350f, 0.577350f,  0.911264f,  0.497000f,  0.934172f,\n    -0.356822f, 0.000000f,  0.911264f,  0.616218f\n    //clang-format on\n};\n\n/**\n * The model's index buffer.\n 
*/\nuint32_t index_data[] = {\n    //clang-format off\n    0,  1,  2,  3,  0,  2,  0,  4,  5,  4,  6,  5,  7,  8,  9,  6,  10, 5,  11, 12, 13, 1,  14,\n    2,  15, 16, 17, 18, 0,  5,  19, 20, 21, 22, 3,  2,  0,  3,  23, 14, 22, 2,  7,  24, 25, 10,\n    18, 5,  26, 27, 28, 29, 30, 31, 7,  27, 32, 30, 3,  31, 4,  33, 34, 35, 29, 31, 3,  36, 31,\n    36, 35, 31, 8,  16, 9,  16, 37, 9,  37, 27, 9,  27, 7,  9,  12, 38, 13, 38, 33, 13, 33, 30,\n    13, 30, 11, 13, 16, 8,  17, 8,  39, 17, 39, 40, 17, 40, 15, 17, 20, 37, 21, 37, 16, 21, 16,\n    41, 21, 41, 19, 21, 3,  30, 23, 30, 33, 23, 33, 4,  23, 4,  0,  23, 24, 42, 25, 42, 43, 25,\n    43, 8,  25, 8,  7,  25, 27, 37, 28, 37, 44, 28, 44, 45, 28, 45, 26, 28, 27, 46, 32, 46, 47,\n    32, 47, 48, 32, 48, 7,  32, 33, 49, 34, 49, 50, 34, 50, 51, 34, 51, 4,  34,\n    //clang-format on\n};\n\n}  // namespace vertex_attribs\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new vertex_attribs::state {};\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage vertex_shader_stage =\n      load_shader_stage(\"instancing\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"instancing\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Enable 
depth testing and depth write.\n   */\n  pipeline_data.depth_stencil_info.depth_test  = true;\n  pipeline_data.depth_stencil_info.depth_write = true;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Set up vertex attributes.\n   */\n\n  /* attribute descriptions indicate the location and format of individual vertex attributes. */\n  const ngf_vertex_attrib_desc vertex_attrib_descriptions[] = {\n      {/* position. */\n       .location   = 0u,\n       .binding    = 0u,\n       .offset     = 0u,\n       .type       = NGF_TYPE_FLOAT,\n       .size       = 3u,\n       .normalized = false},\n      {/* UV coordinate. */\n       .location   = 1u,\n       .binding    = 0u,\n       .offset     = 3u * sizeof(float),\n       .type       = NGF_TYPE_FLOAT,\n       .size       = 2u,\n       .normalized = false}};\n\n  /* buffer binding descriptions indicate _how_ the attributes are fetched from a buffer. 
*/\n  const ngf_vertex_buf_binding_desc vertex_buf_binding_descriptions[] = {{\n      .binding    = 0u,\n      .stride     = sizeof(float) * (3u + 2u),\n      .input_rate = NGF_INPUT_RATE_VERTEX,\n  }};\n\n  pipeline_data.vertex_input_info.nattribs =\n      sizeof(vertex_attrib_descriptions) / sizeof(vertex_attrib_descriptions[0]);\n  pipeline_data.vertex_input_info.attribs = vertex_attrib_descriptions;\n  pipeline_data.vertex_input_info.nvert_buf_bindings =\n      sizeof(vertex_buf_binding_descriptions) / sizeof(vertex_buf_binding_descriptions[0]);\n  pipeline_data.vertex_input_info.vert_buf_bindings = vertex_buf_binding_descriptions;\n\n  /**\n   * Initialize the pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Create and populate the vertex and index buffers.\n   */\n  const ngf_buffer_info vertex_buffer_info = {\n      .size         = sizeof(vertex_attribs::vertex_data),\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST,\n  };\n  const ngf_buffer_info index_buffer_info = {\n      .size         = sizeof(vertex_attribs::index_data),\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_INDEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST,\n  };\n  const ngf_buffer_info vertex_staging_buffer_info = {\n      .size         = vertex_buffer_info.size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC,\n  };\n  const ngf_buffer_info index_staging_buffer_info = {\n      .size         = index_buffer_info.size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC,\n  };\n  ngf::buffer vertex_staging_buffer;\n  NGF_MISC_CHECK_NGF_ERROR(vertex_staging_buffer.initialize(vertex_staging_buffer_info));\n  ngf::buffer index_staging_buffer;\n  
NGF_MISC_CHECK_NGF_ERROR(index_staging_buffer.initialize(index_staging_buffer_info));\n  NGF_MISC_CHECK_NGF_ERROR(state->vertex_attrib_buffer.initialize(vertex_buffer_info));\n  NGF_MISC_CHECK_NGF_ERROR(state->index_buffer.initialize(index_buffer_info));\n  void* mapped_vertex_buffer =\n      ngf_buffer_map_range(vertex_staging_buffer.get(), 0u, vertex_staging_buffer_info.size);\n  void* mapped_index_buffer =\n      ngf_buffer_map_range(index_staging_buffer.get(), 0u, index_staging_buffer_info.size);\n  memcpy(mapped_vertex_buffer, vertex_attribs::vertex_data, vertex_staging_buffer_info.size);\n  memcpy(mapped_index_buffer, vertex_attribs::index_data, index_staging_buffer_info.size);\n  ngf_buffer_flush_range(vertex_staging_buffer.get(), 0, vertex_staging_buffer_info.size);\n  ngf_buffer_flush_range(index_staging_buffer.get(), 0, index_staging_buffer_info.size);\n  ngf_buffer_unmap(vertex_staging_buffer.get());\n  ngf_buffer_unmap(index_staging_buffer.get());\n  ngf_cmd_copy_buffer(\n      xfer_encoder,\n      vertex_staging_buffer.get(),\n      state->vertex_attrib_buffer.get(),\n      vertex_buffer_info.size,\n      0,\n      0);\n  ngf_cmd_copy_buffer(\n      xfer_encoder,\n      index_staging_buffer.get(),\n      state->index_buffer.get(),\n      index_buffer_info.size,\n      0,\n      0);\n\n  /**\n   * Create and populate per-instance data.\n   */\n  const ngf_buffer_info instance_data_buffer_info = {\n      .size         = vertex_attribs::INSTANCE_DATA_SIZE,\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_TEXEL_BUFFER | NGF_BUFFER_USAGE_XFER_DST,\n  };\n  const ngf_buffer_info instance_data_staging_buffer_info = {\n      .size         = instance_data_buffer_info.size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC};\n  ngf::buffer instance_data_staging_buffer;\n  
NGF_MISC_CHECK_NGF_ERROR(instance_data_staging_buffer.initialize(instance_data_staging_buffer_info));\n  NGF_MISC_CHECK_NGF_ERROR(state->per_instance_data.initialize(instance_data_buffer_info));\n  const ngf_texel_buffer_view_info instance_data_view_info = {\n    .buffer = state->per_instance_data.get(),\n    .offset = 0u,\n    .size = instance_data_buffer_info.size,\n    .texel_format = NGF_IMAGE_FORMAT_RGBA32F\n  };\n  NGF_MISC_CHECK_NGF_ERROR(state->per_instance_data_view.initialize(instance_data_view_info));\n  auto mapped_per_instance_staging_buffer = (float*)ngf_buffer_map_range(\n      instance_data_staging_buffer.get(),\n      0,\n      instance_data_staging_buffer_info.size);\n  for (uint32_t r = 0; r < vertex_attribs::INSTANCES_GRID_SIZE; ++r) {\n    for (uint32_t c = 0; c < vertex_attribs::INSTANCES_GRID_SIZE; ++c) {\n      const uint32_t idx = r * (vertex_attribs::INSTANCES_GRID_SIZE) + c;\n      assert(idx < instance_data_staging_buffer_info.size);\n      float*          p            = &mapped_per_instance_staging_buffer[4 * idx];\n      constexpr float grid_offset  = -static_cast<float>(vertex_attribs::INSTANCES_GRID_SIZE >> 1);\n      constexpr float grid_spacing = 4.0f;\n      p[0]                         = grid_offset * grid_spacing + grid_spacing * (float)c +\n             0.75f * (2.0f * (float)rand() / static_cast<float>(RAND_MAX) - 1.0f);\n      p[2] = grid_offset * (float)grid_spacing + grid_spacing * (float)r +\n             0.75f * (2.0f * (float)rand() / static_cast<float>(RAND_MAX) - 1.0f);\n      p[1] = (2.0f * (float)rand() / static_cast<float>(RAND_MAX) - 1.0f);\n    }\n  }\n  ngf_buffer_flush_range(\n      instance_data_staging_buffer.get(),\n      0,\n      instance_data_staging_buffer_info.size);\n  ngf_buffer_unmap(instance_data_staging_buffer.get());\n  ngf_cmd_copy_buffer(\n      xfer_encoder,\n      instance_data_staging_buffer,\n      state->per_instance_data,\n      instance_data_buffer_info.size,\n      0,\n      0);\n\n  
/**\n   * Create the uniform buffer.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3));\n\n  /* Load contents of the model's texture into a staging buffer. */\n  char              file_name[] = \"assets/dodecahedron.tga\";\n  ngf::buffer       staging_buffer;\n  std::vector<char> cubemap_face_tga_data = load_file(file_name);\n  uint32_t          texture_width, texture_height;\n  load_targa(\n      cubemap_face_tga_data.data(),\n      cubemap_face_tga_data.size(),\n      nullptr,\n      0,\n      &texture_width,\n      &texture_height);\n  const uint32_t staging_buffer_size = 4u * texture_width * texture_height;\n  staging_buffer.initialize(ngf_buffer_info {\n      .size         = staging_buffer_size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC});\n  auto mapped_staging_buffer =\n      (char*)ngf_buffer_map_range(staging_buffer.get(), 0, staging_buffer_size);\n  std::vector<char> texture_rgba_data;\n  texture_rgba_data.resize(staging_buffer_size);\n  load_targa(\n      cubemap_face_tga_data.data(),\n      cubemap_face_tga_data.size(),\n      texture_rgba_data.data(),\n      texture_rgba_data.size(),\n      &texture_width,\n      &texture_height);\n  memcpy(mapped_staging_buffer, texture_rgba_data.data(), staging_buffer_size);\n\n  /* Flush and unmap the staging buffer. */\n  ngf_buffer_flush_range(staging_buffer.get(), 0, staging_buffer_size);\n  ngf_buffer_unmap(staging_buffer.get());\n\n  /* Create the texture. 
*/\n  const uint32_t nmips =\n      1 + static_cast<uint32_t>(std::floor(std::log2(std::max(texture_width, texture_height))));\n  NGF_MISC_CHECK_NGF_ERROR(state->object_texture.initialize(ngf_image_info {\n      .type         = NGF_IMAGE_TYPE_IMAGE_2D,\n      .extent       = ngf_extent3d {.width = texture_width, .height = texture_height, .depth = 1},\n      .nmips        = nmips,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_SRGBA8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST |\n                    NGF_IMAGE_USAGE_MIPMAP_GENERATION}));\n\n  /* Populate the texture. */\n  const ngf_image_write img_write = {\n      .src_offset     = 0u,\n      .dst_offset     = {0, 0, 0},\n      .extent         = {texture_width, texture_height, 1u},\n      .dst_level      = 0u,\n      .dst_base_layer = 0u,\n      .nlayers        = 1u};\n  ngf_cmd_write_image(\n      xfer_encoder,\n      staging_buffer.get(),\n      state->object_texture.get(),\n      &img_write,\n      1u);\n  ngf_cmd_generate_mipmaps(xfer_encoder, state->object_texture);\n\n  /* Create the image sampler. 
*/\n  NGF_MISC_CHECK_NGF_ERROR(state->trilinear_sampler.initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_LINEAR,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = (float)nmips,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 16.0f,\n      .enable_anisotropy = true}));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float              time_delta,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  static float t = .0f;\n  t += time_delta;\n  auto state = reinterpret_cast<vertex_attribs::state*>(userdata);\n\n  ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf_cmd_bind_attrib_buffer(main_render_pass, state->vertex_attrib_buffer, 0, 0);\n  ngf_cmd_bind_index_buffer(main_render_pass, state->index_buffer, 0u, NGF_TYPE_UINT32);\n  state->uniforms_multibuf.write(\n      {nm::perspective(\n           nm::deg2rad(state->vfov),\n           static_cast<float>(w) / static_cast<float>(h),\n           0.01f,\n           1000.0f) *\n           nm::look_at(\n               nm::float3 {0.0f, 50.0f, state->dolly},\n               nm::float3 {.0f, .0f, .0f},\n               nm::float3 {.0f, 1.0f, .0f}),\n       t});\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->uniforms_multibuf.bind_op_at_current_offset(0, 0),\n      ngf::descriptor_set<0>::binding<1>::texel_buffer(state->per_instance_data_view.get()),\n      ngf::descriptor_set<0>::binding<2>::texture(state->object_texture.get()),\n      
ngf::descriptor_set<0>::binding<3>::sampler(state->trilinear_sampler.get()));\n  ngf_cmd_draw(\n      main_render_pass,\n      true,\n      0u,\n      sizeof(vertex_attribs::index_data) / sizeof(vertex_attribs::index_data[0]),\n      128 * 128);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<vertex_attribs::state*>(userdata);\n  ImGui::Begin(\"Camera control\");\n  ImGui::DragFloat(\"dolly\", &data->dolly, 0.01f, -500.0f, 1.0f);\n  ImGui::DragFloat(\"fov\", &data->vfov, 0.08f, 25.0f, 90.0f);\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<vertex_attribs::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/07-blinn-phong/blinn-phong.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"camera-controller.h\"\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n#include \"mesh-loader.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace blinn_phong {\n\nstruct light_data {\n  nm::float4 ambient_light_intensity { 0.01f, 0.02f, 0.03f, 0.0f };\n  nm::float4 obj_space_point_light_position { 0.0f, 0.0f, 2.0f, 1.0f };\n  nm::float4 point_light_intensity { 0.6f, 0.5f, 0.3f, 1.0f };\n  nm::float4 obj_space_directional_light_direction { 0.0f, -1.0f, 0.5f, 0.0f };\n  nm::float4 directional_light_intensity { 0.2f, 0.3f, 0.5f, 1.0f };\n};\n\nstruct material_data 
{\n  nm::float4 diffuse_reflectance { 0.9f, 0.9f, 0.9f, 1.0f};\n  nm::float4 specular_coefficient { 1.0f, 1.0f, 1.0f, 1.0f };\n  float      shininess = 125.0f;\n};\n\nstruct uniforms {\n  camera_matrices cam_matrices;\n  light_data    lights;\n  material_data material;\n};\n\nstruct state {\n  ngf::graphics_pipeline             vanilla_pipeline;\n  ngf::graphics_pipeline             half_lambert_pipeline;\n  mesh                               bunny_mesh;\n  light_data                         lights;\n  material_data                      material;\n  ngf::uniform_multibuffer<uniforms> uniforms_multibuf;\n  camera_state                       camera;\n  float                              dolly = 3.0f;\n  float                              vfov  = 60.0f;\n  bool                               enable_half_lambert = true;\n};\n\n}  // namespace blinn_phong\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new blinn_phong::state {};\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage vertex_shader_stage =\n      load_shader_stage(\"blinn-phong\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"blinn-phong\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Enable depth testing 
and depth write.\n   */\n  pipeline_data.depth_stencil_info.depth_test  = true;\n  pipeline_data.depth_stencil_info.depth_write = true;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Set up vertex attributes.\n   */\n\n  /* attribute descriptions indicate the location and format of individual vertex attributes. */\n  const ngf_vertex_attrib_desc vertex_attrib_descriptions[] = {\n      {/* position. */\n       .location   = 0u,\n       .binding    = 0u,\n       .offset     = 0u,\n       .type       = NGF_TYPE_FLOAT,\n       .size       = 3u,\n       .normalized = false},\n      {/* normal. */\n       .location   = 1u,\n       .binding    = 0u,\n       .offset     = 3u * sizeof(float),\n       .type       = NGF_TYPE_FLOAT,\n       .size       = 3u,\n       .normalized = false},\n  };\n\n  /**\n   * Note that the displayed model has positions, normals and UV coordinates,\n   * however we only use positions and normals in this sample. 
We still have\n   * to account for the UV coordinates when providing the stride for the vertex\n   * attribute binding.\n   */\n  const ngf_vertex_buf_binding_desc vertex_buf_binding_descriptions[] = {{\n      .binding    = 0u,\n      .stride     = sizeof(float) * (3u + 3u + 2u),\n      .input_rate = NGF_INPUT_RATE_VERTEX,\n  }};\n\n  pipeline_data.vertex_input_info.nattribs =\n      sizeof(vertex_attrib_descriptions) / sizeof(vertex_attrib_descriptions[0]);\n  pipeline_data.vertex_input_info.attribs = vertex_attrib_descriptions;\n  pipeline_data.vertex_input_info.nvert_buf_bindings =\n      sizeof(vertex_buf_binding_descriptions) / sizeof(vertex_buf_binding_descriptions[0]);\n  pipeline_data.vertex_input_info.vert_buf_bindings = vertex_buf_binding_descriptions;\n\n  /**\n   * Initialize the \"vanilla\" pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->vanilla_pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Set the appropriate specialization constant and initialize the half-lambert pipeline object.\n   */\n   const ngf_constant_specialization half_lambert_spec = {\n    .constant_id = 0,\n    .offset = 0,\n    .type = NGF_TYPE_UINT32\n   };\n   int half_lambert_spec_value = 1;\n   pipeline_data.spec_info.nspecializations = 1;\n   pipeline_data.spec_info.specializations =  &half_lambert_spec;\n   pipeline_data.spec_info.value_buffer = &half_lambert_spec_value;\n   NGF_MISC_CHECK_NGF_ERROR(state->half_lambert_pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Load the model from a file.\n   */\n  state->bunny_mesh = load_mesh_from_file(\"assets/bunny.mesh\", xfer_encoder);\n  NGF_MISC_ASSERT(state->bunny_mesh.have_normals);\n  NGF_MISC_ASSERT(state->bunny_mesh.num_indices > 0u);\n\n  /**\n   * Create the uniform buffer.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3));\n\n  /**\n   * Set up some initial viewing parameters.\n   */\n   state->camera.look_at[1] = 1.0f;\n  return 
static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float              /*time_delta*/,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<blinn_phong::state*>(userdata);\n\n  ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->enable_half_lambert ? state->half_lambert_pipeline : state->vanilla_pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf_cmd_bind_attrib_buffer(main_render_pass, state->bunny_mesh.vertex_data.get(), 0, 0);\n  ngf_cmd_bind_index_buffer(main_render_pass, state->bunny_mesh.index_data.get(), 0, NGF_TYPE_UINT32);\n  blinn_phong::uniforms uniforms;\n  uniforms.cam_matrices = compute_camera_matrices(state->camera, \n           static_cast<float>(w) / static_cast<float>(h));\n  uniforms.material = state->material;\n  uniforms.lights = state->lights;\n  uniforms.lights.obj_space_point_light_position =\n    uniforms.cam_matrices.world_to_view_transform * uniforms.lights.obj_space_point_light_position;\n  uniforms.lights.obj_space_directional_light_direction =\n    uniforms.cam_matrices.world_to_view_transform * uniforms.lights.obj_space_directional_light_direction;\n  state->uniforms_multibuf.write(uniforms);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->uniforms_multibuf.bind_op_at_current_offset(0, 0));\n  ngf_cmd_draw(\n      main_render_pass,\n      true,\n      0u,\n      (uint32_t)state->bunny_mesh.num_indices,\n      1u);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<blinn_phong::state*>(userdata);\n  ImGui::Begin(\"Controls\");\n  ImGui::Separator();\n  ImGui::Checkbox(\"enable half-lambert trick\", 
&data->enable_half_lambert);\n  ImGui::Separator();\n  camera_ui(data->camera, std::make_pair(-5.f, 5.f), .1f, std::make_pair(1.0f, 10.0f), .1f);\n  ImGui::Separator();\n  ImGui::Text(\"point light\");\n  ImGui::DragFloat3(\"position\", data->lights.obj_space_point_light_position.data, 0.1f, -2.0f, 2.0f, \"%.1f\", 0);\n  ImGui::ColorEdit3(\"intensity##0\", data->lights.point_light_intensity.data);\n  ImGui::Text(\"directional light\");\n  ImGui::DragFloat3(\"direction\", data->lights.obj_space_directional_light_direction.data, 0.1f, -2.0f, 2.0f, \"%.1f\", 0);\n  ImGui::ColorEdit3(\"intensity##1\", data->lights.directional_light_intensity.data);\n  ImGui::Text(\"ambient light\");\n  ImGui::ColorEdit3(\"intensity##2\", data->lights.ambient_light_intensity.data);\n  ImGui::Separator();\n  ImGui::Text(\"material\");\n  ImGui::ColorEdit3(\"diffuse reflectance\", data->material.diffuse_reflectance.data);\n  ImGui::ColorEdit3(\"specular coefficient\", data->material.specular_coefficient.data);\n  ImGui::SliderFloat(\"shininess\", &data->material.shininess, 0.1f, 1000.0f, \"%.1f\", 0);\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<blinn_phong::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/08-image-arrays/image-arrays.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"camera-controller.h\"\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n#include \"staging-image.h\"\n\n#include <stdio.h>\n#include <string>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace image_arrays {\n\nstruct img_array_uniforms {\n  nm::float4x4 matrix;\n  float        image_array_idx = 0.0f;\n  uint32_t     index           = 0u;\n};\n\nstruct multiple_imgs_uniforms {\n  nm::float4x4 matrix;\n  uint32_t     index = 0u;\n};\n\nstruct cube_array_uniforms {\n  nm::float4x4 matrix;\n  float        aspect    = 1.0f;\n  float        array_idx = 0.0f;\n};\n\nconstexpr int 
NUM_IMAGE_LAYERS = 4;\nstruct state {\n  ngf::graphics_pipeline                           img_array_pipeline;\n  ngf::graphics_pipeline                           cubemap_array_pipeline;\n  ngf::graphics_pipeline                           multiple_images_pipeline;\n  ngf::image                                       image_array;\n  ngf::image                                       cubemap_array;\n  ngf::image                                       multiple_images[NUM_IMAGE_LAYERS];\n  ngf::sampler                                     image_sampler;\n  ngf::uniform_multibuffer<img_array_uniforms>     img_array_uniforms_multibuf;\n  ngf::uniform_multibuffer<cube_array_uniforms>    cube_array_uniforms_multibuf;\n  ngf::uniform_multibuffer<multiple_imgs_uniforms> multi_img_uniforms_multibuf;\n  float                                            dolly             = -5.0f;\n  float                                            image_array_idx   = 0.0f;\n  float                                            cubemap_array_idx = 0.0f;\n  uint32_t                                         image_idx         = 0;\n  float                                            yaw               = 0.0f;\n  float                                            pitch             = 0.0f;\n};\n\n}  // namespace image_arrays\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new image_arrays::state {};\n\n  /**\n   * Create staging buffers for all the layers in the array.\n   */\n  staging_image staging_images[image_arrays::NUM_IMAGE_LAYERS];\n  uint32_t      image_array_width = 0, image_array_height = 0, nmips = 0;\n  for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) {\n    const std::string file_name = std::string(\"assets/imgarr\") + std::to_string(i) + \".tga\";\n    staging_images[i]           = create_staging_image_from_tga(file_name.c_str());\n    /** Ensure the 
dimensions of the image are valid. */\n    if (i > 0 && (staging_images[i].width_px != image_array_width ||\n                  staging_images[i].height_px != image_array_height)) {\n      loge(\"all images in the array must have the same dimensions\");\n      return nullptr;\n    } else {\n      image_array_width  = staging_images[i].width_px;\n      image_array_height = staging_images[i].height_px;\n      nmips              = staging_images[i].nmax_mip_levels;\n    }\n  }\n\n  /**\n   * Create the image object with several array layers.\n   */\n  ngf_image_info image_array_info = {\n      .type = NGF_IMAGE_TYPE_IMAGE_2D,\n      .extent =\n          {\n              .width  = image_array_width,\n              .height = image_array_height,\n              .depth  = 1u,\n          },\n      .nmips        = nmips,\n      .nlayers      = image_arrays::NUM_IMAGE_LAYERS,\n      .format       = NGF_IMAGE_FORMAT_SRGBA8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM |\n                    NGF_IMAGE_USAGE_XFER_DST};\n  NGF_MISC_CHECK_NGF_ERROR(state->image_array.initialize(image_array_info));\n\n  /**\n   * Initialize individual array members for the descriptor with multiple images.\n   */\n  image_array_info.nlayers = 1u;\n  for (uint32_t i = 0u; i < image_arrays::NUM_IMAGE_LAYERS; ++i) {\n    NGF_MISC_CHECK_NGF_ERROR(state->multiple_images[i].initialize(image_array_info));\n  }\n\n  /**\n   * Populate the first mip level for each layer of each image.\n   */\n  for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) {\n    const ngf_image_write img_array_write = {\n        .src_offset     = 0u,\n        .dst_offset     = {0, 0, 0},\n        .extent         = {image_array_width, image_array_height, 1u},\n        .dst_base_layer = i,\n        .nlayers        = 1u};\n    ngf_cmd_write_image(\n        xfer_encoder,\n        staging_images[i].staging_buffer.get(),\n        
state->image_array.get(),\n        &img_array_write,\n        1u);\n    const ngf_image_write img_write = {\n        .src_offset     = 0u,\n        .dst_offset     = {0, 0, 0},\n        .extent         = {image_array_width, image_array_height, 1u},\n        .dst_base_layer = 0,\n        .nlayers        = 1u};\n    ngf_cmd_write_image(\n        xfer_encoder,\n        staging_images[i].staging_buffer.get(),\n        state->multiple_images[i].get(),\n        &img_write,\n        1u);\n    ngf_cmd_generate_mipmaps(xfer_encoder, state->multiple_images[i].get());\n  }\n\n  /** Populate the rest of the mip levels automatically. **/\n  ngf_cmd_generate_mipmaps(xfer_encoder, state->image_array.get());\n\n  /** Create a cubemap object with several array layers. */\n  ngf_image_info cubemap_array_info = {\n      .type = NGF_IMAGE_TYPE_CUBE,\n      .extent =\n          {\n              .width  = image_array_width,\n              .height = image_array_height,\n              .depth  = 1u,\n          },\n      .nmips        = nmips,\n      .nlayers      = image_arrays::NUM_IMAGE_LAYERS,\n      .format       = NGF_IMAGE_FORMAT_SRGBA8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM |\n                    NGF_IMAGE_USAGE_XFER_DST};\n  NGF_MISC_CHECK_NGF_ERROR(state->cubemap_array.initialize(cubemap_array_info));\n\n  /** Upload the first mip level for each layer on each face. 
*/\n  for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) {\n    for (uint32_t face = NGF_CUBEMAP_FACE_POSITIVE_X; face < NGF_CUBEMAP_FACE_COUNT; ++face) {\n      const ngf_image_write img_write = {\n          .src_offset     = 0u,\n          .dst_offset     = {0, 0, 0},\n          .extent         = {image_array_width, image_array_height, 1u},\n          .dst_level      = 0u,\n          .dst_base_layer = 6u * i + face,\n          .nlayers        = 1u};\n      ngf_cmd_write_image(\n          xfer_encoder,\n          staging_images[i].staging_buffer.get(),\n          state->cubemap_array.get(),\n          &img_write,\n          1u);\n    }\n  }\n  /** Generate the rest of the mips automatically. */\n  ngf_cmd_generate_mipmaps(xfer_encoder, state->cubemap_array.get());\n\n  /** Create an image sampler. */\n  NGF_MISC_CHECK_NGF_ERROR(state->image_sampler.initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_LINEAR,\n      .wrap_u            = NGF_WRAP_MODE_REPEAT,\n      .wrap_v            = NGF_WRAP_MODE_REPEAT,\n      .wrap_w            = NGF_WRAP_MODE_REPEAT,\n      .lod_max           = (float)nmips,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false}));\n\n  /**\n   * Load the shader stages for the regular image array pipeline.\n   */\n  const ngf::shader_stage img_array_vertex_shader_stage =\n      load_shader_stage(\"textured-quad-image-array\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage img_array_fragment_shader_stage =\n      load_shader_stage(\"textured-quad-image-array\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader 
stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = img_array_vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = img_array_fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Initialize the image array pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->img_array_pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Load the shader stages for the multiple images pipeline.\n   */\n  const ngf::shader_stage multiple_images_vertex_shader_stage =\n      load_shader_stage(\"textured-quad-multiple-images\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage multiple_images_fragment_shader_stage =\n      load_shader_stage(\"textured-quad-multiple-images\", \"PSMain\", NGF_STAGE_FRAGMENT);\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.shader_stages[0] = multiple_images_vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = multiple_images_fragment_shader_stage.get();\n\n  /**\n   * Initialize the multiple images pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->multiple_images_pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Load the shader stages for the cubemap array pipeline.\n   */\n  const ngf::shader_stage cubemap_vertex_shader_stage =\n      load_shader_stage(\"cubemap-array\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage cubemap_fragment_shader_stage =\n      load_shader_stage(\"cubemap-array\", \"PSMain\", NGF_STAGE_FRAGMENT);\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.shader_stages[0] = cubemap_vertex_shader_stage.get();\n  
pipeline_data.pipeline_info.shader_stages[1] = cubemap_fragment_shader_stage.get();\n\n  /**\n   * Initialize the cubemap array pipeline object.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->cubemap_array_pipeline.initialize(pipeline_data.pipeline_info));\n\n  /**\n   * Create the uniform buffers.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->img_array_uniforms_multibuf.initialize(3));\n  NGF_MISC_CHECK_NGF_ERROR(state->cube_array_uniforms_multibuf.initialize(3));\n  NGF_MISC_CHECK_NGF_ERROR(state->multi_img_uniforms_multibuf.initialize(3));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<image_arrays::state*>(userdata);\n\n  /* Compute the perspective transform for the current frame. */\n  const nm::float4x4 camera_to_clip = nm::perspective(\n      nm::deg2rad(72.0f),\n      static_cast<float>(w) / static_cast<float>(h),\n      0.01f,\n      100.0f);\n  /* Build the world-to-camera transform for the current frame. 
*/\n  nm::float4x4 world_to_camera = nm::translation(nm::float3 {-3.0f, 0.0f, state->dolly});\n\n  image_arrays::img_array_uniforms img_arr_uniforms;\n  img_arr_uniforms.matrix          = camera_to_clip * world_to_camera;\n  img_arr_uniforms.image_array_idx = state->image_array_idx;\n  state->img_array_uniforms_multibuf.write(img_arr_uniforms);\n\n  image_arrays::multiple_imgs_uniforms multiimg_uniforms;\n  multiimg_uniforms.matrix = camera_to_clip * nm::translation(nm::float3{3.0f, 0.0f, state->dolly});\n  multiimg_uniforms.index = state->image_idx;\n  state->multi_img_uniforms_multibuf.write(multiimg_uniforms);\n\n  image_arrays::cube_array_uniforms cube_arr_uniforms;\n  cube_arr_uniforms.aspect    = (float)w / (float)h;\n  cube_arr_uniforms.array_idx = state->cubemap_array_idx;\n  cube_arr_uniforms.matrix    = nm::rotation_y(state->yaw) * nm::rotation_x(state->pitch);\n  state->cube_array_uniforms_multibuf.write(cube_arr_uniforms);\n\n  ngf_irect2d viewport {0, 0, w, h};\n\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->cubemap_array_pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->cube_array_uniforms_multibuf.bind_op_at_current_offset(0, 0),\n      ngf::descriptor_set<0>::binding<1>::texture(state->cubemap_array.get()),\n      ngf::descriptor_set<0>::binding<2>::sampler(state->image_sampler.get()));\n  ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n\n  \n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->multiple_images_pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->multi_img_uniforms_multibuf\n          .bind_op_at_current_offset(0, 0, 0, sizeof(image_arrays::multiple_imgs_uniforms)),\n      ngf::descriptor_set<0>::binding<1>::sampler(state->image_sampler),\n      
ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[0], 0),\n      ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[1], 1),\n      ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[2], 2),\n      ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[3], 3));\n  ngf_cmd_draw(main_render_pass, false, 0, 6, 1);\n\n  \n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->img_array_pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      state->img_array_uniforms_multibuf\n          .bind_op_at_current_offset(0, 0, 0, sizeof(image_arrays::img_array_uniforms)),\n      ngf::descriptor_set<0>::binding<1>::sampler(state->image_sampler),\n      ngf::descriptor_set<1>::binding<0>::texture(state->image_array));\n  ngf_cmd_draw(main_render_pass, false, 0, 6, 1);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<image_arrays::state*>(userdata);\n  ImGui::Begin(\"Image Arrays\");\n  ImGui::DragFloat(\"dolly\", &data->dolly, 0.01f, -70.0f, 0.11f);\n  ImGui::DragFloat(\"image array index\", &data->image_array_idx, 0.1f, 0.0f, 3.0f);\n  ImGui::DragFloat(\"cubemap array index\", &data->cubemap_array_idx, 0.1f, 0.0f, 3.0f);\n  ImGui::DragInt(\"image index\", (int*)&data->image_idx, .1f, 0, 3);\n  ImGui::SliderFloat(\"cubemap pitch\", &data->pitch, -nm::PI, nm::PI);\n  ImGui::SliderFloat(\"cubemap yaw\", &data->yaw, -nm::PI, nm::PI);\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<image_arrays::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/09-volume-rendering/volume-rendering.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n#include <string>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace volume_rendering {\n\nstruct uniforms {\n  nm::float4x4 transform_matrix;\n  float        aspect_ratio;\n};\n\nstruct state {\n  ngf::image                         volume;\n  ngf::sampler                       sampler;\n  ngf::graphics_pipeline             pipeline;\n  ngf::uniform_multibuffer<uniforms> uniforms_multibuffer;\n  uint16_t                           volume_voxel_dimensions[3];\n};\n\n}  // namespace volume_rendering\n\nvoid* sample_initialize(\n    
uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new volume_rendering::state {};\n\n  /** Open the file containing the volume data and read in the dimensions. */\n  FILE* volume_data_file = fopen(\"assets/stag-beetle-volume.dat\", \"rb\");\n  if (volume_data_file == nullptr) {\n    loge(\"failed to open the volume data file.\");\n    return nullptr;\n  }\n  fread(state->volume_voxel_dimensions, sizeof(uint16_t), 3, volume_data_file);\n\n  /** Prepare a staging buffer. */\n  const size_t staging_buffer_size = sizeof(uint16_t) * state->volume_voxel_dimensions[0] *\n                                     state->volume_voxel_dimensions[1] * state->volume_voxel_dimensions[2];\n  const ngf_buffer_info staging_buffer_info = {\n      .size         = staging_buffer_size,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC,\n  };\n  ngf::buffer staging_buffer;\n  NGF_MISC_CHECK_NGF_ERROR(staging_buffer.initialize(staging_buffer_info));\n\n  /** Map the staging buffer and read the volume data directly into the memory. */\n  void* mapped_staging_buffer_ptr = ngf_buffer_map_range(staging_buffer, 0, staging_buffer_size);\n  const uint64_t read_bytes =\n      fread(mapped_staging_buffer_ptr, 1, staging_buffer_size, volume_data_file);\n  if (ferror(volume_data_file)) {\n    loge(\"error reading volume data file: %d\", errno);\n    return nullptr;\n  }\n  if (read_bytes != staging_buffer_size) {\n    loge(\"failed to read the entire volume data. EOF: %d\", feof(volume_data_file));\n    return nullptr;\n  }\n  fclose(volume_data_file);\n\n  /** Flush and unmap the staging buffer to prepare it for the upcoming transfer. */\n  ngf_buffer_flush_range(staging_buffer, 0, staging_buffer_size);\n  ngf_buffer_unmap(staging_buffer);\n\n  /** Prepare a 3D image. 
*/\n  const ngf_image_info img_info = {\n      .type = NGF_IMAGE_TYPE_IMAGE_3D,\n      .extent =\n          {.width  = state->volume_voxel_dimensions[0],\n           .height = state->volume_voxel_dimensions[1],\n           .depth  = state->volume_voxel_dimensions[2]},\n      .nmips        = 1u,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_R16_UNORM,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_XFER_DST | NGF_IMAGE_USAGE_SAMPLE_FROM,\n  };\n  NGF_MISC_CHECK_NGF_ERROR(state->volume.initialize(img_info));\n\n  /** Upload the volume data into the image. */\n  const ngf_image_write img_write = {\n      .src_offset     = 0u,\n      .dst_offset     = {0, 0, 0},\n      .extent         = img_info.extent,\n      .dst_level      = 0u,\n      .dst_base_layer = 0u,\n      .nlayers        = 1u};\n  ngf_cmd_write_image(xfer_encoder, staging_buffer, state->volume.get(), &img_write, 1u);\n\n  /**\n   * Initialize the sampler.\n   */\n  NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(ngf_sampler_info {\n      .min_filter        = NGF_FILTER_LINEAR,\n      .mag_filter        = NGF_FILTER_LINEAR,\n      .mip_filter        = NGF_FILTER_NEAREST,\n      .wrap_u            = NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      .wrap_v            = NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      .wrap_w            = NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      .lod_max           = 0.0f,\n      .lod_min           = 0.0f,\n      .lod_bias          = 0.0f,\n      .max_anisotropy    = 0.0f,\n      .enable_anisotropy = false}));\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage vertex_shader_stage =\n      load_shader_stage(\"volume-renderer\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage fragment_shader_stage =\n      load_shader_stage(\"volume-renderer\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Prepare a template with some default values for pipeline initialization.\n   */\n  ngf_util_graphics_pipeline_data pipeline_data;\n  
ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  /**\n   * Set shader stages.\n   */\n  pipeline_data.pipeline_info.nshader_stages   = 2;\n  pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get();\n  pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get();\n\n  /**\n   * Set multisampling state.\n   */\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  /**\n   * Set the compatible render target description.\n   */\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n\n  /**\n   * Set up blending.\n   */\n  ngf_blend_info blend_info;\n  blend_info.enable                 = true;\n  blend_info.blend_op_color         = NGF_BLEND_OP_ADD;\n  blend_info.dst_color_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;\n  blend_info.src_color_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA;\n  blend_info.blend_op_alpha         = NGF_BLEND_OP_ADD;\n  blend_info.src_alpha_blend_factor = NGF_BLEND_FACTOR_ZERO;\n  blend_info.dst_alpha_blend_factor = NGF_BLEND_FACTOR_ONE;\n  blend_info.color_write_mask       = NGF_COLOR_MASK_WRITE_BIT_R | NGF_COLOR_MASK_WRITE_BIT_G |\n                                NGF_COLOR_MASK_WRITE_BIT_B | NGF_COLOR_MASK_WRITE_BIT_A;\n  pipeline_data.pipeline_info.color_attachment_blend_states = &blend_info;\n\n  /**\n   * Initialize the pipeline object.\n   */\n  state->pipeline.initialize(pipeline_data.pipeline_info);\n\n  /**\n   * Initialize uniforms multibuffer.\n   */\n  state->uniforms_multibuffer.initialize(3);\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float time_delta,\n    ngf_frame_token /*token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n    static float t = 0.0;\n    t += time_delta;\n  auto                       state = reinterpret_cast<volume_rendering::state*>(userdata);\n  
volume_rendering::uniforms u {\n      nm::rotation_x(-1.620f) * nm::rotation_y(t) *\n      nm::translation(nm::float3(0.0, -0.5, 0.0)),\n      (float)w / (float)h };\n  state->uniforms_multibuffer.write(u);\n  const ngf_irect2d viewport {0, 0, w, h};\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline);\n  ngf_cmd_viewport(main_render_pass, &viewport);\n  ngf_cmd_scissor(main_render_pass, &viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      ngf::descriptor_set<0>::binding<0>::texture(state->volume),\n      ngf::descriptor_set<0>::binding<1>::sampler(state->sampler),\n      state->uniforms_multibuffer.bind_op_at_current_offset(1, 0, 0, sizeof(volume_rendering::uniforms)));\n  ngf_cmd_draw(main_render_pass, false, 0, 6, state->volume_voxel_dimensions[2]);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* /*userdata*/) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<volume_rendering::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/0a-compute-demo/compute-demo.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n#include <string>\n\nnamespace ngf_samples {\n\nnamespace compute_demo {\n\nstruct state {\n  ngf::image             image;\n  ngf::compute_pipeline  compute_pipeline;\n  ngf::graphics_pipeline blit_pipeline;\n  ngf::sampler           sampler;\n  ngf_compute_encoder    prev_compute_enc;\n  ngf_image_ref          image_ref;\n  uint32_t               frame;\n};\n\n}  // namespace compute_demo\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder /* 
xfer_encoder*/) {\n  auto state = new compute_demo::state {};\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage compute_shader =\n      load_shader_stage(\"compute-demo\", \"CSMain\", NGF_STAGE_COMPUTE);\n\n  /**\n   * Create the compute pipeline.\n   */\n  ngf_compute_pipeline_info pipeline_info;\n  pipeline_info.shader_stage = compute_shader.get();\n  pipeline_info.spec_info    = nullptr;\n  NGF_SAMPLES_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info));\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage blit_vertex_stage =\n      load_shader_stage(\"simple-texture\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage blit_fragment_stage =\n      load_shader_stage(\"simple-texture\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline for blit.\n   */\n  ngf_util_graphics_pipeline_data blit_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data);\n  blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  ngf_graphics_pipeline_info& blit_pipe_info       = blit_pipeline_data.pipeline_info;\n  blit_pipe_info.nshader_stages                    = 2u;\n  blit_pipe_info.shader_stages[0]                  = blit_vertex_stage.get();\n  blit_pipe_info.shader_stages[1]                  = blit_fragment_stage.get();\n  blit_pipe_info.compatible_rt_attachment_descs    = ngf_default_render_target_attachment_descs();\n  NGF_SAMPLES_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info));\n\n  /**\n   * Initialize the image.\n   */\n  ngf_image_info image_info;\n  image_info.format        = NGF_IMAGE_FORMAT_RGBA8;\n  image_info.extent.depth  = 1;\n  image_info.extent.width  = 4 * 128;\n  image_info.extent.height = 4 * 128;\n  image_info.nlayers       = 1u;\n  image_info.nmips         = 1u;\n  image_info.sample_count  = NGF_SAMPLE_COUNT_1;\n  image_info.type          = NGF_IMAGE_TYPE_IMAGE_2D;\n  image_info.usage_hint    = 
NGF_IMAGE_USAGE_STORAGE | NGF_IMAGE_USAGE_SAMPLE_FROM;\n  NGF_SAMPLES_CHECK_NGF_ERROR(state->image.initialize(image_info));\n  state->image_ref.image     = state->image;\n  state->image_ref.layer     = 0u;\n  state->image_ref.mip_level = 0u;\n\n  /* Create sampler.*/\n  const ngf_sampler_info samp_info {\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_NEAREST,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      0.0f,\n      0.0f,\n      0.0f,\n      1.0f,\n      false};\n  NGF_SAMPLES_CHECK_NGF_ERROR(state->sampler.initialize(samp_info));\n\n  state->frame = 0u;\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /* token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<compute_demo::state*>(userdata);\n  if (state->frame > 0u) {\n    ngf_irect2d onsc_viewport {0, 0, w, h};\n    ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline);\n    ngf_cmd_viewport(main_render_pass, &onsc_viewport);\n    ngf_cmd_scissor(main_render_pass, &onsc_viewport);\n    ngf::cmd_bind_resources(\n        main_render_pass,\n        ngf::descriptor_set<0>::binding<1>::texture(state->image.get()),\n        ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get()));\n    ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n  }\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, ngf_sync_op* sync_op, void* userdata) {\n  auto state = reinterpret_cast<compute_demo::state*>(userdata);\n  if (state->frame > 0u) {\n    sync_op->nwait_compute_encoders = 1u;\n    sync_op->wait_compute_encoders  = &state->prev_compute_enc;\n    sync_op->nimage_refs            = 1u;\n    sync_op->image_refs             = &state->image_ref;\n  }\n}\n\nvoid sample_post_draw_frame(\n    ngf_cmd_buffer     cmd_buffer,\n    ngf_render_encoder 
prev_render_encoder,\n    void*              userdata) {\n  auto              state = reinterpret_cast<compute_demo::state*>(userdata);\n  const ngf_sync_op compute_sync_op {\n      .nwait_render_encoders = 1u,\n      .wait_render_encoders  = &prev_render_encoder,\n      .nimage_refs           = 1u,\n      .image_refs            = &state->image_ref};\n\n  ngf_compute_encoder compute_enc;\n  NGF_SAMPLES_CHECK_NGF_ERROR(\n      ngf_cmd_begin_compute_pass(cmd_buffer, &compute_sync_op, &compute_enc));\n  ngf_resource_bind_op bind_op;\n  bind_op.info.image_sampler.image = state->image;\n  bind_op.target_set               = 0;\n  bind_op.target_binding           = 0;\n  bind_op.type                     = NGF_DESCRIPTOR_STORAGE_IMAGE;\n  ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline.get());\n  ngf_cmd_bind_compute_resources(compute_enc, &bind_op, 1);\n  ngf_cmd_dispatch(compute_enc, 128, 128, 1);\n  ngf_cmd_end_compute_pass(compute_enc);\n  state->prev_compute_enc = compute_enc;\n  state->frame++;\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* /*userdata*/) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<compute_demo::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/0a-compute-mandelbrot/compute-mandelbrot.cpp",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n#include <string>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace compute_demo {\n\nstruct state {\n  ngf::image             image;\n  ngf::compute_pipeline  compute_pipeline;\n  ngf::graphics_pipeline blit_pipeline;\n  ngf::sampler           sampler;\n  ngf_compute_encoder    prev_compute_enc;\n  ngf_image_ref          image_ref;\n  uint32_t               frame;\n};\n\n}  // namespace compute_demo\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count 
main_render_target_sample_count,\n    ngf_xfer_encoder /* xfer_encoder*/) {\n  auto state = new compute_demo::state {};\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage compute_shader =\n      load_shader_stage(\"compute-demo\", \"CSMain\", NGF_STAGE_COMPUTE);\n\n  /**\n   * Create the compute pipeline.\n   */\n  ngf_compute_pipeline_info pipeline_info{};\n  pipeline_info.shader_stage = compute_shader.get();\n  pipeline_info.spec_info    = nullptr;\n  NGF_MISC_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info));\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage blit_vertex_stage =\n      load_shader_stage(\"simple-texture\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage blit_fragment_stage =\n      load_shader_stage(\"simple-texture\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline for blit.\n   */\n  ngf_util_graphics_pipeline_data blit_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data);\n  blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  ngf_graphics_pipeline_info& blit_pipe_info       = blit_pipeline_data.pipeline_info;\n  blit_pipe_info.nshader_stages                    = 2u;\n  blit_pipe_info.shader_stages[0]                  = blit_vertex_stage.get();\n  blit_pipe_info.shader_stages[1]                  = blit_fragment_stage.get();\n  blit_pipe_info.compatible_rt_attachment_descs    = ngf_default_render_target_attachment_descs();\n  NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info));\n\n  /**\n   * Initialize the image.\n   */\n  ngf_image_info image_info;\n  image_info.format        = NGF_IMAGE_FORMAT_RGBA8;\n  image_info.extent.depth  = 1;\n  image_info.extent.width  = 4 * 128;\n  image_info.extent.height = 4 * 128;\n  image_info.nlayers       = 1u;\n  image_info.nmips         = 1u;\n  image_info.sample_count  = NGF_SAMPLE_COUNT_1;\n  image_info.type          = 
NGF_IMAGE_TYPE_IMAGE_2D;\n  image_info.usage_hint    = NGF_IMAGE_USAGE_STORAGE | NGF_IMAGE_USAGE_SAMPLE_FROM;\n  NGF_MISC_CHECK_NGF_ERROR(state->image.initialize(image_info));\n  state->image_ref.image     = state->image;\n  state->image_ref.layer     = 0u;\n  state->image_ref.mip_level = 0u;\n\n  /* Create sampler.*/\n  const ngf_sampler_info samp_info {\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_NEAREST,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      0.0f,\n      0.0f,\n      0.0f,\n      1.0f,\n      false};\n  NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info));\n\n  state->frame = 0u;\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /*time_delta*/,\n    ngf_frame_token /* token*/,\n    uint32_t w,\n    uint32_t h,\n    float /*time*/,\n    void* userdata) {\n  auto state = reinterpret_cast<compute_demo::state*>(userdata);\n  if (state->frame > 0u) {\n    ngf_irect2d onsc_viewport {0, 0, w, h};\n    ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline);\n    ngf_cmd_viewport(main_render_pass, &onsc_viewport);\n    ngf_cmd_scissor(main_render_pass, &onsc_viewport);\n    ngf::cmd_bind_resources(\n        main_render_pass,\n        ngf::descriptor_set<0>::binding<1>::texture(state->image.get()),\n        ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get()));\n    ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n  }\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) { }\n\nvoid sample_post_draw_frame(\n    ngf_cmd_buffer     cmd_buffer,\n    void*              userdata) {\n  auto              state = reinterpret_cast<compute_demo::state*>(userdata);\n  const ngf_compute_pass_info pass_info {};\n\n  ngf_compute_encoder compute_enc;\n  NGF_MISC_CHECK_NGF_ERROR(\n      ngf_cmd_begin_compute_pass(cmd_buffer, &pass_info, &compute_enc));\n  ngf_resource_bind_op 
bind_op{};\n  bind_op.info.image_sampler.is_image_view = false;\n  bind_op.info.image_sampler.resource.image = state->image;\n  bind_op.target_set               = 0;\n  bind_op.target_binding           = 0;\n  bind_op.type                     = NGF_DESCRIPTOR_STORAGE_IMAGE;\n  ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline.get());\n  ngf_cmd_bind_compute_resources(compute_enc, &bind_op, 1);\n  ngf_cmd_dispatch(compute_enc, 128, 128, 1);\n  ngf_cmd_end_compute_pass(compute_enc);\n  state->prev_compute_enc = compute_enc;\n  state->frame++;\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* /*userdata*/) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<compute_demo::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/0b-compute-vertices/compute-vertices.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#define _CRT_SECURE_NO_WARNINGS\n#include \"camera-controller.h\"\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"logging.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicemath.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n#include <string>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nnamespace compute_verts {\n\nconstexpr int nverts_per_side    = 512;\nconstexpr int ntotal_verts       = nverts_per_side * nverts_per_side;\nconstexpr int nindices_per_strip = 2 * nverts_per_side + 1u;\nconstexpr int nstrips            = nverts_per_side - 1;\nconstexpr int ntotal_indices     = nstrips * nindices_per_strip;\n\nstruct render_uniforms {\n  camera_matrices cam_matrices;\n};\n\nstruct compute_uniforms {\n  float time;\n  float 
pad[3];\n};\n\nstruct state {\n  ngf::compute_pipeline                      compute_pipeline;\n  ngf::graphics_pipeline                     render_pipeline;\n  ngf::uniform_multibuffer<render_uniforms>  render_uniforms_multibuf;\n  ngf::uniform_multibuffer<compute_uniforms> compute_uniforms_multibuf;\n  ngf::buffer                                index_buffer;\n  ngf::buffer                                vertex_buffer;\n  ngf_buffer_slice                           compute_buffer_slice;\n  ngf_compute_encoder                        prev_compute_encoder;\n  camera_state                               camera;\n  uint32_t                                   frame = 0u;\n};\n\n}  // namespace compute_verts\n\nvoid* sample_initialize(\n    uint32_t /*width*/,\n    uint32_t /*height*/,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder) {\n  auto state = new compute_verts::state {};\n\n  /**\n   * Load the shader stages.\n   */\n  const ngf::shader_stage compute_shader =\n      load_shader_stage(\"compute-vertices\", \"CSMain\", NGF_STAGE_COMPUTE);\n\n  /**\n   * Create the compute pipeline.\n   */\n  ngf_compute_pipeline_info pipeline_info{};\n  pipeline_info.shader_stage = compute_shader.get();\n  pipeline_info.spec_info    = nullptr;\n  NGF_MISC_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info));\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage render_vertex_stage =\n      load_shader_stage(\"render-vertices\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage render_fragment_stage =\n      load_shader_stage(\"render-vertices\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline for rendering vertex data.\n   */\n  const ngf_vertex_attrib_desc position_attrib_desc {\n      .location   = 0u,\n      .binding    = 0u,\n      .offset     = 0u,\n      .type       = NGF_TYPE_FLOAT,\n      .size       = 4u,\n      .normalized = false};\n  const ngf_vertex_buf_binding_desc 
vert_buf_binding_desc {\n      .binding    = 0u,\n      .stride     = 4u * sizeof(float),\n      .input_rate = NGF_INPUT_RATE_VERTEX};\n  ngf_util_graphics_pipeline_data render_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&render_pipeline_data);\n  render_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  render_pipeline_data.input_assembly_info.enable_primitive_restart = true;\n  render_pipeline_data.input_assembly_info.primitive_topology =\n      NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;\n  render_pipeline_data.depth_stencil_info.depth_test        = true;\n  render_pipeline_data.depth_stencil_info.depth_write       = true;\n  render_pipeline_data.depth_stencil_info.depth_compare     = NGF_COMPARE_OP_LESS;\n  render_pipeline_data.rasterization_info.cull_mode         = NGF_CULL_MODE_NONE;\n  render_pipeline_data.vertex_input_info.nattribs           = 1u;\n  render_pipeline_data.vertex_input_info.attribs            = &position_attrib_desc;\n  render_pipeline_data.vertex_input_info.nvert_buf_bindings = 1u;\n  render_pipeline_data.vertex_input_info.vert_buf_bindings  = &vert_buf_binding_desc;\n  ngf_graphics_pipeline_info& render_pipe_info              = render_pipeline_data.pipeline_info;\n  render_pipe_info.nshader_stages                           = 2u;\n  render_pipe_info.shader_stages[0]                         = render_vertex_stage.get();\n  render_pipe_info.shader_stages[1]                         = render_fragment_stage.get();\n  render_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs();\n  NGF_MISC_CHECK_NGF_ERROR(state->render_pipeline.initialize(render_pipe_info));\n\n  /**\n   * Initialize the index buffer.\n   */\n  const ngf_buffer_info staging_index_buffer_info {\n      .size         = compute_verts::ntotal_indices * sizeof(uint32_t),\n      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC};\n  const ngf_buffer_info 
index_buffer_info {\n      .size         = compute_verts::ntotal_indices * sizeof(uint32_t),\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_DST | NGF_BUFFER_USAGE_INDEX_BUFFER};\n  ngf::buffer staging_index_buffer;\n  NGF_MISC_CHECK_NGF_ERROR(staging_index_buffer.initialize(staging_index_buffer_info));\n  NGF_MISC_CHECK_NGF_ERROR(state->index_buffer.initialize(index_buffer_info));\n  auto mapped_staging_index_buffer = (uint32_t*)\n      ngf_buffer_map_range(staging_index_buffer.get(), 0u, staging_index_buffer_info.size);\n  uint32_t idx = 0u;\n  for (uint32_t strip = 0u; strip < compute_verts::nverts_per_side - 1; ++strip) {\n    for (uint32_t v = 0u; v < compute_verts::nverts_per_side; ++v) {\n      NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);\n      mapped_staging_index_buffer[idx++] = (strip + 1u) * compute_verts::nverts_per_side + v;\n      NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);\n      mapped_staging_index_buffer[idx++] = strip * compute_verts::nverts_per_side + v;\n    }\n    NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);\n    mapped_staging_index_buffer[idx++] = ~0u;\n  }\n  ngf_buffer_flush_range(staging_index_buffer.get(), 0, staging_index_buffer_info.size);\n  ngf_buffer_unmap(staging_index_buffer.get());\n  ngf_cmd_copy_buffer(\n      xfer_encoder,\n      staging_index_buffer.get(),\n      state->index_buffer.get(),\n      staging_index_buffer_info.size,\n      0u,\n      0u);\n\n  /**\n   * Create the vertex buffer.\n   */\n  const ngf_buffer_info vertex_buffer_info {\n      .size         = compute_verts::ntotal_verts * (4u * sizeof(float)) * 2,\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_STORAGE_BUFFER};\n  NGF_MISC_CHECK_NGF_ERROR(state->vertex_buffer.initialize(vertex_buffer_info));\n  state->compute_buffer_slice.buffer = state->vertex_buffer.get();\n  
state->compute_buffer_slice.range  = compute_verts::ntotal_verts * (4u * sizeof(float));\n\n  /**\n   * Set up some initial viewing parameters.\n   */\n  state->camera.look_at[1] = 1.0f;\n\n  state->render_uniforms_multibuf.initialize(3);\n  state->compute_uniforms_multibuf.initialize(3);\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /* time_delta */,\n    ngf_frame_token /* token */,\n    uint32_t w,\n    uint32_t h,\n    float /* time */,\n    void* userdata) {\n  auto           state  = reinterpret_cast<compute_verts::state*>(userdata);\n  const uint32_t f_prev = (state->frame + 1u) % 2;\n  if (state->frame > 0u) {\n    compute_verts::render_uniforms render_uniforms;\n    render_uniforms.cam_matrices =\n        compute_camera_matrices(state->camera, static_cast<float>(w) / static_cast<float>(h));\n    state->render_uniforms_multibuf.write(render_uniforms);\n\n    ngf_irect2d onsc_viewport {0, 0, w, h};\n    ngf_cmd_bind_gfx_pipeline(main_render_pass, state->render_pipeline);\n    ngf::cmd_bind_resources(\n        main_render_pass,\n        state->render_uniforms_multibuf.bind_op_at_current_offset(0, 0));\n    ngf_cmd_viewport(main_render_pass, &onsc_viewport);\n    ngf_cmd_scissor(main_render_pass, &onsc_viewport);\n    ngf_cmd_bind_index_buffer(main_render_pass, state->index_buffer, 0u, NGF_TYPE_UINT32);\n    ngf_cmd_bind_attrib_buffer(\n        main_render_pass,\n        state->vertex_buffer,\n        0u,\n        f_prev * sizeof(float) * 4u * compute_verts::ntotal_verts);\n    ngf_cmd_draw(main_render_pass, true, 0u, compute_verts::ntotal_indices, 1u);\n  }\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) { }\n\nvoid sample_post_draw_frame(\n    ngf_cmd_buffer     cmd_buffer,\n    void*              userdata) {\n  static float   time   = 0.f;\n  auto           state  = reinterpret_cast<compute_verts::state*>(userdata);\n  const uint32_t f_curr = (state->frame) % 2;\n  time += 
0.01f;\n  compute_verts::compute_uniforms compute_uniforms;\n  compute_uniforms.time = time;\n  state->compute_uniforms_multibuf.write(compute_uniforms);\n\n  ngf_compute_pass_info pass_info {};\n  ngf_compute_encoder compute_enc;\n  ngf_cmd_begin_compute_pass(cmd_buffer, &pass_info, &compute_enc);\n  ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline);\n  ngf::cmd_bind_resources(\n      compute_enc,\n      state->compute_uniforms_multibuf.bind_op_at_current_offset(1, 1),\n      ngf_resource_bind_op {\n\n          .target_set     = 1u,\n          .target_binding = 0u,\n          .type           = NGF_DESCRIPTOR_STORAGE_BUFFER,\n          .info           = {\n              .buffer = {\n                  .buffer = state->vertex_buffer.get(),\n                  .offset = f_curr * 4u * sizeof(float) * compute_verts::ntotal_verts,\n                  .range  = compute_verts::ntotal_verts * (4u * sizeof(float))}}});\n  ngf_cmd_dispatch(\n      compute_enc,\n      compute_verts::nverts_per_side / 2,\n      compute_verts::nverts_per_side / 2,\n      1u);\n  ngf_cmd_end_compute_pass(compute_enc);\n  state->prev_compute_encoder = compute_enc;\n  state->frame += 1u;\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<compute_verts::state*>(userdata);\n  ImGui::Begin(\"Controls\");\n  camera_ui(data->camera, std::make_pair(-5.f, 5.f), .1f, std::make_pair(1.0f, 10.0f), .1f);\n  ImGui::End();\n}\n\nvoid sample_shutdown(void* userdata) {\n  delete reinterpret_cast<compute_verts::state*>(userdata);\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/0c-render-to-multisample-texture/render-to-multisample-texture.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"check.h\"\n#include \"imgui.h\"\n#include \"nicegraf-util.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"sample-interface.h\"\n#include \"shader-loader.h\"\n\n#include <stdio.h>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nstruct render_to_multisample_texture_data {\n  ngf::render_target     default_rt;\n  ngf::render_target     offscreen_rt;\n  ngf::render_target     offscreen_multisample_rt;\n  ngf::graphics_pipeline blit_pipeline;\n  ngf::graphics_pipeline offscreen_pipeline;\n  ngf::graphics_pipeline offscreen_multisample_pipeline;\n  ngf::image             rt_texture;\n  ngf::image             resolve_texture;\n  ngf::sampler           sampler;\n  bool                   is_multisample = true;\n};\n\nvoid* sample_initialize(\n    uint32_t,\n    uint32_t,\n    ngf_sample_count 
main_render_target_sample_count,\n    ngf_xfer_encoder /*xfer_encoder*/) {\n  auto state = new render_to_multisample_texture_data {};\n\n  /* Create the image to render to. */\n  const ngf_extent3d   img_size {512u, 512u, 1u};\n  const ngf_image_info img_info {\n      NGF_IMAGE_TYPE_IMAGE_2D,\n      img_size,\n      1u,\n      1u,\n      NGF_IMAGE_FORMAT_BGRA8_SRGB,\n      main_render_target_sample_count,\n      NGF_IMAGE_USAGE_ATTACHMENT};\n  NGF_MISC_CHECK_NGF_ERROR(state->rt_texture.initialize(img_info));\n  const ngf_image_info resolve_img_info {\n      NGF_IMAGE_TYPE_IMAGE_2D,\n      img_size,\n      1u,\n      1u,\n      NGF_IMAGE_FORMAT_BGRA8_SRGB,\n      NGF_SAMPLE_COUNT_1,\n      NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_ATTACHMENT};\n  NGF_MISC_CHECK_NGF_ERROR(state->resolve_texture.initialize(resolve_img_info));\n\n  const ngf_attachment_description offscreen_attachments = {\n      .type         = NGF_ATTACHMENT_COLOR,\n      .format       = NGF_IMAGE_FORMAT_BGRA8_SRGB,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .is_resolve   = false};\n\n  const ngf_image_ref offscreen_img_ref = {\n      .image        = state->resolve_texture.get(),\n      .mip_level    = 0u,\n      .layer        = 0u,\n      .cubemap_face = NGF_CUBEMAP_FACE_COUNT};\n\n  const ngf_attachment_descriptions offscreen_attachments_list = {\n      .descs  = &offscreen_attachments,\n      .ndescs = 1u,\n  };\n\n  ngf_render_target_info rt_info {&offscreen_attachments_list, &offscreen_img_ref};\n  NGF_MISC_CHECK_NGF_ERROR(state->offscreen_rt.initialize(rt_info));\n\n  const ngf_attachment_description offscreen_multisample_attachments[2] = {\n      {.type         = NGF_ATTACHMENT_COLOR,\n       .format       = NGF_IMAGE_FORMAT_BGRA8_SRGB,\n       .sample_count = main_render_target_sample_count,\n       .is_resolve   = false},\n      {.type         = NGF_ATTACHMENT_COLOR,\n       .format       = NGF_IMAGE_FORMAT_BGRA8_SRGB,\n       .sample_count = NGF_SAMPLE_COUNT_1,\n       
.is_resolve   = true}};\n\n  const ngf_attachment_descriptions offscreen_multisample_attachments_list = {\n      .descs  = offscreen_multisample_attachments,\n      .ndescs = 2u,\n  };\n\n  const ngf_image_ref offscreen_multisample_img_refs[2] = {\n      {.image        = state->rt_texture.get(),\n       .mip_level    = 0u,\n       .layer        = 0u,\n       .cubemap_face = NGF_CUBEMAP_FACE_COUNT},\n      {.image        = state->resolve_texture.get(),\n       .mip_level    = 0u,\n       .layer        = 0u,\n       .cubemap_face = NGF_CUBEMAP_FACE_COUNT}};\n\n  ngf_render_target_info multisample_rt_info {\n      &offscreen_multisample_attachments_list,\n      offscreen_multisample_img_refs};\n  NGF_MISC_CHECK_NGF_ERROR(state->offscreen_multisample_rt.initialize(multisample_rt_info));\n\n  /**\n   * Load shader stages.\n   */\n  const ngf::shader_stage blit_vertex_stage =\n      load_shader_stage(\"simple-texture\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage blit_fragment_stage =\n      load_shader_stage(\"simple-texture\", \"PSMain\", NGF_STAGE_FRAGMENT);\n  const ngf::shader_stage offscreen_vertex_stage =\n      load_shader_stage(\"small-triangle\", \"VSMain\", NGF_STAGE_VERTEX);\n  const ngf::shader_stage offscreen_fragment_stage =\n      load_shader_stage(\"small-triangle\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  /**\n   * Create pipeline for blit.\n   */\n  ngf_util_graphics_pipeline_data blit_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data);\n  blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n  ngf_graphics_pipeline_info& blit_pipe_info       = blit_pipeline_data.pipeline_info;\n  blit_pipe_info.nshader_stages                    = 2u;\n  blit_pipe_info.shader_stages[0]                  = blit_vertex_stage.get();\n  blit_pipe_info.shader_stages[1]                  = blit_fragment_stage.get();\n  blit_pipe_info.compatible_rt_attachment_descs    = 
ngf_default_render_target_attachment_descs();\n  NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info));\n\n  /**\n   * Create pipeline for offscreen pass.\n   */\n  ngf_util_graphics_pipeline_data offscreen_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&offscreen_pipeline_data);\n  ngf_graphics_pipeline_info& offscreen_pipe_info    = offscreen_pipeline_data.pipeline_info;\n  offscreen_pipe_info.nshader_stages                 = 2u;\n  offscreen_pipe_info.shader_stages[0]               = offscreen_vertex_stage.get();\n  offscreen_pipe_info.shader_stages[1]               = offscreen_fragment_stage.get();\n  offscreen_pipe_info.compatible_rt_attachment_descs = &offscreen_attachments_list;\n  NGF_MISC_CHECK_NGF_ERROR(state->offscreen_pipeline.initialize(offscreen_pipe_info));\n\n  /**\n   * Create pipeline for multisample offscreen pass.\n   */\n  ngf_util_graphics_pipeline_data offscreen_multisample_pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&offscreen_multisample_pipeline_data);\n  offscreen_multisample_pipeline_data.multisample_info.sample_count =\n      main_render_target_sample_count;\n  ngf_graphics_pipeline_info& offscreen_multisample_pipe_info =\n      offscreen_multisample_pipeline_data.pipeline_info;\n  offscreen_multisample_pipe_info.nshader_stages   = 2u;\n  offscreen_multisample_pipe_info.shader_stages[0] = offscreen_vertex_stage.get();\n  offscreen_multisample_pipe_info.shader_stages[1] = offscreen_fragment_stage.get();\n  offscreen_multisample_pipe_info.compatible_rt_attachment_descs =\n      &offscreen_multisample_attachments_list;\n  NGF_MISC_CHECK_NGF_ERROR(\n      state->offscreen_multisample_pipeline.initialize(offscreen_multisample_pipe_info));\n\n  /* Create sampler.*/\n  const ngf_sampler_info samp_info {\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_LINEAR,\n      NGF_FILTER_NEAREST,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      
NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      0.0f,\n      0.0f,\n      0.0f,\n      1.0f,\n      false};\n  NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info));\n\n  return static_cast<void*>(state);\n}\n\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float /* time_delta */,\n    ngf_frame_token frame_token,\n    uint32_t        w,\n    uint32_t        h,\n    float,\n    void* userdata) {\n  auto                state = reinterpret_cast<render_to_multisample_texture_data*>(userdata);\n  ngf_irect2d         offsc_viewport {0, 0, 512, 512};\n  ngf_irect2d         onsc_viewport {0, 0, w, h};\n  ngf_cmd_buffer      offscr_cmd_buf = nullptr;\n  ngf_cmd_buffer_info cmd_info       = {};\n  ngf_create_cmd_buffer(&cmd_info, &offscr_cmd_buf);\n  ngf_start_cmd_buffer(offscr_cmd_buf, frame_token);\n  {\n    ngf::render_encoder renc {\n        offscr_cmd_buf,\n        state->is_multisample ? state->offscreen_multisample_rt : state->offscreen_rt,\n        .0f,\n        0.0f,\n        0.0f,\n        0.0f,\n        1.0,\n        0u};\n    ngf_cmd_bind_gfx_pipeline(\n        renc,\n        state->is_multisample ? 
state->offscreen_multisample_pipeline : state->offscreen_pipeline);\n    ngf_cmd_viewport(renc, &offsc_viewport);\n    ngf_cmd_scissor(renc, &offsc_viewport);\n    ngf_cmd_draw(renc, false, 0u, 3u, 1u);\n  }\n  ngf_submit_cmd_buffers(1, &offscr_cmd_buf);\n  ngf_destroy_cmd_buffer(offscr_cmd_buf);\n\n  ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline);\n  ngf_cmd_viewport(main_render_pass, &onsc_viewport);\n  ngf_cmd_scissor(main_render_pass, &onsc_viewport);\n  ngf::cmd_bind_resources(\n      main_render_pass,\n      ngf::descriptor_set<0>::binding<1>::texture(state->resolve_texture.get()),\n      ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get()));\n  ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u);\n}\n\nvoid sample_pre_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_post_draw_frame(ngf_cmd_buffer, void*) {\n}\n\nvoid sample_draw_ui(void* userdata) {\n  auto data = reinterpret_cast<render_to_multisample_texture_data*>(userdata);\n  ImGui::Begin(\"Multisampling\");\n  ImGui::Checkbox(\"On/Off\", &data->is_multisample);\n  ImGui::End();\n}\n\nvoid sample_post_submit(void*) {\n}\n\nvoid sample_shutdown(void* userdata) {\n  auto data = static_cast<render_to_multisample_texture_data*>(userdata);\n  delete data;\n  printf(\"shutting down\\n\");\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/camera-controller.cpp",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"camera-controller.h\"\n\n#include \"check.h\"\n#include \"imgui.h\"\n#define _USE_MATH_DEFINES\n#include <math.h>\n\nnamespace ngf_samples {\n\ncamera_matrices compute_camera_matrices(const camera_state& state, float aspect_ratio) {\n  const float      r = state.radius, azimuth = state.azimuth, incline = state.inclination;\n  const nm::float3 point_on_sphere {\n      r * sinf(azimuth) * sinf(incline),\n      r * cosf(incline),\n      r * sinf(incline) * cosf(azimuth)};\n  return {\n      nm::look_at(state.look_at + point_on_sphere, state.look_at, nm::float3 {0.0f, 1.0f, 0.0f}),\n      nm::perspective(nm::deg2rad(state.vfov), aspect_ratio, 0.01f, 1000.0f)};\n}\n\nvoid camera_ui(\n    camera_state&           state,\n    std::pair<float, float> look_at_range,\n    float                   look_at_speed,\n    
std::pair<float, float> radius_range,\n    float                   radius_speed) {\n  NGF_MISC_ASSERT(look_at_range.first < look_at_range.second);\n  NGF_MISC_ASSERT(radius_range.first < radius_range.second);\n  ImGui::Text(\"camera\");\n  ImGui::DragFloat3(\n      \"look at\",\n      state.look_at.data,\n      look_at_speed,\n      look_at_range.first,\n      look_at_range.second,\n      \"%.1f\",\n      0);\n  ImGui::SliderFloat(\"azimuth\", &state.azimuth, 0.0f, (float)M_PI * 2.0f, \"%.1f\", ImGuiSliderFlags_NoRoundToFormat);\n  ImGui::SliderFloat(\"inclination\", &state.inclination, 0.0f, (float)M_PI, \"%.1f\", ImGuiSliderFlags_NoRoundToFormat);\n  ImGui::DragFloat(\n      \"radius\",\n      &state.radius,\n      radius_speed,\n      radius_range.first,\n      radius_range.second,\n      \"%.1f\",\n      0);\n  ImGui::SliderFloat(\"fov\", &state.vfov, 25.0f, 90.0f, \"%.1f\", 0);\n}\n\n}  // namespace ngf_samples"
  },
  {
    "path": "samples/common/camera-controller.h",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"nicemath.h\"\n\n#include <utility>\n\nnamespace ngf_samples {\n\nstruct camera_state {\n  nm::float3 look_at {0.0f, 0.0f, 0.0f};\n  float      radius      = 3.0f;\n  float      azimuth     = 0.0f;\n  float      inclination = 3.14f / 2.0f;\n  float      vfov        = 60.0f;\n};\n\nstruct camera_matrices {\n  nm::float4x4 world_to_view_transform;\n  nm::float4x4 view_to_clip_transform;\n};\n\ncamera_matrices compute_camera_matrices(const camera_state& state, float aspect_ratio);\nvoid            camera_ui(\n               camera_state&           state,\n               std::pair<float, float> look_at_range,\n               float                   look_at_speed,\n               std::pair<float, float> radius_range,\n               float                   radius_speed);\n\n}  // namespace ngf_samples"
  },
  {
    "path": "samples/common/diagnostic-callback.cpp",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"diagnostic-callback.h\"\n\n#include \"logging.h\"\n\n#include <stdarg.h>\n\nnamespace ngf_samples {\n\nvoid sample_diagnostic_callback(ngf_diagnostic_message_type msg_type, void*, const char* fmt, ...) {\n  va_list args;\n  va_start(args, fmt);\n  switch (msg_type) {\n  case NGF_DIAGNOSTIC_ERROR:\n  case NGF_DIAGNOSTIC_WARNING:\n    ngf_misc::vloge(fmt, args);\n    break;\n  case NGF_DIAGNOSTIC_INFO:\n    ngf_misc::vlogi(fmt, args);\n    break;\n  default:;\n  }\n  va_end(args);\n}\n\n}  // namespace ngf_samples"
  },
  {
    "path": "samples/common/diagnostic-callback.h",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"nicegraf.h\"\n\nnamespace ngf_samples {\n\n/**\n * A sample diagnostic callback implementation, which forwards received messages to the log.\n */\nvoid sample_diagnostic_callback(\n    ngf_diagnostic_message_type msg_type,\n    void*                       userdata,\n    const char*                 fmt,\n    ...);\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/imgui-backend.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"imgui-backend.h\"\n\n#include \"check.h\"\n#include \"nicegraf-util.h\"\n#include \"shader-loader.h\"\n\n#include <vector>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nngf_imgui::ngf_imgui(\n    ngf_xfer_encoder     enc,\n    ngf_sample_count     main_render_target_sample_count,\n    const unsigned char* font_atlas_bytes,\n    uint32_t             font_atlas_width,\n    uint32_t             font_atlas_height) {\n#if !defined(NGF_NO_IMGUI)\n  vertex_stage_   = load_shader_stage(\"imgui\", \"VSMain\", NGF_STAGE_VERTEX);\n  fragment_stage_ = load_shader_stage(\"imgui\", \"PSMain\", NGF_STAGE_FRAGMENT);\n\n  ngf_error err = NGF_ERROR_OK;\n\n  // Initialize the streamed uniform object.\n  uniform_data_.initialize(3);\n\n  // Initial pipeline configuration with OpenGL-style defaults.\n  
ngf_util_graphics_pipeline_data pipeline_data;\n  ngf_util_create_default_graphics_pipeline_data(&pipeline_data);\n\n  // Set up blend state.\n  ngf_blend_info blend_info;\n  blend_info.enable                 = true;\n  blend_info.src_color_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA;\n  blend_info.dst_color_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;\n  blend_info.src_alpha_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA;\n  blend_info.dst_alpha_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;\n  blend_info.blend_op_color         = NGF_BLEND_OP_ADD;\n  blend_info.blend_op_alpha         = NGF_BLEND_OP_ADD;\n  blend_info.color_write_mask       = NGF_COLOR_MASK_WRITE_BIT_R | NGF_COLOR_MASK_WRITE_BIT_G |\n                                NGF_COLOR_MASK_WRITE_BIT_B | NGF_COLOR_MASK_WRITE_BIT_A;\n  pipeline_data.pipeline_info.color_attachment_blend_states = &blend_info;\n  memset(\n      pipeline_data.pipeline_info.blend_consts,\n      0,\n      sizeof(pipeline_data.pipeline_info.blend_consts));\n\n  // Set up depth & stencil state.\n  pipeline_data.depth_stencil_info.depth_test   = false;\n  pipeline_data.depth_stencil_info.stencil_test = false;\n\n  // Set up multisampling.\n  pipeline_data.multisample_info.sample_count = main_render_target_sample_count;\n\n  // Assign programmable stages.\n  ngf_graphics_pipeline_info& pipeline_info = pipeline_data.pipeline_info;\n  pipeline_info.nshader_stages              = 2u;\n  pipeline_info.shader_stages[0]            = vertex_stage_.get();\n  pipeline_info.shader_stages[1]            = fragment_stage_.get();\n\n  // Disable backface culling.\n  pipeline_data.rasterization_info.cull_mode = NGF_CULL_MODE_NONE;\n\n  // Configure vertex input.\n  ngf_vertex_attrib_desc vertex_attribs[] = {\n      {0u, 0u, offsetof(ImDrawVert, pos), NGF_TYPE_FLOAT, 2u, false},\n      {1u, 0u, offsetof(ImDrawVert, uv), NGF_TYPE_FLOAT, 2u, false},\n      {2u, 0u, offsetof(ImDrawVert, col), NGF_TYPE_UINT8, 4u, true},\n  };\n  
pipeline_data.vertex_input_info.attribs  = vertex_attribs;\n  pipeline_data.vertex_input_info.nattribs = 3u;\n  ngf_vertex_buf_binding_desc binding_desc = {\n      0u,                    // binding\n      sizeof(ImDrawVert),    // stride\n      NGF_INPUT_RATE_VERTEX  // input rate\n  };\n  pipeline_data.vertex_input_info.nvert_buf_bindings = 1u;\n  pipeline_data.vertex_input_info.vert_buf_bindings  = &binding_desc;\n  pipeline_data.pipeline_info.compatible_rt_attachment_descs =\n      ngf_default_render_target_attachment_descs();\n  err = pipeline_.initialize(pipeline_data.pipeline_info);\n  NGF_MISC_ASSERT(err == NGF_ERROR_OK);\n\n  // Create and populate font texture.\n  const ngf_image_info font_texture_info = {\n      NGF_IMAGE_TYPE_IMAGE_2D,                                        // type\n      {(uint32_t)font_atlas_width, (uint32_t)font_atlas_height, 1u},  // extent\n      1u,                                                             // nmips\n      1u,                                                             // nlayers\n      NGF_IMAGE_FORMAT_RGBA8,                                         // image_format\n      NGF_SAMPLE_COUNT_1,                                             // samples\n      NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST          // usage_hint\n  };\n  err = font_texture_.initialize(font_texture_info);\n  NGF_MISC_ASSERT(err == NGF_ERROR_OK);\n  ImGui::GetIO().Fonts->TexID = (ImTextureID)(uintptr_t)font_texture_.get();\n  const ngf_buffer_info pbuffer_info {\n      4u * (size_t)font_atlas_width * (size_t)font_atlas_height,\n      NGF_BUFFER_STORAGE_HOST_WRITEABLE,\n      NGF_BUFFER_USAGE_XFER_SRC};\n  err = texture_data_.initialize(pbuffer_info);\n  NGF_MISC_ASSERT(err == NGF_ERROR_OK);\n  void* mapped_texture_data = ngf_buffer_map_range(\n      texture_data_.get(),\n      0,\n      4 * (size_t)font_atlas_width * (size_t)font_atlas_height);\n  memcpy(\n      mapped_texture_data,\n      font_atlas_bytes,\n      4 * 
(size_t)font_atlas_width * (size_t)font_atlas_height);\n  ngf_buffer_flush_range(\n      texture_data_.get(),\n      0,\n      4 * (size_t)font_atlas_width * (size_t)font_atlas_height);\n  ngf_buffer_unmap(texture_data_.get());\n  const ngf_image_write img_write {\n      .src_offset     = 0u,\n      .dst_offset     = {0, 0, 0},\n      .extent         = {.width = font_atlas_width, .height = font_atlas_height, .depth = 1u},\n      .dst_level      = 0u,\n      .dst_base_layer = 0u,\n      .nlayers        = 1u};\n  ngf_cmd_write_image(enc, texture_data_.get(), font_texture_.get(), &img_write, 1u);\n\n  // Create a sampler for the font texture.\n  ngf_sampler_info sampler_info {\n      NGF_FILTER_NEAREST,\n      NGF_FILTER_NEAREST,\n      NGF_FILTER_NEAREST,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      NGF_WRAP_MODE_CLAMP_TO_EDGE,\n      0.0f,\n      0.0f,\n      0.0f,\n      1.0f,\n      false, NGF_COMPARE_OP_NEVER};\n  tex_sampler_.initialize(sampler_info);\n#endif\n}\n\nvoid ngf_imgui::record_rendering_commands(ngf_render_encoder enc) {\n  ImGui::Render();\n  ImDrawData* data = ImGui::GetDrawData();\n  if (data->TotalIdxCount <= 0) return;\n  // Compute effective viewport width and height, apply scaling for\n  // retina/high-dpi displays.\n  ImGuiIO& io        = ImGui::GetIO();\n  int      fb_width  = (int)(data->DisplaySize.x * io.DisplayFramebufferScale.x);\n  int      fb_height = (int)(data->DisplaySize.y * io.DisplayFramebufferScale.y);\n  data->ScaleClipRects(io.DisplayFramebufferScale);\n\n  // Avoid rendering when minimized.\n  if (fb_width <= 0 || fb_height <= 0) { return; }\n\n  // Build projection matrix.\n  const ImVec2&      pos              = data->DisplayPos;\n  const float        L                = pos.x;\n  const float        R                = pos.x + data->DisplaySize.x;\n  const float        T                = pos.y;\n  const float        B                = pos.y + data->DisplaySize.y;\n  const uniform_data 
ortho_projection = {{\n      {2.0f / (R - L), 0.0f, 0.0f, 0.0f},\n      {0.0f, 2.0f / (B - T), 0.0f, 0.0f},\n      {0.0f, 0.0f, -1.0f, 0.0f},\n      {(R + L) / (L - R), (T + B) / (T - B), 0.0f, 1.0f},\n  }};\n  uniform_data_.write(ortho_projection);\n\n  // Bind the ImGui rendering pipeline.\n  ngf_cmd_bind_gfx_pipeline(enc, pipeline_);\n\n  // Bind resources.\n  ngf::cmd_bind_resources(\n      enc,\n      uniform_data_.bind_op_at_current_offset(0u, 0u),\n      ngf::descriptor_set<0>::binding<1>::texture(font_texture_.get()),\n      ngf::descriptor_set<0>::binding<2>::sampler(tex_sampler_.get()));\n\n  // Set viewport.\n  ngf_irect2d viewport_rect = {0u, 0u, (uint32_t)fb_width, (uint32_t)fb_height};\n  ngf_cmd_viewport(enc, &viewport_rect);\n  ngf_cmd_scissor(enc, &viewport_rect);\n\n  // These vectors will store vertex and index data for the draw calls.\n  // Later this data will be transferred to GPU buffers.\n  std::vector<ImDrawVert> vertex_data((size_t)data->TotalVtxCount, ImDrawVert());\n  std::vector<ImDrawIdx>  index_data((size_t)data->TotalIdxCount, 0u);\n  struct draw_data {\n    ngf_irect2d scissor;\n    uint32_t    first_elem;\n    uint32_t    nelem;\n  };\n  std::vector<draw_data> draw_data;\n\n  uint32_t last_vertex = 0u;\n  uint32_t last_index  = 0u;\n\n  // Process each ImGui command list and translate it into the nicegraf\n  // command buffer.\n  for (int i = 0u; i < data->CmdListsCount; ++i) {\n    // Append vertex data.\n    const ImDrawList* imgui_cmd_list = data->CmdLists[i];\n    memcpy(\n        &vertex_data[last_vertex],\n        imgui_cmd_list->VtxBuffer.Data,\n        sizeof(ImDrawVert) * (size_t)imgui_cmd_list->VtxBuffer.Size);\n\n    // Append index data.\n    for (int a = 0u; a < imgui_cmd_list->IdxBuffer.Size; ++a) {\n      // ImGui uses separate index buffers, but we'll use just one. 
We will\n      // update the index values accordingly.\n      index_data[last_index + (size_t)a] = (ImDrawIdx)(last_vertex + imgui_cmd_list->IdxBuffer[a]);\n    }\n    last_vertex += (uint32_t)imgui_cmd_list->VtxBuffer.Size;\n\n    // Process each ImGui command in the draw list.\n    uint32_t idx_buffer_sub_offset = 0u;\n    for (int j = 0u; j < imgui_cmd_list->CmdBuffer.Size; ++j) {\n      const ImDrawCmd& cmd = imgui_cmd_list->CmdBuffer[j];\n      if (cmd.UserCallback != nullptr) {\n        cmd.UserCallback(imgui_cmd_list, &cmd);\n      } else {\n        ImVec4 clip_rect = ImVec4(\n            cmd.ClipRect.x - pos.x,\n            cmd.ClipRect.y - pos.y,\n            cmd.ClipRect.z - pos.x,\n            cmd.ClipRect.w - pos.y);\n        if (clip_rect.x < (float)fb_width && clip_rect.y < (float)fb_height &&\n            clip_rect.z >= 0.0f && clip_rect.w >= 0.0f) {\n          const ngf_irect2d scissor_rect {\n              (int32_t)clip_rect.x,\n              (int32_t)clip_rect.y,\n              (uint32_t)(clip_rect.z - clip_rect.x),\n              (uint32_t)(clip_rect.w - clip_rect.y)};\n          draw_data.push_back(\n              {scissor_rect, last_index + idx_buffer_sub_offset, (uint32_t)cmd.ElemCount});\n          idx_buffer_sub_offset += (uint32_t)cmd.ElemCount;\n        }\n      }\n    }\n    last_index += (uint32_t)imgui_cmd_list->IdxBuffer.Size;\n  }\n\n  // Create new vertex and index buffers.\n  ngf_buffer_info attrib_buffer_info {\n      sizeof(ImDrawVert) * vertex_data.size(),  // data size\n      NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE,\n      NGF_BUFFER_USAGE_VERTEX_BUFFER};\n  ngf_buffer attrib_buffer = nullptr;\n  ngf_create_buffer(&attrib_buffer_info, &attrib_buffer);\n  attrib_buffer_.reset(attrib_buffer);\n  void* mapped_attrib_buffer = ngf_buffer_map_range(attrib_buffer, 0, attrib_buffer_info.size);\n  NGF_MISC_ASSERT(mapped_attrib_buffer != nullptr);\n  memcpy(mapped_attrib_buffer, vertex_data.data(), attrib_buffer_info.size);\n  
ngf_buffer_flush_range(attrib_buffer, 0, attrib_buffer_info.size);\n  ngf_buffer_unmap(attrib_buffer);\n\n  ngf_buffer_info index_buffer_info {\n      sizeof(ImDrawIdx) * index_data.size(),\n      NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE,\n      NGF_BUFFER_USAGE_INDEX_BUFFER};\n  ngf_buffer index_buffer = nullptr;\n  ngf_create_buffer(&index_buffer_info, &index_buffer);\n  index_buffer_.reset(index_buffer);\n  void* mapped_index_buffer = ngf_buffer_map_range(index_buffer, 0, index_buffer_info.size);\n  NGF_MISC_ASSERT(mapped_index_buffer != nullptr);\n  memcpy(mapped_index_buffer, index_data.data(), index_buffer_info.size);\n  ngf_buffer_flush_range(index_buffer, 0, index_buffer_info.size);\n  ngf_buffer_unmap(index_buffer);\n\n  ngf_cmd_bind_index_buffer(\n      enc,\n      index_buffer,\n      0u,\n      sizeof(ImDrawIdx) < 4 ? NGF_TYPE_UINT16 : NGF_TYPE_UINT32);\n  ngf_cmd_bind_attrib_buffer(enc, attrib_buffer, 0u, 0u);\n  for (const auto& draw : draw_data) {\n    ngf_cmd_scissor(enc, &draw.scissor);\n    ngf_cmd_draw(enc, true, draw.first_elem, draw.nelem, 1u);\n  }\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/imgui-backend.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"imgui.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicegraf.h\"\n\nnamespace ngf_samples {\n\n/**\n * This is a nicegraf-based rendering backend for ImGui.\n * It's used to render the UI for samples.\n */\nclass ngf_imgui {\n  public:\n  /**\n   * Initializes the internal state of the ImGui rendering backend, and uploads\n   * the font texture by recording the appropriate commands into the given\n   * transfer encoder.\n   */\n  ngf_imgui(\n      ngf_xfer_encoder     font_xfer_encoder,\n      ngf_sample_count     main_render_target_sample_count,\n      const unsigned char* font_atlast_bytes,\n      uint32_t             font_atlas_width,\n      uint32_t             font_atlas_height);\n\n  /**\n   * Records commands for rendering the contents of the current ImGui draw data into the\n   * 
given render encoder.\n   */\n  void record_rendering_commands(ngf_render_encoder enc);\n\n  private:\n  struct uniform_data {\n    float ortho_projection[4][4];\n  };\n\n#if !defined(NGF_NO_IMGUI)\n  ngf::graphics_pipeline                 pipeline_;\n  ngf::uniform_multibuffer<uniform_data> uniform_data_;\n  ngf::image                             font_texture_;\n  ngf::sampler                           tex_sampler_;\n  ngf::buffer                            attrib_buffer_;\n  ngf::buffer                            index_buffer_;\n  ngf::buffer                            texture_data_;\n  ngf::shader_stage                      vertex_stage_;\n  ngf::shader_stage                      fragment_stage_;\n  ngf::render_target                     default_rt_;\n#endif\n};\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/main.cpp",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"nicegraf-mtl-handles.h\"\n#define GLFW_INCLUDE_NONE\n#include <GLFW/glfw3.h>\n\n#if defined(_WIN32) || defined(_WIN64)\n#define GLFW_EXPOSE_NATIVE_WIN32\n#elif defined(__APPLE__)\n#define GLFW_EXPOSE_NATIVE_COCOA\n#include \"platform/macos/glfw-cocoa-contentview.h\"\n#else\n#define GLFW_EXPOSE_NATIVE_X11\n#endif\n#include \"check.h\"\n#include \"diagnostic-callback.h\"\n#include \"imgui-backend.h\"\n#include \"imgui_impl_glfw.h\"\n#include \"logging.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"sample-interface.h\"\n\n#include <GLFW/glfw3native.h>\n#include <chrono>\n#include <optional>\n#include <stdio.h>\n\nint main(int, char**) {\n  /**\n   * We prefer a more verbose diagnostic output from nicegraf in debug builds.\n   */\n#if defined(NDEBUG)\n  constexpr ngf_diagnostic_log_verbosity 
diagnostics_verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT;\n#else\n  constexpr ngf_diagnostic_log_verbosity diagnostics_verbosity = NGF_DIAGNOSTICS_VERBOSITY_DETAILED;\n#endif\n\n  /**\n   * Select a rendering device to be used by nicegraf.\n   */\n  uint32_t          ndevices = 0u;\n  const ngf_device* devices  = NULL;\n  NGF_MISC_CHECK_NGF_ERROR(ngf_get_device_list(&devices, &ndevices));\n  const char* device_perf_tier_names[NGF_DEVICE_PERFORMANCE_TIER_COUNT] = {\n      \"high\",\n      \"low\",\n      \"unknown\"};\n  /**\n   * For the sample code, we try to select a high-perf tier device. If one isn't available, we just\n   * fall back on the first device in the list. You may want to choose a different strategy for your\n   * specific application, or allow the user to pick.\n   */\n  size_t high_power_device_idx = (~0u);\n  ngf_misc::logi(\"available rendering devices: \");\n  for (uint32_t i = 0; i < ndevices; ++i) {\n    /**\n     * If no preferred index has been selected yet, and the current device is high-power, pick it as\n     * preferred. otherwise, just log the device details.\n     */\n    ngf_misc::logi(\n        \" device %d : %s (perf tier : `%s`)\",\n        i,\n        devices[i].name,\n        device_perf_tier_names[devices[i].performance_tier]);\n    if (high_power_device_idx == (~0u) &&\n        devices[i].performance_tier == NGF_DEVICE_PERFORMANCE_TIER_HIGH) {\n      high_power_device_idx = i;\n    }\n  }\n  /* Fall back to 1st device if no high-power device was found. */\n  const size_t preferred_device_idx = (high_power_device_idx == ~0u) ? 
0 : high_power_device_idx;\n  const ngf_device_handle device_handle = devices[preferred_device_idx].handle;\n  ngf_misc::logi(\"selected device %d\", preferred_device_idx);\n\n  /*\n   * Initialize RenderDoc.\n   * Allows capturing of frame data to be opened in the RenderDoc debugger.\n   * To enable RenderDoc functionality, fill in the below struct with the path\n   * to the RenderDoc library (renderdoc.dll on Windows, librenderdoc.so on Linux,\n   * N/A on Mac OSX) and a file path template for where the captures should be stored.\n   *\n   * For example, if your library is saved in C:\\example\\dir\\renderdoc.dll and you want to save\n   * your captures as C:\\capture\\dir\\test. You would fill out the struct as such:\n   *\n   * const ngf_renderdoc_info renderdoc_info = {\n   *   .renderdoc_lib_path             = \"C:\\\\example\\\\dir\\\\renderdoc.dll\",\n   *   .renderdoc_destination_template = \"C:\\\\capture\\\\dir\\\\test\"};\n   *\n   * Provided that the above steps are completed, captures can be taken by pressing the\n   * \"C\" key while a sample is running. 
Captures will be saved to the specified directory.\n   * Custom instrumenting within the samples can also be done by making calls to\n   * ngf_capture_begin and ngf_capture_end, respectively.\n   */\n  const ngf_renderdoc_info renderdoc_info = {\n      .renderdoc_lib_path             = NULL,\n      .renderdoc_destination_template = NULL};\n\n  /*\n   * Initialize nicegraf.\n   * Set our rendering device preference to \"discrete\" to pick a high-power GPU if one is available,\n   * and install a diagnostic callback.\n   */\n  const ngf_diagnostic_info diagnostic_info {\n      .verbosity = diagnostics_verbosity,\n      .userdata  = nullptr,\n      .callback  = ngf_samples::sample_diagnostic_callback,\n      .enable_debug_groups = true };\n\n  const ngf_init_info init_info {\n      .diag_info            = &diagnostic_info,\n      .allocation_callbacks = NULL,\n      .device               = device_handle,\n      .renderdoc_info       = (renderdoc_info.renderdoc_lib_path != NULL) ? &renderdoc_info : NULL};\n  NGF_MISC_CHECK_NGF_ERROR(ngf_initialize(&init_info));\n\n  ngf_misc::logi(\n      \"device-local memory is host-visible: %s\",\n      ngf_get_device_capabilities()->device_local_memory_is_host_visible ? \"YES\" : \"NO\");\n\n  /**\n   * Initialize imgui and generate its font atlas.\n   */\n  ImGuiContext* imgui_ctx = ImGui::CreateContext();\n  ImGui::SetCurrentContext(imgui_ctx);\n  unsigned char* imgui_font_atlas_bytes;\n  int            imgui_font_atlas_width, imgui_font_atlas_height;\n  ImGui::GetIO().Fonts->GetTexDataAsRGBA32(\n      &imgui_font_atlas_bytes,\n      &imgui_font_atlas_width,\n      &imgui_font_atlas_height);\n\n  /**\n   * Initialize glfw.\n   */\n  glfwInit();\n\n  /**\n   * Create a window.\n   * The `width` and `height` here refer to the dimensions of the window's \"client area\", i.e. the\n   * area that can actually be rendered to (excludes borders and any other decorative elements). 
The\n   * dimensions we request are a hint, we need to get the actual dimensions after the window is\n   * created.\n   * Note that we deliberately create the window before setting up the nicegraf context. This is\n   * done so that when the destructors are invoked, the context is destroyed before the window -\n   * changing this sequence of events might lead to misbehavior.\n   * Also note that we set a special window hint to make sure GLFW does _not_ attempt to create\n   * an OpenGL (or other API) context for us - this is nicegraf's job.\n   */\n  constexpr uint32_t window_width_hint = 800, window_height_hint = 600;\n  glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);\n  GLFWwindow* window =\n      glfwCreateWindow(window_width_hint, window_height_hint, \"nicegraf sample\", nullptr, nullptr);\n  if (window == nullptr) {\n    ngf_misc::loge(\"Failed to create a window, exiting.\");\n    return 0;\n  }\n  int fb_width, fb_height;\n  glfwGetFramebufferSize(window, &fb_width, &fb_height);\n  ngf_misc::logi(\"created a window with client area of size %d x %d.\", fb_width, fb_height);\n\n  /**\n   * Make sure keyboard/mouse work with imgui.\n   */\n  ImGui_ImplGlfw_InitForOther(window, true);\n\n  /**\n   * Retrieve the native window handle to pass on to nicegraf.\n   */\n  uintptr_t native_window_handle = 0;\n#if defined(_WIN32) || defined(_WIN64)\n  native_window_handle = (uintptr_t)glfwGetWin32Window(window);\n#elif defined(__APPLE__)\n  native_window_handle = (uintptr_t)ngf_samples::get_glfw_contentview(window);\n#else\n  native_window_handle = (uintptr_t)glfwGetX11Window(window);\n#endif\n\n  // Begin Context Scope\n  {\n    /**\n     * Configure the swapchain and create a nicegraf context.\n     * Use an sRGB color attachment and a 32-bit float depth attachment. 
Enable MSAA with\n     * the highest supported framebuffer sample count.\n     */\n    const ngf_sample_count main_render_target_sample_count =\n        ngf_get_device_capabilities()->max_supported_framebuffer_color_sample_count;\n    const ngf_swapchain_info swapchain_info = {\n        .color_format  = NGF_IMAGE_FORMAT_BGRA8_SRGB,\n        .colorspace    = NGF_COLORSPACE_SRGB_NONLINEAR,\n        .depth_format  = NGF_IMAGE_FORMAT_DEPTH32,\n        .sample_count  = main_render_target_sample_count,\n        .capacity_hint = 3u,\n        .width         = (uint32_t)fb_width,\n        .height        = (uint32_t)fb_height,\n        .present_mode  = NGF_PRESENTATION_MODE_FIFO,\n        .native_handle = native_window_handle};\n    const ngf_context_info ctx_info = {\n        .swapchain_info = &swapchain_info,\n        .shared_context = nullptr};\n    ngf::context context;\n    NGF_MISC_CHECK_NGF_ERROR(context.initialize(ctx_info));\n\n    /**\n     * Make the newly created context current on this thread.\n     * Once a context has been made current on a thread, it cannot be switched to another thread,\n     * and another context cannot be made current on that thread.\n     */\n    NGF_MISC_CHECK_NGF_ERROR(ngf_set_context(context));\n\n    /**\n     * This is the nicegraf-based rendering backend for ImGui - we will initialize it\n     * on first frame.\n     */\n    std::optional<ngf_samples::ngf_imgui> imgui_backend;\n\n    /**\n     * Main command buffer that samples will record rendering commands into.\n     */\n    ngf::cmd_buffer main_cmd_buffer;\n    NGF_MISC_CHECK_NGF_ERROR(main_cmd_buffer.initialize(ngf_cmd_buffer_info {}));\n\n    /**\n     * Pointer to sample-specific data, returned by sample_initialize.\n     * It shall be passed to the sample on every frame.\n     */\n    void* sample_opaque_data = nullptr;\n\n    /**\n     * Main loop. 
Exit when either the window closes or `poll_events` returns false, indicating that\n     * the application has received a request to exit.\n     */\n    bool first_frame      = true;\n    auto prev_frame_start = std::chrono::system_clock::now();\n    while (!glfwWindowShouldClose(window)) {\n      glfwPollEvents();\n      auto                               frame_start  = std::chrono::system_clock::now();\n      const std::chrono::duration<float> time_delta   = frame_start - prev_frame_start;\n      float                              time_delta_f = time_delta.count();\n      prev_frame_start                                = frame_start;\n\n      if (glfwGetKey(window, GLFW_KEY_C) == GLFW_PRESS) { ngf_renderdoc_capture_next_frame(); }\n\n      /**\n       * Query the updated size of the window and handle resize events.\n       */\n      const int old_fb_width = fb_width, old_fb_height = fb_height;\n      glfwGetFramebufferSize(window, &fb_width, &fb_height);\n      bool       resize_successful = true;\n      const bool need_resize       = (fb_width != old_fb_width || fb_height != old_fb_height);\n      if (need_resize) {\n        ngf_misc::logd(\n            \"window resizing detected, calling ngf_resize context. 
\"\n            \"old size: %d x %d; new size: %d x %d\",\n            old_fb_width,\n            old_fb_height,\n            fb_width,\n            fb_height);\n        resize_successful &=\n            (NGF_ERROR_OK == ngf_resize_context(context, (uint32_t)fb_width, (uint32_t)fb_height));\n      }\n\n      if (resize_successful) {\n        /**\n         * Begin the frame and start the main command buffer.\n         */\n        ngf_frame_token frame_token;\n        if (ngf_begin_frame(&frame_token) != NGF_ERROR_OK) continue;\n        NGF_MISC_CHECK_NGF_ERROR(ngf_start_cmd_buffer(main_cmd_buffer, frame_token));\n\n        /**\n         * On first frame, initialize the sample and the ImGui rendering backend.\n         */\n        if (first_frame) {\n          ngf_cmd_begin_debug_group(main_cmd_buffer, \"Initial GPU uploads\");\n          /**\n           * Start a new transfer command encoder for uploading resources to the GPU.\n           */\n          ngf_xfer_encoder   xfer_encoder {};\n          ngf_xfer_pass_info xfer_pass_info {};\n          NGF_MISC_CHECK_NGF_ERROR(\n              ngf_cmd_begin_xfer_pass(main_cmd_buffer, &xfer_pass_info, &xfer_encoder));\n\n          /**\n           * Initialize the sample, and save the opaque data pointer.\n           */\n          ngf_misc::logi(\"Initializing sample\");\n          sample_opaque_data = ngf_samples::sample_initialize(\n              (uint32_t)fb_width,\n              (uint32_t)fb_height,\n              main_render_target_sample_count,\n              xfer_encoder);\n\n          /**\n           * Exit if sample failed to initialize.\n           */\n          if (sample_opaque_data == nullptr) {\n            ngf_misc::loge(\"Sample failed to initialize\");\n            break;\n          }\n          ngf_misc::logi(\"Sample initialized\");\n\n          /**\n           * Initialize the ImGui rendering backend.\n           */\n          imgui_backend.emplace(\n              xfer_encoder,\n              
main_render_target_sample_count,\n              imgui_font_atlas_bytes,\n              imgui_font_atlas_width,\n              imgui_font_atlas_height);\n\n          /**\n           * Finish the transfer encoder.\n           */\n          NGF_MISC_CHECK_NGF_ERROR(ngf_cmd_end_xfer_pass(xfer_encoder));\n          ngf_cmd_end_current_debug_group(main_cmd_buffer);\n        }\n\n        /**\n         * Let the sample code record any commands prior to the main render pass.\n         */\n        ngf_cmd_begin_debug_group(main_cmd_buffer, \"Sample pre-draw frame\");\n        ngf_samples::sample_pre_draw_frame(\n            main_cmd_buffer,\n            sample_opaque_data);\n        ngf_cmd_end_current_debug_group(main_cmd_buffer);\n\n        /**\n         * Record the commands for the main render pass.\n         */\n        ngf_cmd_begin_debug_group(main_cmd_buffer, \"Main render pass\");\n        {\n          /**\n           * Begin the main render pass.\n           */\n          ngf::render_encoder main_render_pass_encoder(\n              main_cmd_buffer,\n              ngf_default_render_target(),\n              0.0f,\n              0.0f,\n              0.0f,\n              0.0f,\n              1.0f,\n              0);\n\n          /**\n           * Call into the sample code to draw a single frame.\n           */\n          static float t = 0.0;\n          ngf_samples::sample_draw_frame(\n              main_render_pass_encoder,\n              time_delta_f,\n              frame_token,\n              (uint32_t)fb_width,\n              (uint32_t)fb_height,\n              t,\n              sample_opaque_data);\n          t += 0.008f;\n\n          /**\n           * Begin a new ImGui frame.\n           */\n          ImGui_ImplGlfw_NewFrame();\n          ImGui::NewFrame();\n\n          /**\n           * Call into the sample-specific code to execute ImGui UI commands, and end ImGui frame.\n           */\n          ngf_samples::sample_draw_ui(sample_opaque_data);\n          
ImGui::EndFrame();\n\n          /**\n           * Draw the UI on top of everything else.\n           */\n          imgui_backend->record_rendering_commands(main_render_pass_encoder);\n        }\n        ngf_cmd_end_current_debug_group(main_cmd_buffer);\n\n        /**\n         * Let the sample record commands after the main render pass.\n         */\n        ngf_cmd_begin_debug_group(main_cmd_buffer, \"Sample post-draw frame\");\n        ngf_samples::sample_post_draw_frame(main_cmd_buffer, sample_opaque_data);\n        ngf_cmd_end_current_debug_group(main_cmd_buffer);\n\n        /**\n         * Submit the main command buffer and end the frame.\n         */\n        ngf_cmd_buffer submitted_cmd_bufs[] = {main_cmd_buffer.get()};\n        NGF_MISC_CHECK_NGF_ERROR(ngf_submit_cmd_buffers(1, submitted_cmd_bufs));\n        ngf_samples::sample_post_submit(sample_opaque_data);\n        if (ngf_end_frame(frame_token) != NGF_ERROR_OK) {\n          ngf_misc::loge(\"failed to present image to swapchain!\");\n        }\n      } else {\n        ngf_misc::loge(\"failed to handle window resize!\");\n      }\n      first_frame = false;\n    }\n\n    /**\n     * De-initialize any sample-specific data, shut down ImGui.\n     */\n    ngf_misc::logi(\"Finishing execution\");\n    ngf_samples::sample_shutdown(sample_opaque_data);\n    ImGui::DestroyContext(imgui_ctx);\n  }  // End Context Scope\n\n  ngf_shutdown();\n\n  return 0;\n}\n"
  },
  {
    "path": "samples/common/platform/macos/glfw-cocoa-contentview.h",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include <GLFW/glfw3.h>\n\nnamespace ngf_samples {\n\nvoid* get_glfw_contentview(GLFWwindow *win);\n\n}\n"
  },
  {
    "path": "samples/common/platform/macos/glfw-cocoa-contentview.mm",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"platform/macos/glfw-cocoa-contentview.h\"\n\n#define GLFW_EXPOSE_NATIVE_COCOA\n#include <GLFW/glfw3native.h>\n\nnamespace ngf_samples {\n\n/**\n * On Mac, the NSWindow's ContentView needs to be\n * passed to nicegraf as the native window handle.\n */\nvoid* get_glfw_contentview(GLFWwindow *win) {\n  NSWindow* w = glfwGetCocoaWindow(win);\n  return (void*)CFBridgingRetain(w.contentView);\n}\n\n}\n"
  },
  {
    "path": "samples/common/sample-interface.h",
    "content": "/**\n * Copyright (c) 2023 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#pragma warning(disable:26812)\n#include \"nicegraf.h\"\n\n#include <stdint.h>\n\n/**\n * Each sample has to implement the functions declared in this header.\n * They are called by the common sample code.\n */\nnamespace ngf_samples {\n\n/**\n * This function is called once at startup, to let the sample set up whatever it needs.\n * This function may assume that a nicegraf context has already been created and made current on\n * the calling thread.\n * It gets passed the dimensions of the window to be rendered to, as well as the sample count\n * of the main rendertarget.\n * It also gets a transfer encoder, which samples can use to upload some resources to the\n * GPU.\n * The function shall return a pointer that will be passed in to other callbacks.\n */\nvoid* sample_initialize(\n    uint32_t     
    initial_window_width,\n    uint32_t         initial_window_height,\n    ngf_sample_count main_render_target_sample_count,\n    ngf_xfer_encoder xfer_encoder);\n\n/**\n * This function gets called every frame before beginning the main render pass.\n * It receives the command buffer that the main render pass will eventually be\n * recorded into.\n */\nvoid sample_pre_draw_frame(ngf_cmd_buffer cmd_buffer, void* userdata);\n\n/**\n * This function gets called every frame, to render the frame contents.\n * It gets passed a token identifying the frame, the current window dimensions, and a (monotonically\n * increasing) timestamp. Window resizes are generally handled in the common code, but it's up to\n * the specific sample to monitor for size changes and e.g. resize any rendertargets that have to\n * match screen resolution. `userdata` is the pointer returned previously by `sample_initialize`.\n */\nvoid sample_draw_frame(\n    ngf_render_encoder main_render_pass,\n    float              time_delta_ms,\n    ngf_frame_token    frame_token,\n    uint32_t           width,\n    uint32_t           height,\n    float              time,\n    void*              userdata);\n\n/**\n * This function gets called every frame after finishing the main render pass.\n * It receives the command buffer that the main render pass was previously\n * recorded into.\n */\nvoid sample_post_draw_frame(ngf_cmd_buffer cmd_buffer, void* userdata);\n\nvoid sample_post_submit(void* userdata);\n\n\n/**\n * This function gets called every frame, to render the UI of the sample. It should mostly consist\n * of ImGui calls. `userdata` is the pointer returned previously by `sample_initialize`.\n */\nvoid sample_draw_ui(void* userdata);\n\n/**\n * This function gets called once, before the sample ceases execution, to perform any cleanup\n * actions. This function may assume that a nicegraf context is still present and current on the\n * calling thread. 
`userdata` is the pointer returned previously by `sample_initialize`.\n */\nvoid sample_shutdown(void* userdata);\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/staging-image.cpp",
    "content": "#include \"staging-image.h\"\n\n#include \"check.h\"\n#include \"file-utils.h\"\n#include \"targa-loader.h\"\n\n#include <cmath>\n#include <vector>\n\nusing namespace ngf_misc;\n\nnamespace ngf_samples {\n\nstaging_image create_staging_image_from_tga(const char* file_name) {\n  /* Read in the texture image file.*/\n  std::vector<char> texture_tga_data = load_file(file_name);\n\n  /* this call does nothing but quickly get the width & height. */\n  uint32_t texture_width, texture_height;\n  load_targa(\n      texture_tga_data.data(),\n      texture_tga_data.size(),\n      nullptr,\n      0u,\n      &texture_width,\n      &texture_height);\n\n  /* Create an appropriately sized staging buffer for the texture upload. */\n  const size_t texture_size_bytes = texture_width * texture_height * 4u;\n  ngf::buffer  staging_buf;\n  NGF_MISC_CHECK_NGF_ERROR(staging_buf.initialize(ngf_buffer_info {\n      .size         = texture_size_bytes,\n      .storage_type = NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE,\n      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC}));\n  void* mapped_staging_buf = ngf_buffer_map_range(staging_buf.get(), 0, texture_size_bytes);\n\n  /* Decode the loaded targa file, writing RGBA values directly into mapped memory. */\n  load_targa(\n      texture_tga_data.data(),\n      texture_tga_data.size(),\n      mapped_staging_buf,\n      texture_size_bytes,\n      &texture_width,\n      &texture_height);\n\n  /* Flush and unmap the staging buffer. 
*/\n  ngf_buffer_flush_range(staging_buf.get(), 0, texture_size_bytes);\n  ngf_buffer_unmap(staging_buf.get());\n\n  /* Count the number of mipmaps we'll have to generate for trilinear filtering.\n     Note that we keep generating mip levels until both dimensions are reduced to 1.\n   */\n  uint32_t nmips =\n      1 + static_cast<uint32_t>(std::floor(std::log2(std::max(texture_width, texture_height))));\n\n  return staging_image {\n      .staging_buffer  = std::move(staging_buf),\n      .width_px        = texture_width,\n      .height_px       = texture_height,\n      .nmax_mip_levels = nmips};\n}\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/common/staging-image.h",
    "content": "#pragma once\n\n#include \"nicegraf-wrappers.h\"\n\nnamespace ngf_samples {\n\n/**\n * This is a helper type used by the samples to upload image data to the rendering device.\n * Usually the samples create a staging buffer that is just enough to upload a given image. The raw\n * RGBA data is loaded directly into the staging buffer, which the sample can then use to populate\n * an image. After that, the staging buffer is discarded. This simple method works for the sample\n * code, but more advanced applications will require a different approach.\n */\nstruct staging_image {\n  ngf::buffer staging_buffer; /** Staging buffer containing raw image data. */\n  uint32_t    width_px;       /** Image width in pixels. */\n  uint32_t    height_px;      /** Image height in pixels. */\n  uint32_t nmax_mip_levels; /** Maximum number of mip levels that may be generated for this image. */\n};\n\n/**\n * Creates a staging_image populated with the raw RGBA data from the given Targa file.\n */\nstaging_image create_staging_image_from_tga(const char* file_name);\n\n}  // namespace ngf_samples\n"
  },
  {
    "path": "samples/shaders/blinn-phong.hlsl",
    "content": "//T: blinn-phong vs:VSMain ps:PSMain\n\n[[vk::constant_id(0)]] const uint enableHalfLambert = 0;\n\nstruct PixelShaderInput {\n  float4 clipSpacePosition : SV_Position;\n  float4 viewSpaceInterpNormal : ATTR0;\n  float4 viewSpacePosition : ATTR1;\n};\n\nstruct VertexShaderInput {\n  float3 objSpacePosition : SV_Position;\n  float3 objSpaceNormal : ATTR0;\n};\n\nstruct ShaderUniforms {\n  float4x4 objToViewTransform;\n  float4x4 viewToClipTransform;\n  float4   ambientLightIntensity;\n  float4   viewSpacePointLightPosition;\n  float4   pointLightIntensity;\n  float4   viewSpaceDirectionalLightDirection;\n  float4   directionalLightIntensity;\n  float4   diffuseReflectance;\n  float4   specularCoefficient;\n  float    shininess;\n};\n\n[[vk::binding(0, 0)]] ConstantBuffer<ShaderUniforms> shaderUniforms;\n\nPixelShaderInput VSMain(VertexShaderInput vertexAttrs) {\n  float4 viewSpacePosition = mul(shaderUniforms.objToViewTransform, float4(vertexAttrs.objSpacePosition, 1.0));\n  float4 viewSpaceNormal =  normalize(mul(shaderUniforms.objToViewTransform, float4(vertexAttrs.objSpaceNormal, 0.0))); // TODO inverse transpose.\n  float4 clipSpacePosition = mul(shaderUniforms.viewToClipTransform, viewSpacePosition);\n  clipSpacePosition.y *= -1.0;\n   PixelShaderInput result = {\n    clipSpacePosition,\n    viewSpaceNormal,\n    viewSpacePosition,\n  };\n  return result;\n}\n\nfloat computeCosineFactor(float3 direction, float3 normal) {\n  float cosineFactor = dot(direction, normal);\n  if (enableHalfLambert == 0) {\n    return max(0.0, cosineFactor);\n  } else {\n    cosineFactor = 0.5 * cosineFactor + 0.5;\n    cosineFactor *= cosineFactor;\n    return cosineFactor;\n  }\n}\n\nfloat3 computeIrradiance(float3 intensity, float3 direction, float3 normal, float distSquared) {\n  return intensity * computeCosineFactor(direction, normal) / distSquared; \n}\n\nfloat3 computeSpecular(float3 position, float3 lightDirection, float3 normal, float shininess) {\n  float3 
directionToObserver = normalize(-position);\n  float3 halfwayVector = normalize(directionToObserver + lightDirection);\n  return pow(max(0.0, dot(normal, halfwayVector)), shininess);\n}\n\nfloat4 PSMain(PixelShaderInput fragmentAttribs) : SV_Target {\n    float4 viewSpaceNormal = normalize(fragmentAttribs.viewSpaceInterpNormal);\n    float4 viewSpaceVectorToPointLight = shaderUniforms.viewSpacePointLightPosition - fragmentAttribs.viewSpacePosition;\n    float distanceToPointLightSquared = dot(viewSpaceVectorToPointLight, viewSpaceVectorToPointLight);\n    float4 viewSpaceDirectionToPointLight = normalize(viewSpaceVectorToPointLight);\n    float3 pointLightIrradiance =\n      computeIrradiance(\n        shaderUniforms.pointLightIntensity.rgb,\n        viewSpaceDirectionToPointLight.xyz,\n        viewSpaceNormal.xyz,\n        distanceToPointLightSquared);\n    float3 directionalLightIrradiance =\n      computeIrradiance(\n        shaderUniforms.directionalLightIntensity.rgb,\n        normalize(shaderUniforms.viewSpaceDirectionalLightDirection.xyz),\n        viewSpaceNormal.xyz,\n        1.0f);\n    float3 specularReflectanceFromPointLight =\n      shaderUniforms.specularCoefficient.rgb *\n      computeSpecular(\n        fragmentAttribs.viewSpacePosition.xyz,\n        viewSpaceDirectionToPointLight.xyz,\n        viewSpaceNormal.xyz,\n        shaderUniforms.shininess); \n    float3 specularReflectanceFromDirectionalLight =\n      shaderUniforms.specularCoefficient.rgb *\n      computeSpecular(\n        fragmentAttribs.viewSpacePosition.xyz,\n        normalize(shaderUniforms.viewSpaceDirectionalLightDirection.xyz),\n        viewSpaceNormal.xyz,\n        shaderUniforms.shininess); \n        \n    float3 pointLightContribution = (shaderUniforms.diffuseReflectance.rgb + specularReflectanceFromPointLight) * pointLightIrradiance;\n    float3 directionalLightContribution = (shaderUniforms.diffuseReflectance.rgb + specularReflectanceFromDirectionalLight) * 
directionalLightIrradiance;\n    \n    return float4(pointLightContribution + directionalLightContribution + shaderUniforms.ambientLightIntensity.rgb, 1.0);\n}\n"
  },
  {
    "path": "samples/shaders/compute-demo.hlsl",
    "content": "// T: compute-demo cs:CSMain\n\n[[vk::binding(0, 0)]] RWTexture2D<float4> outputImage;\n\nfloat2 f(float2 x, float2 c) {\n  return mul(x, float2x2(x.x, x.y, -x.y, x.x)) + c;\n}\n\nfloat3 palette(float t, float3 a, float3 b, float3 c, float3 d) {\n  return a + b * cos(6.28318 * (c * t + d)); /* thanks, iq */\n}\n\n[numthreads(4, 4, 1)] void CSMain(uint3 tid\n                                  : SV_DispatchThreadID) {\n  float2 uv         = float2 ((float)tid.x / 512.0f, (float)tid.y / 512.0f);\n  float2 c          = float2(-0.6, 0.0) + (2.0*uv - 1.0);\n  float2 x          = float2(0.0, 0.0);\n  bool   escaped    = false;\n  int    iterations = 0;\n  for (int i = 0; i < 50; i++) {\n    iterations = i;\n    x          = f(x, c);\n    if (length(x) > 2.0) {\n      escaped = true;\n      break;\n    }\n  }\n  outputImage[tid.xy] = (escaped ? float4(\n                                               palette(\n                                                   float(iterations) / 50.0,\n                                                   float3(0.3, 0.2, 0.4),\n                                                   float3(0.2, 0.1, 0.0),\n                                                   float3(1.0, 1.0, 1.0),\n                                                   float3(0.3, 0.5, 0.2)),\n                                               1.0)\n                                         : float4(0.0, 0.0, 0.0, 1.0));\n}\n"
  },
  {
    "path": "samples/shaders/compute-vertices.hlsl",
    "content": "struct VertexData {\n  float3 position;\n  float3 normal;\n};\n\nstruct VertexOutput {\n  float4 position : SV_Position;\n  float  height : ATTR0;\n};\n\n// T: render-vertices vs:VSMain ps:PSMain\n\nstruct VertexShaderUniforms {\n  float4x4 objToViewTransform;\n  float4x4 viewToClipTransform;\n};\n\n[[vk::binding(0, 0)]] ConstantBuffer<VertexShaderUniforms> vertShaderUniforms;\n\n#define maxAmplitude 0.05\n\nVertexOutput VSMain(uint vertID : SV_VertexID, float4 pos : SV_Position) {\n  VertexOutput result;\n  result.height = pos.y;\n  result.position =\n      mul(vertShaderUniforms.viewToClipTransform,\n          mul(vertShaderUniforms.objToViewTransform, float4(pos.xyz, 1.0)));\n  return result;\n}\n\nfloat4 PSMain(VertexOutput pxIn) : SV_Target {\n  float shade = saturate(-(pxIn.height / (maxAmplitude + 0.04)) * 0.5 + 0.5);\n  shade       = shade * shade;\n  return shade * float4(1., 1., 1.0, 1.0);\n}\n\n// T: compute-vertices cs:CSMain\n\nstruct ComputeShaderUniforms {\n  float4 time;\n};\n\n[[vk::binding(0, 1)]] RWStructuredBuffer<float4>            outputBuffer;\n[[vk::binding(1, 1)]] ConstantBuffer<ComputeShaderUniforms> computeShaderUniforms;\n\n[numthreads(2, 2, 1)] void CSMain(uint3 tid\n                                  : SV_DispatchThreadID) {\n  const uint vertsPerSide  = 512;\n  uint       vertID        = tid.y * vertsPerSide + tid.x;\n  uint2      vertRowColumn = tid.xy;\n  float2     vertUV        = float2(\n      (float)vertRowColumn.x / (float)(vertsPerSide - 1),\n      (float)vertRowColumn.y / (float)(vertsPerSide - 1));\n  float2 vertXZ = vertUV * 2.0 - float2(1.0, 1.0);\n  float  height = maxAmplitude *\n      sin(cos(computeShaderUniforms.time.x * 2.0 + vertRowColumn.x * 0.1) + vertRowColumn.y * 0.1);\n  float4 position      = float4(vertXZ.x, height, vertXZ.y, 1.0);\n  outputBuffer[vertID] = position;\n}"
  },
  {
    "path": "samples/shaders/cubemap.hlsl",
    "content": "// T: cubemap vs:VSMain ps:PSMain define:GENERIC_FS_INPUT_HAS_CLIPSPACE_POS=1\n// T: cubemap-array vs:VSMain ps:PSMain define:GENERIC_FS_INPUT_HAS_CLIPSPACE_POS=1 define:USE_CUBEMAP_ARRAY=1\n\n#include \"triangle.hlsl\"\n\nstruct ShaderUniforms {\n  float4x4 cameraTransform;\n  float    aspectRatio;\n#if defined(USE_CUBEMAP_ARRAY)\n  float    cubemapArrayIndex;\n#endif\n};\n\n#if defined(USE_CUBEMAP_ARRAY)\n#define TEXTURE_IMAGE_TYPE TextureCubeArray\n#else\n#define TEXTURE_IMAGE_TYPE TextureCube\n#endif\n\n[[vk::binding(0, 0)]] ConstantBuffer<ShaderUniforms> shaderUniforms;\n[[vk::binding(1, 0)]] uniform TEXTURE_IMAGE_TYPE cubemapImage;\n[[vk::binding(2, 0)]] uniform sampler imageSampler;\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target {\n  float3 direction = -mul(shaderUniforms.cameraTransform,\n                          float4(vertexAttribs.clipSpacePosition.x * shaderUniforms.aspectRatio,\n                                 vertexAttribs.clipSpacePosition.y,\n                                 1.0,\n                                 0.0)).xyz;\n#if defined(USE_CUBEMAP_ARRAY)\n  float4 cubemapSampleCoords = float4(direction, shaderUniforms.cubemapArrayIndex);\n#else\n  float3 cubemapSampleCoords = direction;\n#endif\n  return cubemapImage.Sample(imageSampler, cubemapSampleCoords);\n}\n\nGenericFragShaderInput VSMain(uint vertexId : SV_VertexID) {\n  return TriangleVertex(vertexId, 1.0, 0.0, 0.0);\n}\n"
  },
  {
    "path": "samples/shaders/fullscreen-triangle.hlsl",
    "content": "//T: fullscreen-triangle ps:PSMain vs:VSMain\n//T: small-triangle ps:PSMain vs:VSMain define:SCALE=0.25\n\n#define GENERIC_FS_INPUT_HAS_COLOR\n#include \"triangle.hlsl\"\n\n#ifndef SCALE\n#define SCALE 1.0\n#endif\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_TARGET {\n  return vertexAttribs.color;\n}\n\nGenericFragShaderInput VSMain(uint vertexId : SV_VertexID) {\n  return TriangleVertex(vertexId, SCALE, 0.0, 0.0);\n}\n"
  },
  {
    "path": "samples/shaders/generic-frag-shader-input.hlsl",
    "content": "struct GenericFragShaderInput {\n  float4 position : SV_Position;\n  \n#if defined(GENERIC_FS_INPUT_HAS_COLOR)\n  float4 color : NGF_COLOR;\n#endif\n\n#if defined(GENERIC_FS_INPUT_HAS_CLIPSPACE_POS)\n  float4 clipSpacePosition : NGF_CLIP_SPACE_POSITION;\n#endif\n\n#if defined(GENERIC_FS_INPUT_HAS_UV)\n  float2 textureUv : NGF_UV;\n#endif\n\n};"
  },
  {
    "path": "samples/shaders/imgui.hlsl",
    "content": "//T: imgui ps:PSMain vs:VSMain\n\n#define GENERIC_FS_INPUT_HAS_UV\n#define GENERIC_FS_INPUT_HAS_COLOR\n#include \"generic-frag-shader-input.hlsl\"\n\nstruct ImGuiVSInput {\n  float2 position : ATTR0;\n  float2 uv : TEXCOORD0;\n  float4 color : COLOR0;\n};\n\nstruct VertShaderUniforms {\n  float4x4 projectionTransform;\n};\n\n[[vk::binding(0, 0)]] ConstantBuffer<VertShaderUniforms> vertShaderUniforms;\n\nGenericFragShaderInput VSMain(ImGuiVSInput input) {\n  GenericFragShaderInput vertexData = {\n    mul(vertShaderUniforms.projectionTransform,\n        float4(input.position, 0.0, 1.0)),\n    input.color,\n    input.uv\n  };\n  return vertexData;\n}\n\n[[vk::binding(1, 0)]] uniform Texture2D textureImage;\n[[vk::binding(2, 0)]] uniform sampler imageSampler;\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target {\n  return vertexAttribs.color * textureImage.Sample(imageSampler, vertexAttribs.textureUv);\n}\n"
  },
  {
    "path": "samples/shaders/instancing.hlsl",
    "content": "//T: instancing ps:PSMain vs:VSMain\n\n#include \"quat.hlsl\"\n\n#define GENERIC_FS_INPUT_HAS_UV\n#include \"generic-frag-shader-input.hlsl\"\n\nstruct VertexShaderInput {\n  float3 objSpacePosition : SV_Position;\n  float2 textureUv : TEXCOORD0;\n};\n\nstruct ShaderUniforms {\n  float4x4 worldToClipTransform;\n  float timestamp;\n};\n\n[[vk::binding(0, 0)]] ConstantBuffer<ShaderUniforms> shaderUniforms;\n[[vk::binding(1, 0)]] Buffer<float3> perInstanceData;\n\nGenericFragShaderInput VSMain(VertexShaderInput vertexAttrs, int instanceIdx : SV_InstanceID) {\n  float4 worldSpaceTranslation = float4(perInstanceData.Load(instanceIdx), 0.0);\n  const float oscillationFrequency = 5.0;\n  float  oscillationPhase = worldSpaceTranslation.x * worldSpaceTranslation.y;\n  float4 oscillationOffset = float4(0.0, sin(oscillationFrequency * (shaderUniforms.timestamp + oscillationPhase)), 0.0, 0.0);\n  float4 rotationQuat = quatFromAxisAngle(worldSpaceTranslation.xyz, shaderUniforms.timestamp);\n  float4 worldSpacePosition = rotateByQuat(float4(vertexAttrs.objSpacePosition, 1.0), rotationQuat) +\n                              worldSpaceTranslation +\n                              oscillationOffset;\n  float4 clipSpacePosition = mul(shaderUniforms.worldToClipTransform, worldSpacePosition);\n  clipSpacePosition.y *= -1.0;\n  GenericFragShaderInput result = {\n    clipSpacePosition,\n    vertexAttrs.textureUv\n  };\n  return result;\n}\n\n[[vk::binding(2, 0)]] uniform Texture2D modelTexture;\n[[vk::binding(3, 0)]] uniform sampler textureSampler;\n\nfloat4 PSMain(GenericFragShaderInput fragmentAttribs) : SV_Target {\n    return modelTexture.Sample(textureSampler, fragmentAttribs.textureUv);\n}"
  },
  {
    "path": "samples/shaders/polygon.hlsl",
    "content": "//T: polygon ps:PSMain vs:VSMain\n\n#define GENERIC_FS_INPUT_HAS_COLOR\n#include \"generic-frag-shader-input.hlsl\"\n\nstruct VertShaderUniforms {\n  float scaleA;\n  float scaleB;\n  float time;\n  float aspectRatio;\n  float theta;\n};\n\n\n[[vk::binding(0, 0)]] ConstantBuffer<VertShaderUniforms> vertShaderUniforms;\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target {\n  return vertexAttribs.color;\n}\n\nGenericFragShaderInput VSMain(uint vertexId : SV_VertexID) {\n  GenericFragShaderInput polygonVertexData;\n  if (vertexId % 3 == 0) {\n    polygonVertexData.position = float4(0.0, 0.0, 0.0, 1.0);\n    polygonVertexData.color = float4(0.8, 0.7, 0.8, 1.0);\n  } else {\n    float    rotationAngle = vertShaderUniforms.time;\n    float2x2 rotationMatrix = {\n      cos(rotationAngle), -sin(rotationAngle),\n      sin(rotationAngle), cos(rotationAngle)\n    };\n    float effectiveScale = (vertexId % 2  ? vertShaderUniforms.scaleB : vertShaderUniforms.scaleA);\n    int outerVertexId = int(round(float(vertexId)/3.0));\n    float theta = vertShaderUniforms.theta;\n    float2 vertexPosition = mul(rotationMatrix,\n                                float2(sin(outerVertexId * theta),\n                                       cos(outerVertexId * theta))) * float2(1.0, vertShaderUniforms.aspectRatio)\n                                                                    * effectiveScale;\n    polygonVertexData.position = float4(vertexPosition, 0.0, 1.0);\n    polygonVertexData.color = float4(0.5 * (vertexPosition.x + 1.0), 0.5 * (vertexPosition.y + 1.0), abs(1.0 - vertexPosition.x), 1.0);\n    polygonVertexData.position.y *= -1.0;\n  }\n  return polygonVertexData;\n}\n"
  },
  {
    "path": "samples/shaders/quad.hlsl",
    "content": ""
  },
  {
    "path": "samples/shaders/quat.hlsl",
    "content": "// Helper functions for quaternions.\n\nfloat4 quatFromAxisAngle(float3 axis, float angle) {\n  float3 n = normalize(axis);\n  return float4(sin(angle/2.0) * n, cos(angle/2.0));\n}\n\nfloat4 quatMul(float4 lhs, float4 rhs) {\n  const float x1 = lhs[0],\n              x2 = rhs[0],\n              y1 = lhs[1],\n              y2 = rhs[1],\n              z1 = lhs[2],\n              z2 = rhs[2],\n              w1 = lhs[3],\n              w2 = rhs[3];\n\n  return float4(x1 * w1 + y1 * z2 - z1 * y2 + x2 * w1,\n                y1 * w2 - x1 * z2 + z1 * x2 + y2 * w1,\n                x1 * y2 - y1 * x2 + z1 * w2 + z2 * w1,\n                w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2);\n}\n\nfloat4 rotateByQuat(float4 a, float4 q) {\n  float x = a[0], y = a[1], z = a[2];\n  float qx = q[0], qy = q[1], qz = q[2], qw = q[3];\n\n  float ix = qw * x + qy * z - qz * y;\n  float iy = qw * y + qz * x - qx * z;\n  float iz = qw * z + qx * y - qy * x;\n  float iw = -qx * x - qy * y - qz * z;\n\n  return float4(ix * qw + iw * -qx + iy * -qz - iz * -qy,\n                iy * qw + iw * -qy + iz * -qx - ix * -qz,\n                iz * qw + iw * -qz + ix * -qy - iy * -qx,\n                a[3]);\n}"
  },
  {
    "path": "samples/shaders/simple-texture.hlsl",
    "content": "// T: simple-texture vs:VSMain ps:PSMain\n\n#define GENERIC_FS_INPUT_HAS_UV\n#include \"triangle.hlsl\"\n\n[[vk::binding(1, 0)]] uniform Texture2D textureImage;\n[[vk::binding(2, 0)]] uniform sampler imageSampler;\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target {\n  return textureImage.Sample(imageSampler, vertexAttribs.textureUv);\n}\n\nGenericFragShaderInput VSMain(uint vertexId : SV_VertexID) {\n  return TriangleVertex(vertexId, 1.0, 0.0, 0.0);\n}\n"
  },
  {
    "path": "samples/shaders/textured-quad.hlsl",
    "content": "//T: textured-quad ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1\n//T: textured-quad-image-array ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1 define:USE_IMAGE_ARRAY=1\n//T: textured-quad-multiple-images ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1 define:NUM_IMAGES=4\n\n#include \"generic-frag-shader-input.hlsl\"\n\nstruct ShaderUniforms {\n  float4x4 transformMatrix;\n#if defined(USE_IMAGE_ARRAY)\n  float imageArrayIdx;\n#endif\n#if defined(NUM_IMAGES)\n  uint imageIdx;\n#endif\n};\n\n[[vk::binding(0, 0)]] ConstantBuffer<ShaderUniforms> shaderUniforms;\n\nGenericFragShaderInput VSMain(uint vertexId : SV_VertexID) {\n  const float2 vertices[] = {\n    float2(1.0, -1.0), float2(-1.0, -1.0), float2(1.0, 1.0),\n    float2(1.0, 1.0), float2(-1.0, -1.0), float2(-1.0, 1.0)\n  };\n  const float2 uvs[] = {\n    float2(1.0, 1.0), float2(0.0, 1.0), float2(1.0, 0.0),\n    float2(1.0, 0.0), float2(0.0, 1.0), float2(0.0, 0.0)\n  };\n  vertexId = vertexId % 6;\n  GenericFragShaderInput result = {\n    mul(shaderUniforms.transformMatrix,\n        float4(vertices[vertexId], 0.0, 1.0)),\n    2 * uvs[vertexId]\n  };\n  return result;\n}\n\n#if defined(USE_IMAGE_ARRAY)\n#define TEXTURE_IMAGE_TYPE Texture2DArray\n#else\n#define TEXTURE_IMAGE_TYPE Texture2D\n#endif\n\n#if !defined(NUM_IMAGES)\n#define NUM_IMAGES (1)\n#endif\n\n[[vk::binding(0, 1)]] uniform TEXTURE_IMAGE_TYPE textureImage[NUM_IMAGES];\n[[vk::binding(1, 0)]] uniform sampler imageSampler;\n\nfloat4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target {\n#if defined(USE_IMAGE_ARRAY)\n  float3 sampleCoords = float3(vertexAttribs.textureUv, shaderUniforms.imageArrayIdx);\n#else\n  float2 sampleCoords = vertexAttribs.textureUv;\n#endif\n#if NUM_IMAGES > 1\n  uint i = shaderUniforms.imageIdx % NUM_IMAGES;\n#else\n  uint i = 0u;\n#endif\n  return textureImage[i].Sample(imageSampler, sampleCoords);\n}\n"
  },
  {
    "path": "samples/shaders/triangle.hlsl",
    "content": "#include \"generic-frag-shader-input.hlsl\"\n\nGenericFragShaderInput TriangleVertex(uint vertexId, float scale, float2 offset, float depth) {\n  float4 pos[] = {\n    float4(-1.0,  1.0, 0.0, 1.0),\n    float4( 3.0,  1.0, 0.0, 1.0),\n    float4(-1.0, -3.0, 0.0, 1.0)\n  };\n  const float2 texcoords[] = {\n    float2(0.0, 1.0), float2(2.0, 1.0), float2(0.0, -1.0)\n  };\n  const float4 colors[] = {\n    float4(1.0, 0.0, 0.0, 1.0),\n    float4(0.0, 1.0, 0.0, 1.0),\n    float4(0.0, 0.0, 1.0, 1.0)\n  };\n  GenericFragShaderInput triangleVertexData;\n  vertexId = vertexId % 3;\n  triangleVertexData.position = float4(pos[vertexId].xyz * scale, 1.0) + float4(offset, depth, 0.0);\n#if defined(GENERIC_FS_INPUT_HAS_UV)  \n  triangleVertexData.textureUv = texcoords[vertexId];\n#endif\n\n#if defined(GENERIC_FS_INPUT_HAS_COLOR)  \n  triangleVertexData.color    = colors[vertexId];\n#endif\n\n#if defined(GENERIC_FS_INPUT_HAS_CLIPSPACE_POS)\n  triangleVertexData.clipSpacePosition = triangleVertexData.position;\n#endif\n  return triangleVertexData;\n}"
  },
  {
    "path": "samples/shaders/volume-renderer.hlsl",
    "content": "// T: volume-renderer vs:VSMain ps:PSMain\n\nstruct VertexShaderInput {\n  float4 position : SV_Position;\n  float3 textureCoordinate : TexCoord;\n};\n\nstruct VolumeRendererUniforms {\n  float4x4 transformMatrix;\n  float    aspectRatio;\n};\n\n[[vk::binding(0,1)]] ConstantBuffer<VolumeRendererUniforms> shaderUniforms;\n[[vk::binding(0,0)]] Texture3D volumeImage;\n\nVertexShaderInput VSMain(uint vertexId: SV_VertexID, uint instanceId : SV_InstanceID) {\n const float2 vertices[] = {\n    float2(1.0, -1.0), float2(-1.0, -1.0), float2(1.0, 1.0),\n    float2(1.0, 1.0), float2(-1.0, -1.0), float2(-1.0, 1.0)\n  };\n  vertexId = vertexId % 6;\n  float w, h, d;\n  volumeImage.GetDimensions(w, h, d);\n  float3 xyz = float3(vertices[vertexId], 2.0 * (instanceId/d) - 1.0);\n  float3 uvw = xyz * float3(1.0, -1.0, 1.0);\n  xyz.y *= shaderUniforms.aspectRatio;\n  uvw = mul(shaderUniforms.transformMatrix, float4(uvw, 1.0)).xyz;\n  uvw.xy = 0.5 * uvw.xy + 0.5;\n  VertexShaderInput result = {\n    float4(xyz.xy, 0.0, 1.0),\n    uvw,\n  };\n  return result;\n}\n\n[[vk::binding(1,0)]] sampler volumeSampler;\n\nfloat4 PSMain(VertexShaderInput input) : SV_Target {\n  float alpha = volumeImage.Sample(volumeSampler, input.textureCoordinate).r;\n  return float4(1., 1., 1., alpha);\n}"
  },
  {
    "path": "source/ngf-common/arena.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"arena.h\"\n\n#include \"macros.h\"\n#include \"util.h\"\n\n#include <cstdlib>\n#include <new>\n\nnamespace ngfi {\n\n// Align a pointer to the given alignment (must be power of two)\nstatic void* align_ptr(void* ptr, size_t alignment) noexcept {\n  uintptr_t addr    = reinterpret_cast<uintptr_t>(ptr);\n  uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);\n  return (void*)(aligned);\n}\n\n// Internal block structure - header followed by data in single allocation\nstruct arena::block {\n  size_t        capacity;\n  size_t        used;\n  arena::block* next;\n\n  uint8_t* data() noexcept {\n    uintptr_t header_end = reinterpret_cast<uintptr_t>(this) + sizeof(arena::block);\n    uintptr_t aligned    = (header_end + NGFI_MAX_ALIGNMENT - 1) & ~(NGFI_MAX_ALIGNMENT - 1);\n    return 
reinterpret_cast<uint8_t*>(aligned);\n  }\n\n  const uint8_t* data() const noexcept {\n    return const_cast<arena::block*>(this)->data();\n  }\n\n  // Calculate total allocation size for a block with given data capacity\n  static size_t alloc_size(size_t data_capacity) noexcept {\n    constexpr size_t header_size    = sizeof(arena::block);\n    const size_t     align_mask     = NGFI_MAX_ALIGNMENT - 1;\n    const size_t     aligned_header = (header_size + align_mask) & ~align_mask;\n    return aligned_header + data_capacity;\n  }\n\n  static arena::block* create(size_t data_capacity) noexcept {\n    size_t total_size = block::alloc_size(data_capacity);\n    void*  raw        = malloc(total_size);\n    if (!raw || total_size < sizeof(arena::block)) return nullptr;\n    auto* block     = new (raw) arena::block {};\n    block->capacity = data_capacity;\n    block->used     = 0;\n    block->next     = block;\n    return block;\n  }\n\n  static void destroy(arena::block* block) noexcept {\n    block->~block();\n    ::free(block);\n  }\n\n  static size_t destroy_chain(arena::block* blks, bool destroy_self) noexcept {\n    size_t result = 0;\n    if (blks) {\n      arena::block* cur = blks->next;\n      while (cur != blks) {\n        auto prev = cur;\n        cur       = cur->next;\n        result += alloc_size(prev->capacity);\n        destroy(prev);\n      }\n      if (destroy_self) {\n        result += alloc_size(blks->capacity);\n        destroy(blks);\n      }      else {\n        blks->next = blks;\n        }\n    }\n    return result;\n  }\n\n  void* alloc(size_t size, size_t alignment, size_t* out_total_used) noexcept {\n    uint8_t* data_start   = data();\n    uint8_t* current_ptr  = data_start + used;\n    void*    aligned_ptr  = align_ptr(current_ptr, alignment);\n    size_t   padding      = static_cast<size_t>(static_cast<uint8_t*>(aligned_ptr) - current_ptr);\n    size_t   total_needed = padding + size;\n\n    if (used + total_needed > capacity) { return 
nullptr; }\n\n    used += total_needed;\n    *out_total_used += total_needed;\n    return aligned_ptr;\n  }\n};\n\narena::arena(size_t initial_capacity) noexcept : block_capacity_ { initial_capacity } {}\n\narena::arena(arena&& other) noexcept\n    : current_block_(other.current_block_)\n    , block_capacity_(other.block_capacity_)\n    , total_allocated_(other.total_allocated_)\n    , total_used_(other.total_used_) {\n  other.current_block_      = nullptr;\n  other.total_allocated_    = 0;\n  other.total_used_         = 0;\n  other.block_capacity_ = 0;\n}\n\narena::~arena() noexcept {\n  block::destroy_chain(current_block_, true);\n}\n\nvoid* arena::alloc(size_t size) noexcept {\n  return alloc_aligned(size, NGFI_MAX_ALIGNMENT);\n}\n\nvoid* arena::alloc_aligned(size_t size, size_t alignment) noexcept {\n  size_t min_capacity = size + alignment;  // Enough for alignment + allocation\n  if (size == 0 || block_capacity_ == 0 || (!current_block_ && !grow(min_capacity))) {\n    return nullptr;\n  }\n\n  // Try to allocate from current block\n  void* result = current_block_->alloc(size, alignment, &total_used_);\n  if (result) {\n    return result;\n  }\n\n  // Need to grow - allocate new block\n  if (!grow(min_capacity)) {\n    return nullptr;\n  }\n\n  // Allocate from new block (should always succeed)\n  return current_block_->alloc(size, alignment, &total_used_);\n}\n\nvoid arena::reset() noexcept {\n  if (current_block_) {\n    total_allocated_ -= block::destroy_chain(current_block_, false);\n    current_block_->used = 0;\n    current_block_->next = current_block_;\n    total_used_          = 0;\n  }\n}\n\nsize_t arena::total_allocated() const noexcept {\n  return total_allocated_;\n}\n\nsize_t arena::total_used() const noexcept {\n  return total_used_;\n}\n\nbool arena::grow(size_t min_capacity) noexcept {\n  // New block size is at least block_capacity_ or min_capacity\n  size_t new_capacity = block_capacity_;\n  if (new_capacity < min_capacity) {\n    
new_capacity = min_capacity;\n  }\n\n  block* new_block = block::create(new_capacity);\n  if (!new_block) {\n    return false;\n  }\n\n  // Chain to current block\n  if (current_block_) {\n    new_block->next = current_block_->next;\n    current_block_->next = new_block;\n  }\n  current_block_       = new_block;\n  total_allocated_ += block::alloc_size(new_capacity);\n\n  return true;\n}\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/arena.h",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <stddef.h>\n#include <stdint.h>\n\nnamespace ngfi {\n\nclass arena {\nprivate:\n  struct block;\n\n  block* current_block_ = nullptr;\n\n  size_t block_capacity_ = 0;\n  size_t total_allocated_    = 0;\n  size_t total_used_         = 0;\n\npublic:\n  arena() = default;\n  explicit arena(size_t initial_capacity) noexcept;\n  arena(arena&& other) noexcept;\n  ~arena() noexcept;\n  arena(const arena&)            = delete;\n  arena& operator=(const arena&) = delete;\n  arena& operator=(arena&&)      = delete;\n\n  void                 reset() noexcept;\n  void*                alloc(size_t size) noexcept;\n  void*                alloc_aligned(size_t size, size_t alignment) noexcept;\n  template<class T> T* alloc() noexcept {\n    return (T*)(alloc_aligned(sizeof(T), alignof(T)));\n  }\n  
template<class T> T* alloc(size_t n) noexcept {\n    // Check for overflow before computing sizeof(T) * n\n    if (n != 0 && SIZE_MAX / sizeof(T) < n) {\n      return nullptr;\n    }\n    return (T*)(alloc_aligned(sizeof(T) * n, alignof(T)));\n  }\n\n  size_t total_allocated() const noexcept;\n  size_t total_used() const noexcept;\n  void set_block_size(size_t size) { block_capacity_ = size; }\n\nprivate:\n  bool grow(size_t min_capacity) noexcept;\n};\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/array.h",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"macros.h\"\n#include \"util.h\"\n\n#include <stddef.h>\n#include <string.h>\n\nnamespace ngfi {\n\n/**\n * A simple dynamic array for trivially-copyable types.\n * Similar to std::vector but uses NGFI allocation callbacks.\n */\ntemplate<class T, class AllocT = configured_alloc_callbacks, bool FixedSize = false>\nclass array {\nprivate:\n  T*     data_     = nullptr;\n  size_t size_     = 0;\n  size_t capacity_ = 0;\n\n  static constexpr size_t MIN_CAPACITY = 8;\n\npublic:\n  using value_type      = T;\n  using iterator        = T*;\n  using const_iterator  = const T*;\n  using reference       = T&;\n  using const_reference = const T&;\n\n  array() noexcept = default;\n  explicit array(size_t size) : data_ {ngfi::allocn<T>(size)}, size_ {size}, capacity_ {size} {\n  }\n  array(const T* src, 
size_t count) : array{count} {\n    if (data_) { memcpy (data_, src, sizeof(T)*size_); }\n  }\n  array(array&& other) noexcept {\n    *this = ngfi::move(other);\n  }\n  ~array() noexcept { destroy(); }\n\n  array& operator=(array&& other ) noexcept {\n    destroy();\n    data_ = other.data_;\n    size_ = other.size_;\n    capacity_ = other.capacity_;\n\n    other.data_     = nullptr;\n    other.size_     = 0;\n    other.capacity_ = 0;\n\n    return *this;\n  }\n  \n  size_t size() const noexcept { return size_; }\n  size_t capacity() const noexcept { return capacity_; }\n  bool empty() const noexcept { return size_ == 0; }\n\n  T& operator[](size_t idx) noexcept { return data_[idx]; }\n  const T& operator[](size_t idx) const noexcept { return data_[idx]; }\n\n  T& front() noexcept { return data_[0]; }\n  const T& front() const noexcept { return data_[0]; }\n  T& back() noexcept { return data_[size_ - 1]; }\n  const T& back() const noexcept { return data_[size_ - 1]; }\n\n  T* data() noexcept { return data_; }\n  const T* data() const noexcept { return data_; }\n\n  T* push_back(const T& value) noexcept {\n    static_assert(!FixedSize);\n    if (!ensure_capacity(size_ + 1)) {\n      return nullptr;\n    }\n    if constexpr (__is_trivially_copyable(T)) {\n        memcpy(&data_[size_], &value, sizeof(T));\n    } else {\n        data_[size_] = value;\n    }\n    ++size_;\n    return &data_[size_ - 1];\n  }\n\n  template <class... Args> T* emplace_back(Args... 
args) {\n    static_assert(!FixedSize);\n    if (!ensure_capacity(size_ + 1)) { return nullptr; }\n    new (&data_[size_]) T {ngfi::forward<Args>(args)...};\n    ++size_;\n    return &data_[size_ - 1];\n  }\n\n  void pop_back() noexcept {\n    static_assert(!FixedSize);\n    if (size_ > 0) {\n      --size_;\n    }\n  }\n\n  void clear() noexcept {\n    static_assert(!FixedSize);\n    size_ = 0;\n  }\n\n  \n  bool resize(size_t new_size) noexcept {\n    static_assert(!FixedSize);\n    if (new_size > capacity_) {\n      if (!reserve(new_size)) {\n        return false;\n      }\n    }\n    size_ = new_size;\n    return true;\n  }\n\n  bool reserve(size_t new_capacity) noexcept {\n    static_assert(!FixedSize);\n    if (new_capacity <= capacity_) {\n      return true;\n    }\n    return grow_to(new_capacity);\n  }\n\n  iterator begin() noexcept { return data_; }\n  const_iterator begin() const noexcept { return data_; }\n  iterator end() noexcept { return data_ + size_; }\n  const_iterator end() const noexcept { return data_ + size_; }\n\nprivate:\n  void destroy() noexcept {\n    if (data_ != nullptr) {\n      AllocT::freen(data_, capacity_);\n      data_     = nullptr;\n      size_     = 0;\n      capacity_ = 0;\n    }\n  }\n\n  array(const array&)            = delete;\n  array& operator=(const array&) = delete;\n \n  bool ensure_capacity(size_t required) noexcept {\n    if (required <= capacity_) {\n      return true;\n    }\n    size_t new_capacity = capacity_ == 0 ? 
MIN_CAPACITY : capacity_ * 2;\n    while (new_capacity < required) {\n      new_capacity *= 2;\n    }\n    return grow_to(new_capacity);\n  }\n\n  bool grow_to(size_t new_capacity) noexcept {\n    T* new_data = AllocT::template allocn<T>(new_capacity);\n    if (new_data == nullptr) {\n      return false;\n    }\n\n    if (data_ != nullptr) {\n      if (size_ > 0) {\n        if constexpr (__is_trivially_copyable(T)) {\n            memcpy(new_data, data_, size_ * sizeof(T));\n        } else {\n            for (size_t i = 0u; i < size_; ++i) {\n                new (&new_data[i]) T {ngfi::move(data_[i])};\n            }\n        }\n      }\n      AllocT::freen(data_, capacity_);\n    }\n\n    data_     = new_data;\n    capacity_ = new_capacity;\n    return true;\n  }\n};\n\ntemplate <class T, class AllocT = configured_alloc_callbacks>\nusing fixed_array = array<T, AllocT, true>;\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/chunked-list.h",
    "content": "#pragma once\n\n#include \"arena.h\"\n\nnamespace ngfi {\n\ntemplate <class T, unsigned ChunkCapacity = 10>\nclass chunked_list {\nstatic_assert(__is_trivially_copyable(T));\nprivate:\n  struct chunk {\n    chunk* next;\n    T* free_slot;\n    T slots[ChunkCapacity];\n  };\n  chunk* last_chunk_ = nullptr;\n  \npublic:\n  class iterator {\n    friend class chunked_list;\n\n    chunk* first_chunk_ = nullptr;\n    chunk* curr_chunk_ = nullptr;\n    T* curr_slot_ = nullptr;\n\n    iterator() = default;\n    explicit iterator(chunk* first_chunk) : first_chunk_{first_chunk},\n     curr_chunk_{first_chunk},\n     curr_slot_ { first_chunk->slots } {}\n\n  public:\n    iterator& operator++() {\n      ++curr_slot_;\n      if (curr_chunk_->free_slot == curr_slot_) {\n        if (curr_chunk_->next == first_chunk_) {\n          first_chunk_ = nullptr;\n          curr_chunk_ = nullptr;\n          curr_slot_ = nullptr;\n        } else {\n          curr_chunk_ = curr_chunk_->next;\n          curr_slot_ = curr_chunk_->slots;\n        }\n      }\n      return *this;\n    }\n    const T& operator*() { return *curr_slot_; }\n    bool operator==(const iterator& it) const {\n      return curr_slot_ == it.curr_slot_ &&\n         first_chunk_ == it.first_chunk_ &&\n         curr_chunk_ == it.curr_chunk_;\n    }\n  };\n\n  iterator begin() noexcept { return !last_chunk_ ? end() : iterator{last_chunk_->next}; }\n  iterator begin() const noexcept { return !last_chunk_ ? 
end() : iterator{last_chunk_->next}; }\n  iterator end() const noexcept { return iterator{}; }\n\n\n  T* append(const T& element, arena& a) noexcept {\n    const bool need_new_chunk = !last_chunk_ || last_chunk_->free_slot == &last_chunk_->slots[ChunkCapacity];\n    if (need_new_chunk) {\n      chunk* new_chunk = a.alloc<chunk>();\n      if (!new_chunk) return nullptr;\n      new_chunk->free_slot = new_chunk->slots;\n      if (!last_chunk_) {\n        last_chunk_ = new_chunk;\n        last_chunk_->next = last_chunk_;\n      } else {\n        new_chunk->next = last_chunk_->next;\n        last_chunk_->next = new_chunk;\n        last_chunk_ = new_chunk;\n      }\n    }\n    T* result = last_chunk_->free_slot++;\n    *result = element;\n    return result;\n  }\n\n  void clear() { last_chunk_ = nullptr; }\n};\n\n}\n"
  },
  {
    "path": "source/ngf-common/cmdbuf-state.h",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"ngf-common/macros.h\"\n#include \"nicegraf.h\"\n\nnamespace ngfi {\n\nenum cmd_buffer_state {\n  CMD_BUFFER_STATE_NEW,\n  CMD_BUFFER_STATE_READY,\n  CMD_BUFFER_STATE_RECORDING,\n  CMD_BUFFER_STATE_READY_TO_SUBMIT,\n  CMD_BUFFER_STATE_PENDING,\n  CMD_BUFFER_STATE_SUBMITTED\n};\n\ntemplate<class CmdBufT> bool transition_cmd_buf(CmdBufT cmd_buf, cmd_buffer_state new_state) {\n  cmd_buffer_state cur_state = cmd_buf->state;\n  bool             has_active_pass =\n      (cmd_buf)->renderpass_active || (cmd_buf)->compute_pass_active || (cmd_buf)->xfer_pass_active;\n  bool is_recordable = cur_state == ::ngfi::CMD_BUFFER_STATE_READY ||\n                       cur_state == ::ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT;\n  switch (new_state) {\n  case ngfi::CMD_BUFFER_STATE_NEW:\n    
NGFI_DIAG_ERROR(\"command buffer cannot go back to a `new` state\");\n    return false;\n  case ngfi::CMD_BUFFER_STATE_READY:\n    if (cur_state != ngfi::CMD_BUFFER_STATE_SUBMITTED &&\n        cur_state != ngfi::CMD_BUFFER_STATE_READY && cur_state != ngfi::CMD_BUFFER_STATE_NEW) {\n      NGFI_DIAG_ERROR(\"command buffer not in a startable state.\");\n      return false;\n    }\n    break;\n  case ngfi::CMD_BUFFER_STATE_RECORDING:\n    if (!is_recordable) {\n      NGFI_DIAG_ERROR(\"command buffer not in a recordable state.\");\n      return false;\n    }\n    break;\n  case ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT:\n    if (cur_state != ngfi::CMD_BUFFER_STATE_RECORDING) {\n      NGFI_DIAG_ERROR(\"command buffer is not actively recording.\");\n      return false;\n    }\n    if (has_active_pass) {\n      NGFI_DIAG_ERROR(\"cannot finish render encoder with an unterminated pass.\");\n      return false;\n    }\n    break;\n  case ngfi::CMD_BUFFER_STATE_PENDING:\n    if (cur_state != ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT &&\n        cur_state != ngfi::CMD_BUFFER_STATE_READY) {\n      NGFI_DIAG_ERROR(\"command buffer not ready to be submitted\");\n      return false;\n    }\n    break;\n  case ngfi::CMD_BUFFER_STATE_SUBMITTED:\n    if (cur_state != ngfi::CMD_BUFFER_STATE_PENDING) {\n      NGFI_DIAG_ERROR(\"command buffer not in a submittable state\");\n      return false;\n    }\n    break;\n  }\n  cmd_buf->state = new_state;\n  return true;\n}\n\n}  // namespace ngfi\n\n#define NGFI_TRANSITION_CMD_BUF(b, new_state) \\\n  if (!ngfi::transition_cmd_buf(b, new_state)) { return NGF_ERROR_INVALID_OPERATION; }\n"
  },
  {
    "path": "source/ngf-common/create-destroy.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n// NOTE: this file is meant to be included from the backend implementation file.\n\nnamespace ngfi {\ntemplate<class T, class InfoT> ngf_error generic_create(const InfoT& info, T** result) {\n  auto maybe_t = T::make(info);\n  if (!maybe_t.has_error()) result[0] = maybe_t.value().release();\n  return maybe_t.has_error() ? 
maybe_t.error() : NGF_ERROR_OK;\n}\n}  // namespace ngfi\n\nngf_error ngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT {\n  // TODO: unset current context\n  assert(ctx);\n  NGFI_FREE(ctx);\n}\n\nngf_error\nngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_shader_stage(ngf_shader_stage stage) NGF_NOEXCEPT {\n  if (stage != nullptr) { NGFI_FREE(stage); }\n}\n\nngf_error ngf_create_render_target(const ngf_render_target_info* info, ngf_render_target* result)\n    NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_render_target(ngf_render_target rt) NGF_NOEXCEPT {\n  if (rt != nullptr) { \n    if (rt->is_default) {\n      NGFI_DIAG_ERROR(\"default RT can only be destroyed by owning context\\n\");\n      return;\n    }\n    NGFI_FREE(rt);\n   }\n}\n\nngf_error ngf_create_compute_pipeline(\n    const ngf_compute_pipeline_info* info,\n    ngf_compute_pipeline*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nngf_error ngf_create_graphics_pipeline(\n    const ngf_graphics_pipeline_info* info,\n    ngf_graphics_pipeline*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_graphics_pipeline(ngf_graphics_pipeline pipe) NGF_NOEXCEPT {\n  if (pipe != nullptr) { NGFI_FREE(pipe); }\n}\n\nvoid ngf_destroy_compute_pipeline(ngf_compute_pipeline pipe) NGF_NOEXCEPT {\n  if (pipe != nullptr) { NGFI_FREE(pipe); }\n}\n\n\nngf_error ngf_create_texel_buffer_view(\n    const ngf_texel_buffer_view_info* info,\n    
ngf_texel_buffer_view*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT {\n  if (buf_view) { NGFI_FREE(buf_view); }\n}\n\nngf_error ngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_buffer(ngf_buffer buf) NGF_NOEXCEPT {\n  if (buf != nullptr) { NGFI_FREE(buf); }\n}\n\n\nngf_error ngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT {\n  if (sampler) { NGFI_FREE(sampler); }\n}\n\nngf_error\nngf_create_cmd_buffer(const ngf_cmd_buffer_info* info, ngf_cmd_buffer* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nngf_error\nngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_image_view(ngf_image_view view) NGF_NOEXCEPT {\n  if (view != nullptr) { NGFI_FREE(view); }\n}\n\nngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  return ngfi::generic_create(*info, result);\n}\n\nvoid ngf_destroy_image(ngf_image image) NGF_NOEXCEPT {\n  if (image != nullptr) { NGFI_FREE(image); }\n}\n\nvoid ngf_destroy_cmd_buffer(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {\n  if (cmd_buffer != nullptr) { NGFI_FREE(cmd_buffer); }\n}\n"
  },
  {
    "path": "source/ngf-common/default-arenas.cpp",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"default-arenas.h\"\n\nnamespace ngfi {\n\narena& tmp_arena() noexcept {\n  static thread_local arena a = arena{100u * 1024u};  // 100KB\n  return a;\n}\n\narena& frame_arena() noexcept {\n  static thread_local arena a = arena{4u * 1024u};  // 4KB\n  return a;\n}\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/default-arenas.h",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"arena.h\"\n\nnamespace ngfi {\n\n/**\n * Get the thread-local temporary arena.\n * This arena is reset frequently within operations.\n */\narena& tmp_arena() noexcept;\n\n/**\n * Get the thread-local frame arena.\n * This arena is reset only at frame boundaries.\n */\narena& frame_arena() noexcept;\n\n/**\n * Allocate a single element from the temporary arena.\n */\ntemplate<class T>\ninline T* tmp_alloc() noexcept {\n  return tmp_arena().alloc<T>();\n}\n\n/**\n * Allocate an array of n elements from the temporary arena.\n */\ntemplate<class T>\ninline T* tmp_alloc(size_t n) noexcept {\n  return tmp_arena().alloc<T>(n);\n}\n\n/**\n * Allocate a single element from the frame arena.\n */\ntemplate<class T>\ninline T* frame_alloc() noexcept {\n  return frame_arena().alloc<T>();\n}\n\n/**\n * 
Allocate an array of n elements from the frame arena.\n */\ntemplate<class T>\ninline T* frame_alloc(size_t n) noexcept {\n  return frame_arena().alloc<T>(n);\n}\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/frame-token.h",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include <stdint.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstatic inline uintptr_t\nngfi_encode_frame_token(uint16_t ctx_id, uint8_t max_inflight_frames, uint8_t frame_id) {\n  const uint32_t ctx_id_ext = ctx_id, max_inflight_frames_ext = max_inflight_frames,\n                 frame_id_ext = frame_id;\n  return (ctx_id_ext << 0x10) | (max_inflight_frames_ext << 0x08) | frame_id_ext;\n}\n\nstatic inline uint16_t ngfi_frame_ctx_id(uintptr_t frame_token) {\n  return ((uint16_t)(frame_token >> 0x10)) & 0xffff;\n}\n\nstatic inline uint8_t ngfi_frame_max_inflight_frames(uintptr_t frame_token) {\n  return ((uint8_t)(frame_token >> 0x08)) & 0xff;\n}\n\nstatic inline uint8_t ngfi_frame_id(uintptr_t frame_token) {\n  return (uint8_t)(frame_token & 0xff);\n}\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "source/ngf-common/hashtable.h",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"macros.h\"\n#include \"util.h\"\n\n#include <stddef.h>\n#include <stdint.h>\n\nnamespace ngfi {\n\nnamespace detail {\n\n/**\n * murmur3 hash function implementation.\n * This is a simplified version for keys 8 bytes in length.\n */\n\ninline uint64_t rotl64(uint64_t x, int8_t r) {\n  return (x << r) | (x >> (64 - r));\n}\n\ninline uint64_t fmix64(uint64_t k) {\n  k ^= k >> 33;\n  k *= 0xff51afd7ed558ccdLLU;\n  k ^= k >> 33;\n  k *= 0xc4ceb9fe1a85ec53LLU;\n  k ^= k >> 33;\n  return k;\n}\n\ninline void mmh3_x64_128(uintptr_t key, uint32_t seed, uint64_t* out) {\n  const auto* data = reinterpret_cast<const uint8_t*>(&key);\n\n  uint64_t h1 = seed;\n  uint64_t h2 = seed;\n  uint64_t c1 = 0x87c37b91114253d5LLU;\n  uint64_t c2 = 0x4cf5ad432745937fLLU;\n  uint64_t k1 = 0;\n\n  k1 ^= 
static_cast<uint64_t>(data[7]) << 56;\n  k1 ^= static_cast<uint64_t>(data[6]) << 48;\n  k1 ^= static_cast<uint64_t>(data[5]) << 40;\n  k1 ^= static_cast<uint64_t>(data[4]) << 32;\n  k1 ^= static_cast<uint64_t>(data[3]) << 24;\n  k1 ^= static_cast<uint64_t>(data[2]) << 16;\n  k1 ^= static_cast<uint64_t>(data[1]) << 8;\n  k1 ^= static_cast<uint64_t>(data[0]) << 0;\n  k1 *= c1;\n  k1 = rotl64(k1, 31);\n  k1 *= c2;\n  h1 ^= k1;\n\n  h1 ^= sizeof(key);\n  h2 ^= sizeof(key);\n\n  h1 += h2;\n  h2 += h1;\n\n  h1 = fmix64(h1);\n  h2 = fmix64(h2);\n\n  h1 += h2;\n  h2 += h1;\n\n  out[0] = h1;\n  out[1] = h2;\n}\n\n}  // namespace detail\n\n/**\n * A hash table with open addressing (linear probing).\n * Keys are 64-bit unsigned integers, values can be any trivially copyable type.\n * Does not support individual element deletion - only full clear.\n */\ntemplate<class V, class AllocT = configured_alloc_callbacks> class hashtable {\n  static_assert(\n      __is_trivially_copyable(V),\n      \"hashtable only supports trivially copyable value types\");\n\n  public:\n  using key_type = uint64_t;\n\n  static constexpr key_type EMPTY_KEY = ~key_type {0};\n\n  struct keyhash {\n    key_type key;\n    uint64_t hash;\n  };\n\n  struct entry {\n    key_type key;\n    V        value;\n  };\n\n  private:\n  static constexpr uint32_t HASH_SEED       = 0x9e3779b9u;\n  static constexpr float    MAX_LOAD_FACTOR = 0.7f;\n\n  entry* slots_            = nullptr;\n  size_t capacity_         = 0;\n  size_t initial_capacity_ = 100u;\n  size_t size_             = 0;\n\n  public:\n  hashtable() noexcept = default;\n  explicit hashtable(size_t capacity) : initial_capacity_ {capacity} {\n  }\n  hashtable(hashtable&& other) noexcept {\n    *this = ngfi::move(other);\n  }\n  ~hashtable() noexcept {\n    destroy();\n  }\n\n  hashtable& operator=(hashtable&& other) noexcept {\n    destroy();\n    slots_          = other.slots_;\n    capacity_       = other.capacity_;\n    size_           = other.size_;\n   
 other.slots_    = nullptr;\n    other.capacity_ = 0;\n    other.size_     = 0;\n    return *this;\n  }\n\n  size_t size() const noexcept {\n    return size_;\n  }\n  size_t capacity() const noexcept {\n    return capacity_;\n  }\n  bool empty() const noexcept {\n    return size_ == 0;\n  }\n\n  static keyhash compute_hash(key_type key) noexcept {\n    uint64_t mmh3_out[2] = {0, 0};\n    detail::mmh3_x64_128(static_cast<uintptr_t>(key), HASH_SEED, mmh3_out);\n    return keyhash {key, mmh3_out[0] ^ mmh3_out[1]};\n  }\n\n  V* get(key_type key) noexcept {\n    return get_prehashed(compute_hash(key));\n  }\n  const V* get(key_type key) const noexcept {\n    return get_prehashed(compute_hash(key));\n  }\n\n  V* get_prehashed(const keyhash& kh) noexcept {\n    if (!slots_) { return nullptr; }\n    const size_t start_idx = kh.hash % capacity_;\n    for (size_t offset = 0; offset < capacity_; ++offset) {\n      const size_t idx = (start_idx + offset) % capacity_;\n      if (slots_[idx].key == kh.key) { return &slots_[idx].value; }\n      if (slots_[idx].key == EMPTY_KEY) {\n        return nullptr;  // Key not found\n      }\n    }\n    return nullptr;  // Table is full and key not found\n  }\n\n  const V* get_prehashed(const keyhash& kh) const noexcept {\n    return const_cast<hashtable*>(this)->get_prehashed(kh);\n  }\n\n  V* insert(key_type key, const V& value) noexcept {\n    return insert_prehashed(compute_hash(key), value);\n  }\n\n  V* insert_prehashed(const keyhash& kh, const V& value) noexcept {\n    // Check if we need to rehash\n    if (capacity_ == 0 ||\n        static_cast<float>(size_ + 1) / static_cast<float>(capacity_) > MAX_LOAD_FACTOR) {\n      if (!rehash(capacity_ ? 
capacity_ * 2 : initial_capacity_)) { return nullptr; }\n    }\n\n    return insert_internal(kh, value);\n  }\n\n  V* get_or_insert(key_type key, const V& default_value, bool& is_new) noexcept {\n    return get_or_insert_prehashed(compute_hash(key), default_value, is_new);\n  }\n\n  V* get_or_insert_prehashed(const keyhash& kh, const V& default_value, bool& is_new) noexcept {\n    if (!slots_) {\n      is_new = true;\n      return insert_prehashed(kh, default_value);\n    } else {\n      // First try to find existing entry\n      const size_t start_idx = kh.hash % capacity_;\n      for (size_t offset = 0; offset < capacity_; ++offset) {\n        const size_t idx = (start_idx + offset) % capacity_;\n        if (slots_[idx].key == kh.key) {\n          is_new = false;\n          return &slots_[idx].value;\n        }\n        if (slots_[idx].key == EMPTY_KEY) {\n          // Key not found, insert new entry\n          // Check if we need to rehash first\n          is_new = true;\n          return insert_prehashed(kh, default_value);\n        }\n      }\n      return nullptr;  // Table is full (should not happen with proper load factor)\n    }\n  }\n\n  void clear() noexcept {\n    if (slots_) {\n      for (size_t i = 0; i < capacity_; ++i) { slots_[i].key = EMPTY_KEY; }\n      size_ = 0;\n    }\n  }\n\n  class iterator {\n    public:\n    using value_type      = entry;\n    using pointer         = entry*;\n    using reference       = entry&;\n    using difference_type = std::ptrdiff_t;\n\n    iterator() noexcept = default;\n\n    reference operator*() const noexcept {\n      return *slot_;\n    }\n\n    pointer operator->() const noexcept {\n      return slot_;\n    }\n\n    iterator& operator++() noexcept {\n      ++slot_;\n      advance_to_valid();\n      return *this;\n    }\n\n    iterator operator++(int) noexcept {\n      iterator tmp = *this;\n      ++(*this);\n      return tmp;\n    }\n\n    bool operator==(const iterator& other) const noexcept {\n      return 
slot_ == other.slot_;\n    }\n\n    bool operator!=(const iterator& other) const noexcept {\n      return slot_ != other.slot_;\n    }\n\n    private:\n    friend class hashtable;\n\n    iterator(entry* slot, entry* end) noexcept : slot_(slot), end_(end) {\n      advance_to_valid();\n    }\n\n    void advance_to_valid() noexcept {\n      while (slot_ != end_ && slot_->key == EMPTY_KEY) { ++slot_; }\n    }\n\n    entry* slot_ = nullptr;\n    entry* end_  = nullptr;\n  };\n\n  iterator begin() noexcept {\n    return !slots_ ? end() : iterator(slots_, slots_ + capacity_);\n  }\n\n  iterator end() noexcept {\n    return !slots_ ? iterator(nullptr, nullptr) : iterator {slots_ + capacity_, slots_ + capacity_};\n  }\n\n  private:\n  void destroy() noexcept {\n    if (slots_ != nullptr) {\n      ngfi::freen(slots_, capacity_);\n      slots_    = nullptr;\n      capacity_ = 0;\n      size_     = 0;\n    }\n  }\n\n  // Prevent copying\n  hashtable(const hashtable&)            = delete;\n  hashtable& operator=(const hashtable&) = delete;\n\n  /**\n   * Internal insert without load factor check.\n   */\n  V* insert_internal(const keyhash& kh, const V& value) noexcept {\n    const size_t start_idx = kh.hash % capacity_;\n    for (size_t offset = 0; offset < capacity_; ++offset) {\n      const size_t idx = (start_idx + offset) % capacity_;\n      if (slots_[idx].key == kh.key) {\n        // Update existing\n        memcpy(&slots_[idx].value, &value, sizeof(V));\n        return &slots_[idx].value;\n      }\n      if (slots_[idx].key == EMPTY_KEY) {\n        // Insert new\n        slots_[idx].key = kh.key;\n        memcpy(&slots_[idx].value, &value, sizeof(V));\n        ++size_;\n        return &slots_[idx].value;\n      }\n    }\n    return nullptr;  // Should not happen if load factor is maintained\n  }\n\n  /**\n   * Rehash the table to a new capacity.\n   */\n  bool rehash(size_t new_capacity) noexcept {\n    entry* old_slots    = slots_;\n    size_t old_capacity = 
capacity_;\n\n    slots_ = AllocT::template allocn<entry>(new_capacity);\n    if (slots_ == nullptr) {\n      slots_ = old_slots;\n      return false;\n    }\n\n    capacity_ = new_capacity;\n    size_     = 0;\n\n    // Initialize new slots as empty\n    for (size_t i = 0; i < capacity_; ++i) { slots_[i].key = EMPTY_KEY; }\n\n    // Reinsert all existing entries\n    for (size_t i = 0; i < old_capacity; ++i) {\n      if (old_slots[i].key != EMPTY_KEY) {\n        insert_internal(compute_hash(old_slots[i].key), old_slots[i].value);\n      }\n    }\n\n    AllocT::freen(old_slots, old_capacity);\n    return true;\n  }\n};\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/internal.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include \"nicegraf.h\"\n#include <stdlib.h>\n\nngf_diagnostic_info ngfi_diag_info = {\n    .verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT,\n    .userdata  = NULL,\n    .callback  = NULL};\n\n// Default allocation callbacks.\nvoid* ngf_default_alloc(size_t obj_size, size_t nobjs, void*) {\n  return malloc(obj_size * nobjs);\n}\n\nvoid ngf_default_free(void* ptr, size_t, size_t, void*) {\n  free(ptr);\n}\n\nconst ngf_allocation_callbacks NGF_DEFAULT_ALLOC_CB = {ngf_default_alloc, ngf_default_free, NULL};\n\nconst ngf_allocation_callbacks* NGF_ALLOC_CB = &NGF_DEFAULT_ALLOC_CB;\n\nvoid ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks) {\n  if (callbacks == NULL) {\n    NGF_ALLOC_CB = &NGF_DEFAULT_ALLOC_CB;\n  } else {\n    NGF_ALLOC_CB = callbacks;\n  }\n}\n\nngf_sample_count 
ngfi_get_highest_sample_count(size_t counts_bitmap) {\n  size_t res = (size_t)NGF_SAMPLE_COUNT_64;\n  while ((res & counts_bitmap) == 0 && res > 1) { res >>= 1; }\n  return (ngf_sample_count)res;\n}\n"
  },
  {
    "path": "source/ngf-common/macros.h",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\n#include \"nicegraf.h\"\n\n#include <assert.h>\n#include <stdlib.h>\n#if defined(_WIN32) || defined(_WIN64)\n#define NGFI_THREADLOCAL __declspec(thread)\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n// emulate pthread mutexes\ntypedef CRITICAL_SECTION pthread_mutex_t;\n#define pthread_mutex_lock(m)    (EnterCriticalSection(m), 0)\n#define pthread_mutex_unlock(m)  (LeaveCriticalSection(m), 0)\n#define pthread_mutex_init(m, a) (InitializeCriticalSection(m), 0)\n#define pthread_mutex_destroy(m) (DeleteCriticalSection(m), 0)\n// dynamic module loading\ntypedef HMODULE ngfi_module_handle;\n#else\n#define NGFI_THREADLOCAL __thread\n#include <pthread.h>\n// dynamic module loading (emulate win32 api)\n#define LoadLibraryA(name) dlopen(name, RTLD_NOW)\n#define GetProcAddress(h, n) dlsym(h, n)\ntypedef 
void* ngfi_module_handle;\n#endif\n\n// Custom allocation callbacks.\nextern const ngf_allocation_callbacks* NGF_ALLOC_CB;\n\n// Convenience macros for invoking custom memory allocation callbacks.\n// C++ versions are defined after the template functions below.\n#ifndef __cplusplus\n#define NGFI_ALLOC(type)     ((type*)NGF_ALLOC_CB->allocate(sizeof(type), 1, NGF_ALLOC_CB->userdata))\n#define NGFI_ALLOCN(type, n) ((type*)NGF_ALLOC_CB->allocate(sizeof(type), n, NGF_ALLOC_CB->userdata))\n#define NGFI_FREE(ptr)       (NGF_ALLOC_CB->free((void*)(ptr), sizeof(*ptr), 1, NGF_ALLOC_CB->userdata))\n#define NGFI_FREEN(ptr, n)   (NGF_ALLOC_CB->free((void*)(ptr), sizeof(*ptr), n, NGF_ALLOC_CB->userdata))\n#endif\n\n// Macro for determining size of arrays.\n#if defined(_MSC_VER)\n#include <stdlib.h>\n#define NGFI_ARRAYSIZE(arr) _countof(arr)\n#else\n#define NGFI_ARRAYSIZE(arr) (sizeof(arr) / sizeof(arr[0]))\n#endif\n\n// For when you don't feel like comparing structs field-by-field.\n#define NGFI_STRUCT_EQ(s1, s2) \\\n  (sizeof(s1) == sizeof(s2) && memcmp((void*)&s1, (void*)&s2, sizeof(s1)) == 0)\n\n// It is $CURRENT_YEAR and C does not have a standard thing for this.\n#define NGFI_MAX(a, b) (a > b ? a : b)\n#define NGFI_MIN(a, b) (a < b ? a : b)\n\n// For fixing unreferenced parameter warnings.\n#define NGFI_IGNORE_VAR(name) \\\n  { (void)name; }\n\n// MSVC warnings that are safe to ignore.\n#pragma warning(disable : 4201)\n#pragma warning(disable : 4200)\n#pragma warning(disable : 4204)\n#pragma warning(disable : 4221)\n\nextern ngf_diagnostic_info ngfi_diag_info;\n\n// Invoke diagnostic message callback directly.\n#define NGFI_DIAG_MSG(level, fmt, ...)                                           \\\n  if (ngfi_diag_info.callback) {                                                 \\\n    ngfi_diag_info.callback(level, ngfi_diag_info.userdata, fmt, ##__VA_ARGS__); \\\n  }\n#define NGFI_DIAG_INFO(fmt, ...)    
NGFI_DIAG_MSG(NGF_DIAGNOSTIC_INFO, fmt, ##__VA_ARGS__)\n#define NGFI_DIAG_WARNING(fmt, ...) NGFI_DIAG_MSG(NGF_DIAGNOSTIC_WARNING, fmt, ##__VA_ARGS__)\n#define NGFI_DIAG_ERROR(fmt, ...)   NGFI_DIAG_MSG(NGF_DIAGNOSTIC_ERROR, fmt, ##__VA_ARGS__)\n\n// Convenience macro to invoke diagnostic callback and raise error on unmet precondition.\n#define NGFI_CHECK_CONDITION(cond, err_code, err_fmtstring, ...) \\\n  if (!(cond)) {                                                 \\\n    NGFI_DIAG_ERROR(err_fmtstring, ##__VA_ARGS__);               \\\n    return err_code;                                             \\\n  }\n\n// Convenience macro to immediately die on an unmet precondition.\n#define NGFI_CHECK_FATAL(cond, err_fmtstring, ...) \\\nif (!(cond)) { \\\n  NGFI_DIAG_ERROR(err_fmtstring, ##__VA_ARGS__); \\\n  exit(1); \\\n}\n\ntypedef long double ngfi_max_align_t;\n\n#define NGFI_MAX_ALIGNMENT (sizeof(ngfi_max_align_t))\n\nstatic inline size_t ngfi_align_size(size_t s) {\n  static const size_t align_mask = NGFI_MAX_ALIGNMENT - 1u;\n  const size_t q = s & (~align_mask);\n  const size_t r = s & align_mask;\n\n  return q + ((r == 0) ? 0 : NGFI_MAX_ALIGNMENT);\n}\n\ntypedef struct ngfi_range {\n  size_t first_idx;\n  size_t last_idx;\n} ngfi_range;\n\nvoid ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks);\n\n#ifdef __cplusplus\n#include <new>\n#include \"ngf-common/util.h\"\n\nnamespace ngfi {\n\ntemplate<class T, class... Args>\nT* alloc(Args&&... 
arg) noexcept {\n  T* ptr = static_cast<T*>(NGF_ALLOC_CB->allocate(sizeof(T), 1, NGF_ALLOC_CB->userdata));\n  if (ptr != nullptr) {\n    new (ptr) T(ngfi::forward<Args>(arg)...);\n  }\n  return ptr;\n}\n\ntemplate<class T>\nT* allocn(size_t n) noexcept {\n  if (n == 0) return nullptr;\n  T* ptr = static_cast<T*>(NGF_ALLOC_CB->allocate(sizeof(T), n, NGF_ALLOC_CB->userdata));\n  if (ptr != nullptr) {\n    for (size_t i = 0; i < n; ++i) {\n      new (&ptr[i]) T();\n    }\n  }\n  return ptr;\n}\n\ntemplate<class T>\nvoid free(T* ptr) noexcept {\n  if (ptr != nullptr) {\n    if constexpr (! __is_trivially_copyable(T)) {\n      ptr->~T();\n    }\n    NGF_ALLOC_CB->free(ptr, sizeof(T), 1, NGF_ALLOC_CB->userdata);\n  }\n}\n\ntemplate<class T>\nvoid freen(T* ptr, size_t n) noexcept {\n  if (ptr != nullptr) {\n    if constexpr (!__is_trivially_copyable(T)) {\n      for (size_t i = 0; i < n; ++i) {\n        ptr[i].~T();\n      }\n    }\n    NGF_ALLOC_CB->free((void*)ptr, sizeof(T), n, NGF_ALLOC_CB->userdata);\n  }\n}\n\nstruct configured_alloc_callbacks  {\n    template <class T> static T* alloc() noexcept { return ::ngfi::alloc<T>(); }\n    template <class T> static T* allocn(size_t n) noexcept { return ::ngfi::allocn<T>(n); }\n    template <class T> static void free(T* ptr) noexcept { ::ngfi::free<T>(ptr); }\n    template <class T> static void freen(T* ptr, size_t n) noexcept { ::ngfi::freen<T>(ptr, n); }\n};\n\nstruct system_alloc_callbacks  {\n    template <class T> static T* alloc() noexcept { return new T{}; }\n    template <class T> static T* allocn(size_t n) noexcept { return new T[n]; }\n    template <class T> static void free(T* ptr) noexcept { delete ptr; }\n    template <class T> static void freen(T* ptr, size_t) noexcept { delete[] ptr; }\n};\n\n\n}  // namespace ngfi\n\n// C++ versions of allocation macros that use the template functions.\n#define NGFI_ALLOC(type)     (ngfi::alloc<type>())\n#define NGFI_ALLOCN(type, n) (ngfi::allocn<type>(n))\n#define 
NGFI_FREE(ptr)       (ngfi::free(ptr))\n#define NGFI_FREEN(ptr, n)   (ngfi::freen(ptr, n))\n\n\n#endif\n\n"
  },
  {
    "path": "source/ngf-common/silence.h",
    "content": "#pragma once\n\n#ifndef _CRT_SECURE_NO_WARNINGS\n#define _CRT_SECURE_NO_WARNINGS\n#endif\n#ifdef __clang__\n#pragma clang diagnostic ignored \"-Wnullability-completeness\"\n#if __has_warning(\"-Wcast-function-type-mismatch\")\n#pragma clang diagnostic ignored \"-Wcast-function-type-mismatch\"\n#endif\n#endif\n"
  },
  {
    "path": "source/ngf-common/unique-ptr.h",
    "content": "#pragma once\n\n#include \"macros.h\"\n\nnamespace ngfi {\n\ntemplate<class T> class unique_ptr {\n  private:\n  T* obj_ = nullptr;\n\n  public:\n  unique_ptr() noexcept = default;\n  unique_ptr(T* obj) : obj_ {obj} {\n  }\n  ~unique_ptr() noexcept {\n    destroy();\n  }\n  unique_ptr(unique_ptr&& other) {\n    *this = ngfi::move(other);\n  }\n  unique_ptr(const unique_ptr&) = delete;\n\n  unique_ptr& operator=(unique_ptr&& other) {\n    destroy();\n    obj_       = other.obj_;\n    other.obj_ = nullptr;\n    return *this;\n  }\n  unique_ptr& operator=(const unique_ptr&) = delete;\n\n  T* release() noexcept {\n    auto r = obj_;\n    obj_   = nullptr;\n    return r;\n  }\n  T* get() noexcept {\n    return obj_;\n  }\n  const T* get() const noexcept {\n    return obj_;\n  }\n  T* operator->() noexcept {\n    return get();\n  }\n  operator bool() const noexcept {\n    return obj_ != nullptr;\n  }\n\n  template<class... Args> static unique_ptr make(Args&&... args) {\n    return unique_ptr {ngfi::alloc<T>(ngfi::forward<Args>(args)...)};\n  }\n\n  private:\n  void destroy() noexcept {\n    if (obj_) ngfi::free<T>(obj_);\n  }\n};\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/util.c",
    "content": "/**\n * Copyright (c) 2021 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"ngf-common/macros.h\"\n#include \"nicegraf-util.h\"\n\n#include <assert.h>\n#include <string.h>\n\n#if defined(_WIN32) || defined(_WIN64)\n#pragma comment(lib, \"ws2_32.lib\")\n#include <winsock2.h>\n#else\n#include <arpa/inet.h>\n#endif\n\nvoid ngf_util_create_default_graphics_pipeline_data(ngf_util_graphics_pipeline_data* result) {\n  ngf_stencil_info default_stencil = {\n      .fail_op       = NGF_STENCIL_OP_KEEP,\n      .pass_op       = NGF_STENCIL_OP_KEEP,\n      .depth_fail_op = NGF_STENCIL_OP_KEEP,\n      .compare_op    = NGF_COMPARE_OP_EQUAL,\n      .compare_mask  = 0,\n      .write_mask    = 0,\n      .reference     = 0};\n  ngf_depth_stencil_info dsi = {\n      .stencil_test  = false,\n      .depth_test    = false,\n      .depth_write   = false,\n      .depth_compare = NGF_COMPARE_OP_LESS,\n      
.front_stencil = default_stencil,\n      .back_stencil  = default_stencil};\n  result->depth_stencil_info = dsi;\n\n  ngf_vertex_input_info vii = {.nattribs = 0, .nvert_buf_bindings = 0};\n  result->vertex_input_info = vii;\n\n  ngf_multisample_info msi = {.sample_count = NGF_SAMPLE_COUNT_1, .alpha_to_coverage = false};\n  result->multisample_info = msi;\n\n  ngf_rasterization_info ri = {\n      .cull_mode    = NGF_CULL_MODE_BACK,\n      .discard      = false,\n      .front_face   = NGF_FRONT_FACE_COUNTER_CLOCKWISE,\n      .polygon_mode = NGF_POLYGON_MODE_FILL};\n  result->rasterization_info = ri;\n\n  ngf_specialization_info spi = {\n      .specializations  = NULL,\n      .nspecializations = 0u,\n      .value_buffer     = NULL};\n  result->spec_info = spi;\n\n  ngf_input_assembly_info iai = {\n      .enable_primitive_restart = false,\n      .primitive_topology        = NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST};\n  result->input_assembly_info    = iai;\n\n  ngf_graphics_pipeline_info gpi = {\n      .color_attachment_blend_states = NULL,\n      .depth_stencil           = &result->depth_stencil_info,\n      .input_info              = &result->vertex_input_info,\n      .multisample             = &result->multisample_info,\n      .input_assembly_info     = &result->input_assembly_info,\n      .shader_stages           = {NULL},\n      .nshader_stages          = 0u,\n      .rasterization           = &result->rasterization_info,\n      .spec_info               = &result->spec_info,\n      .debug_name              = NULL};\n  result->pipeline_info = gpi;\n}\n\nconst char* ngf_util_get_error_name(const ngf_error err) {\n  static const char* ngf_error_names[] = {\n      \"NGF_ERROR_OK\",\n      \"NGF_ERROR_OUT_OF_MEM\",\n      \"NGF_ERROR_OBJECT_CREATION_FAILED\",\n      \"NGF_ERROR_OUT_OF_BOUNDS\",\n      \"NGF_ERROR_INVALID_FORMAT\",\n      \"NGF_ERROR_INVALID_SIZE\",\n      \"NGF_ERROR_INVALID_ENUM\",\n      \"NGF_ERROR_INVALID_OPERATION\"};\n  if ((size_t)err > 
NGFI_ARRAYSIZE(ngf_error_names) - 1) { return \"invalid error code\"; }\n  return ngf_error_names[err];\n}\n"
  },
  {
    "path": "source/ngf-common/util.h",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#pragma once\n\nnamespace ngfi {\n\n/**\n * Remove reference from a type.\n */\ntemplate<class T>\nstruct remove_reference {\n  using type = T;\n};\n\ntemplate<class T>\nstruct remove_reference<T&> {\n  using type = T;\n};\n\ntemplate<class T>\nstruct remove_reference<T&&> {\n  using type = T;\n};\n\ntemplate<class T>\nusing remove_reference_t = typename remove_reference<T>::type;\n\n/**\n * Cast to rvalue reference to enable move semantics.\n * Equivalent to std::move.\n */\ntemplate<class T>\nconstexpr remove_reference_t<T>&& move(T&& t) noexcept {\n  return static_cast<remove_reference_t<T>&&>(t);\n}\n\ntemplate<class T> constexpr T&& forward(typename remove_reference<T>::type& t) noexcept {\n  return static_cast<T&&>(t);\n}\n\ntemplate<class T> constexpr T&& forward(typename remove_reference<T>::type&& t) noexcept 
{\n  return static_cast<T&&>(t);\n}\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-common/value-or-error.h",
    "content": "#pragma once\n\n#include \"ngf-common/util.h\"\n\n#include <nicegraf.h>\n\nnamespace ngfi {\n\ntemplate<class ErrorT> ErrorT missing_value_error() noexcept;\n\ntemplate<class ErrorT> ErrorT non_error() noexcept;\n\ntemplate<class ValueT, class ErrorT> class value_or_error {\n  private:\n  alignas(ValueT) char value_[sizeof(ValueT)];\n  ErrorT error_;\n\n  public:\n  value_or_error(const ValueT& v) noexcept : error_{non_error<ErrorT>()} { new  (value_) ValueT { v }; }\n  value_or_error(ValueT&& v) noexcept : error_{non_error<ErrorT>()} {\n    new (value_) ValueT {ngfi::move(v)};\n  }\n  value_or_error(ErrorT err) noexcept : error_ {err} {\n    if (error_ == non_error<ErrorT>()) abort();\n  }\n  value_or_error(value_or_error&& other) {\n    *this = ngfi::move(other);\n  }\n  value_or_error(const value_or_error&) = delete;\n  ~value_or_error() noexcept {\n    maybe_destroy_value();\n  }\n\n  bool has_error() const noexcept {\n    return error_ != non_error<ErrorT>();\n  }\n  ErrorT error() const noexcept {\n    return error_;\n  }\n\n  const ValueT& value() const noexcept {\n    if (has_error()) { abort(); }\n    return *((const ValueT*)value_);\n  }\n  ValueT& value() noexcept {\n    if (has_error()) abort();\n    return *((ValueT*)value_);\n  }\n\n  value_or_error& operator=(value_or_error&& other) {\n    maybe_destroy_value();\n    if (other.has_error()) {\n      error_ = other.error();\n    } else {\n      new (value_) ValueT {ngfi::move(other.value())};\n      error_ = non_error<ErrorT>();\n      other.error_ = missing_value_error<ErrorT>();\n    }\n    return *this;\n  }\n\n  value_or_error& operator=(const value_or_error&) = delete;\n\n  bool has_value() const noexcept {  return !has_error(); }\n  operator bool() const noexcept { return has_value(); }\n\n  private:\n  void maybe_destroy_value() noexcept {\n    if (!has_error()) {\n      ((ValueT*)value_)->~ValueT();\n      error_ = missing_value_error<ErrorT>();\n    }\n  }\n};\n\ntemplate<class 
ValueT> using value_or_ngferr = value_or_error<ValueT, ngf_error>;\ntemplate<class ValueT> using maybe_ngfptr = value_or_error<ngfi::unique_ptr<ValueT>, ngf_error>;\n\ntemplate<> inline ngf_error missing_value_error<ngf_error>() noexcept {\n  return NGF_ERROR_INVALID_OPERATION;\n}\n\ntemplate<> inline ngf_error non_error<ngf_error>() noexcept {\n  return NGF_ERROR_OK;\n}\n\n}  // namespace ngfi\n"
  },
  {
    "path": "source/ngf-mtl/impl.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n#include \"ngf-common/array.h\"\n#include \"ngf-common/cmdbuf-state.h\"\n#include \"ngf-common/default-arenas.h\"\n#include \"ngf-common/macros.h\"\n#include \"ngf-common/unique-ptr.h\"\n#include \"ngf-common/value-or-error.h\"\n#include \"nicegraf-mtl-handles.h\"\n#include \"nicegraf-wrappers.h\"\n#include \"nicegraf.h\"\n\n#define NS_PRIVATE_IMPLEMENTATION\n#define MTL_PRIVATE_IMPLEMENTATION\n#define CA_PRIVATE_IMPLEMENTATION\n#include <MetalSingleHeader.hpp>\n\n// Indicates the maximum amount of buffers (attrib, index and uniform) that\n// can be bound at the same time.\n// This is required to work around a discrepancy between nicegraf's and Metal's\n// buffer binding models.\n// In Metal, bindings for vertex attribute buffers share the same space of IDs\n// with regular buffers. 
Therefore assigning binding 0 to a vertex\n// attrib buffer would cause a conflict if a vertex shader also requires a\n// uniform buffer bound at 0.\n// In order to solve this, attribute buffer bindings are remapped in the\n// following way:\n// nicegraf's attrib binding 0 becomes Metal vertex buffer binding 30\n// attrib binding 1 becomes Metal vertex buffer binding 29\n// ...and so on.\n// NOTE: the specific value 30 is based on the max total number of buffer bindings\n// specified in metal feature set tables.\n// TODO: consider using information from pipeline metadata to use an alternative\n//       remapping scheme: attrib binding 0 -> N; attrib binding 1 -> N+1;...\n//       etc. where N is the total number of uniform buffers consumed by the\n//       vertex stage.\nstatic constexpr uint32_t MAX_BUFFER_BINDINGS = 30u;\n\n// Metal device handle. We choose one upon initialization and always use that\n// one.\nMTL::Device* MTL_DEVICE = nullptr;\n\nngf_device_capabilities DEVICE_CAPS;\n\n#pragma mark ngf_enum_maps\n\nstatic MTL::BlendFactor get_mtl_blend_factor(ngf_blend_factor f) {\n  static constexpr MTL::BlendFactor factors[NGF_BLEND_FACTOR_COUNT] = {\n      MTL::BlendFactorZero,\n      MTL::BlendFactorOne,\n      MTL::BlendFactorSourceColor,\n      MTL::BlendFactorOneMinusSourceColor,\n      MTL::BlendFactorDestinationColor,\n      MTL::BlendFactorOneMinusDestinationColor,\n      MTL::BlendFactorSourceAlpha,\n      MTL::BlendFactorOneMinusSourceAlpha,\n      MTL::BlendFactorDestinationAlpha,\n      MTL::BlendFactorOneMinusDestinationAlpha,\n      MTL::BlendFactorBlendColor,\n      MTL::BlendFactorOneMinusBlendColor,\n      MTL::BlendFactorBlendAlpha,\n      MTL::BlendFactorOneMinusBlendAlpha};\n  return factors[f];\n}\n\nstatic MTL::BlendOperation get_mtl_blend_operation(ngf_blend_op op) {\n  static constexpr MTL::BlendOperation ops[NGF_BLEND_OP_COUNT] = {\n      MTL::BlendOperationAdd,\n      MTL::BlendOperationSubtract,\n      
MTL::BlendOperationReverseSubtract,\n      MTL::BlendOperationMin,\n      MTL::BlendOperationMax};\n  return ops[op];\n}\n\nstruct mtl_format {\n  const MTL::PixelFormat format         = MTL::PixelFormatInvalid;\n  const uint8_t          bits_per_block = 0;\n  const bool             srgb           = false;\n  const uint8_t          block_width    = 1;\n  const uint8_t          block_height   = 1;\n};\n\nstatic mtl_format get_mtl_pixel_format(ngf_image_format f) {\n  static const mtl_format formats[NGF_IMAGE_FORMAT_COUNT] = {\n    {MTL::PixelFormatR8Unorm, 8},\n    {MTL::PixelFormatRG8Unorm, 16},\n    {MTL::PixelFormatRG8Snorm, 16},\n    {},  // RGB8, unsupported\n    {MTL::PixelFormatRGBA8Unorm, 32},\n    {},  // SRGB8, unsupported\n    {MTL::PixelFormatRGBA8Unorm_sRGB, 32, true},\n    {},  // BGR8, unsupported\n    {MTL::PixelFormatBGRA8Unorm, 32},\n    {},  // BGR8_SRGB, unsupported\n    {MTL::PixelFormatBGRA8Unorm_sRGB, 32, true},\n    {MTL::PixelFormatRGB10A2Unorm, 32},\n    {MTL::PixelFormatR32Float, 32},\n    {MTL::PixelFormatRG32Float, 64},\n    {},  // RGB32F, unsupported\n    {MTL::PixelFormatRGBA32Float, 128},\n    {MTL::PixelFormatR16Float, 16},\n    {MTL::PixelFormatRG16Float, 32},\n    {},  // RGB16F, unsupported\n    {MTL::PixelFormatRGBA16Float, 64},\n    {MTL::PixelFormatRG11B10Float, 32},\n    {MTL::PixelFormatRGB9E5Float, 32},\n    {MTL::PixelFormatR16Unorm, 16},\n    {MTL::PixelFormatR16Snorm, 16},\n    {MTL::PixelFormatRG16Unorm, 32},\n    {MTL::PixelFormatRG16Snorm, 32},\n    {MTL::PixelFormatRGBA16Unorm, 64},\n    {MTL::PixelFormatRGBA16Snorm, 64},\n    {MTL::PixelFormatR8Uint, 8},\n    {MTL::PixelFormatR8Sint, 8},\n    {MTL::PixelFormatR16Uint, 16},\n    {MTL::PixelFormatR16Sint, 16},\n    {MTL::PixelFormatRG16Uint, 32},\n    {},  // RGB16U, unsupported\n    {MTL::PixelFormatRGBA16Uint, 64},\n    {MTL::PixelFormatR32Uint, 32},\n    {MTL::PixelFormatRG32Uint, 64},\n    {},  // RGB32U, unsupported\n    {MTL::PixelFormatRGBA32Uint, 128},\n#if 
TARGET_OS_OSX\n    {MTL::PixelFormatBC7_RGBAUnorm, 128, false, 4, 4},\n    {MTL::PixelFormatBC7_RGBAUnorm_sRGB, 128, true, 4, 4},\n    {MTL::PixelFormatBC6H_RGBFloat, 128, false, 4, 4},\n    {MTL::PixelFormatBC6H_RGBUfloat, 128, false, 4, 4},\n    {MTL::PixelFormatBC5_RGUnorm, 128, false, 4, 4},\n    {MTL::PixelFormatBC5_RGSnorm, 128, false, 4, 4},\n#else\n    // BCn formats unsupported un iOS until 16.4\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n#endif\n#if TARGET_OS_OSX && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000\n    // ASTC is not supported till macOS 11.0\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n    {},\n#else\n    {MTL::PixelFormatASTC_4x4_LDR, 128, false, 4, 4},\n    {MTL::PixelFormatASTC_4x4_sRGB, 128, true, 4, 4},\n    {MTL::PixelFormatASTC_5x4_LDR, 128, false, 5, 4},\n    {MTL::PixelFormatASTC_5x4_sRGB, 128, true, 5, 4},\n    {MTL::PixelFormatASTC_5x5_LDR, 128, false, 5, 5},\n    {MTL::PixelFormatASTC_5x5_sRGB, 128, true, 5, 5},\n    {MTL::PixelFormatASTC_6x5_LDR, 128, false, 6, 5},\n    {MTL::PixelFormatASTC_6x5_sRGB, 128, true, 6, 5},\n    {MTL::PixelFormatASTC_6x6_LDR, 128, false, 6, 6},\n    {MTL::PixelFormatASTC_6x6_sRGB, 128, true, 6, 6},\n    {MTL::PixelFormatASTC_8x5_LDR, 128, false, 8, 5},\n    {MTL::PixelFormatASTC_8x5_sRGB, 128, true, 8, 5},\n    {MTL::PixelFormatASTC_8x6_LDR, 128, false, 8, 6},\n    {MTL::PixelFormatASTC_8x6_sRGB, 128, true, 8, 6},\n    {MTL::PixelFormatASTC_8x8_LDR, 128, false, 8, 8},\n    {MTL::PixelFormatASTC_8x8_sRGB, 128, true, 8, 8},\n    {MTL::PixelFormatASTC_10x5_LDR, 128, false, 10, 5},\n    {MTL::PixelFormatASTC_10x5_sRGB, 128, true, 10, 5},\n    {MTL::PixelFormatASTC_10x6_LDR, 128, false, 10, 6},\n    {MTL::PixelFormatASTC_10x6_sRGB, 128, true, 10, 6},\n    {MTL::PixelFormatASTC_10x8_LDR, 
128, false, 10, 8},\n    {MTL::PixelFormatASTC_10x8_sRGB, 128, true, 10, 8},\n    {MTL::PixelFormatASTC_10x10_LDR, 128, false, 10, 10},\n    {MTL::PixelFormatASTC_10x10_sRGB, 128, true, 10, 10},\n    {MTL::PixelFormatASTC_12x10_LDR, 128, false, 12, 10},\n    {MTL::PixelFormatASTC_12x10_sRGB, 128, true, 12, 10},\n    {MTL::PixelFormatASTC_12x12_LDR, 128, false, 12, 12},\n    {MTL::PixelFormatASTC_12x12_sRGB, 128, true, 12, 12},\n#endif\n    {MTL::PixelFormatDepth32Float, 32},\n    {MTL::PixelFormatDepth16Unorm, 16},\n    {MTL::PixelFormatDepth32Float_Stencil8, 32},  // Emulate DEPTH24_STENCIL8 on iOS\n    {}\n  };\n  return formats[f];\n}\n\nstatic MTL::LoadAction get_mtl_load_action(ngf_attachment_load_op op) {\n  static const MTL::LoadAction action[NGF_LOAD_OP_COUNT] = {\n      MTL::LoadActionDontCare,\n      MTL::LoadActionLoad,\n      MTL::LoadActionClear};\n  return action[op];\n}\n\nstatic MTL::StoreAction get_mtl_store_action(ngf_attachment_store_op op) {\n  static const MTL::StoreAction action[NGF_STORE_OP_COUNT] = {\n      MTL::StoreActionDontCare,\n      MTL::StoreActionStore,\n      MTL::StoreActionMultisampleResolve};\n  return action[op];\n}\n\nstatic MTL::DataType get_mtl_type(ngf_type type) {\n  static const MTL::DataType types[NGF_TYPE_COUNT] = {\n      MTL::DataTypeNone, /* Int8, Metal does not support.*/\n      MTL::DataTypeNone, /*UInt8, Metal does not support*/\n      MTL::DataTypeShort,\n      MTL::DataTypeUShort,\n      MTL::DataTypeInt,\n      MTL::DataTypeUInt,\n      MTL::DataTypeFloat,\n      MTL::DataTypeHalf,\n      MTL::DataTypeNone /* Double,Metal does not support.*/\n  };\n  return types[type];\n}\n\nstatic MTL::VertexFormat get_mtl_attrib_format(ngf_type type, uint32_t size, bool normalized) {\n  static const MTL::VertexFormat formats[NGF_TYPE_COUNT][2][4] = {\n      {{MTL::VertexFormatChar,\n        MTL::VertexFormatChar2,\n        MTL::VertexFormatChar3,\n        MTL::VertexFormatChar4},\n       {MTL::VertexFormatCharNormalized,\n   
     MTL::VertexFormatChar2Normalized,\n        MTL::VertexFormatChar3Normalized,\n        MTL::VertexFormatChar4Normalized}},\n      {{MTL::VertexFormatUChar,\n        MTL::VertexFormatUChar2,\n        MTL::VertexFormatUChar3,\n        MTL::VertexFormatUChar4},\n       {MTL::VertexFormatUCharNormalized,\n        MTL::VertexFormatUChar2Normalized,\n        MTL::VertexFormatUChar3Normalized,\n        MTL::VertexFormatUChar4Normalized}},\n      {{MTL::VertexFormatShort,\n        MTL::VertexFormatShort2,\n        MTL::VertexFormatShort3,\n        MTL::VertexFormatShort4},\n       {MTL::VertexFormatShortNormalized,\n        MTL::VertexFormatShort2Normalized,\n        MTL::VertexFormatShort3Normalized,\n        MTL::VertexFormatShort4Normalized}},\n      {{MTL::VertexFormatUShort,\n        MTL::VertexFormatUShort2,\n        MTL::VertexFormatUShort3,\n        MTL::VertexFormatUShort4},\n       {MTL::VertexFormatUShortNormalized,\n        MTL::VertexFormatUShort2Normalized,\n        MTL::VertexFormatUShort3Normalized,\n        MTL::VertexFormatUShort4Normalized}},\n      {{MTL::VertexFormatInt, MTL::VertexFormatInt2, MTL::VertexFormatInt3, MTL::VertexFormatInt4},\n       {MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid}},\n      {{MTL::VertexFormatUInt,\n        MTL::VertexFormatUInt2,\n        MTL::VertexFormatUInt3,\n        MTL::VertexFormatUInt4},\n       {MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid}},\n      {{MTL::VertexFormatFloat,\n        MTL::VertexFormatFloat2,\n        MTL::VertexFormatFloat3,\n        MTL::VertexFormatFloat4},\n       {MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid}},\n      {{MTL::VertexFormatHalf,\n        MTL::VertexFormatHalf2,\n        MTL::VertexFormatHalf3,\n        
MTL::VertexFormatHalf4},\n       {MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid}},\n      {{MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,  // Double, Metal does not support.\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid},\n       {MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid,\n        MTL::VertexFormatInvalid}}};\n  assert(size <= 4u && size > 0u);\n  return formats[type][normalized ? 1 : 0][size - 1u];\n}\n\nstatic MTL::VertexStepFunction get_mtl_step_function(ngf_vertex_input_rate rate) {\n  static const MTL::VertexStepFunction funcs[NGF_VERTEX_INPUT_RATE_COUNT] = {\n      MTL::VertexStepFunctionPerVertex,\n      MTL::VertexStepFunctionPerInstance};\n  return funcs[rate];\n}\n\nstatic MTL::PrimitiveTopologyClass get_mtl_primitive_topology_class(ngf_primitive_topology t) {\n  static const MTL::PrimitiveTopologyClass topo_class[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {\n      MTL::PrimitiveTopologyClassTriangle,\n      MTL::PrimitiveTopologyClassTriangle,\n      MTL::PrimitiveTopologyClassLine,\n      MTL::PrimitiveTopologyClassLine,\n  };\n  return topo_class[t];\n}\n\nstatic MTL::PrimitiveType get_mtl_primitive_type(ngf_primitive_topology type) {\n  static const MTL::PrimitiveType types[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {\n      MTL::PrimitiveTypeTriangle,\n      MTL::PrimitiveTypeTriangleStrip,\n      MTL::PrimitiveTypeLine,\n      MTL::PrimitiveTypeLineStrip};\n  return types[type];\n}\n\nstatic MTL::IndexType get_mtl_index_type(ngf_type type) {\n  assert(type == NGF_TYPE_UINT16 || type == NGF_TYPE_UINT32);\n  return type == NGF_TYPE_UINT16 ? 
MTL::IndexTypeUInt16 : MTL::IndexTypeUInt32;\n}\n\nstatic MTL::CompareFunction get_mtl_compare_function(ngf_compare_op op) {\n  static const MTL::CompareFunction compare_fns[NGF_COMPARE_OP_COUNT] = {\n      MTL::CompareFunctionNever,\n      MTL::CompareFunctionLess,\n      MTL::CompareFunctionLessEqual,\n      MTL::CompareFunctionEqual,\n      MTL::CompareFunctionGreaterEqual,\n      MTL::CompareFunctionGreater,\n      MTL::CompareFunctionNotEqual,\n      MTL::CompareFunctionAlways};\n  return compare_fns[op];\n}\n\nstatic MTL::StencilOperation get_mtl_stencil_op(ngf_stencil_op op) {\n  static const MTL::StencilOperation stencil_ops[NGF_STENCIL_OP_COUNT] = {\n      MTL::StencilOperationKeep,\n      MTL::StencilOperationZero,\n      MTL::StencilOperationReplace,\n      MTL::StencilOperationIncrementClamp,\n      MTL::StencilOperationIncrementWrap,\n      MTL::StencilOperationDecrementClamp,\n      MTL::StencilOperationDecrementWrap,\n      MTL::StencilOperationInvert};\n  return stencil_ops[op];\n}\n\nstatic MTL::CullMode get_mtl_culling(ngf_cull_mode c) {\n  static const MTL::CullMode cull_modes[NGF_CULL_MODE_COUNT] = {\n      MTL::CullModeBack,\n      MTL::CullModeFront,\n      MTL::CullModeNone, /* Metal has no front + back culling */\n      MTL::CullModeNone};\n  return cull_modes[c];\n}\n\nstatic MTL::Winding get_mtl_winding(ngf_front_face_mode w) {\n  static const MTL::Winding windings[NGF_FRONT_FACE_COUNT] = {\n      MTL::WindingCounterClockwise,\n      MTL::WindingClockwise};\n  return windings[w];\n}\n\nstatic ngfi::value_or_ngferr<MTL::TextureType>\nget_mtl_texture_type(ngf_image_type type, uint32_t nlayers, ngf_sample_count sample_count) {\n  if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers == 1 && sample_count == NGF_SAMPLE_COUNT_1) {\n    return MTL::TextureType2D;\n  } else if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1 && sample_count == NGF_SAMPLE_COUNT_1) {\n    return MTL::TextureType2DArray;\n  }\n  if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers 
== 1 && sample_count != NGF_SAMPLE_COUNT_1) {\n    return MTL::TextureType2DMultisample;\n  } else if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1 && sample_count != NGF_SAMPLE_COUNT_1) {\n    if (__builtin_available(iOS 14.0, *)) return MTL::TextureType2DMultisampleArray;\n  } else if (type == NGF_IMAGE_TYPE_IMAGE_3D) {\n    return MTL::TextureType3D;\n  } else if (type == NGF_IMAGE_TYPE_CUBE && nlayers == 1) {\n    return MTL::TextureTypeCube;\n  } else if (type == NGF_IMAGE_TYPE_CUBE && nlayers > 1) {\n    return MTL::TextureTypeCubeArray;\n  }\n  return NGF_ERROR_INVALID_FORMAT;\n}\n\nstatic ngfi::value_or_ngferr<const MTL::SamplerAddressMode>\nget_mtl_address_mode(ngf_sampler_wrap_mode mode) {\n  static const MTL::SamplerAddressMode modes[NGF_WRAP_MODE_COUNT] = {\n      MTL::SamplerAddressModeClampToEdge,\n      MTL::SamplerAddressModeRepeat,\n      MTL::SamplerAddressModeMirrorRepeat};\n  return modes[mode];\n}\n\nstatic MTL::SamplerMinMagFilter get_mtl_minmag_filter(ngf_sampler_filter f) {\n  static MTL::SamplerMinMagFilter filters[NGF_FILTER_COUNT] = {\n      MTL::SamplerMinMagFilterNearest,\n      MTL::SamplerMinMagFilterLinear};\n  return filters[f];\n}\n\nstatic MTL::SamplerMipFilter get_mtl_mip_filter(ngf_sampler_filter f) {\n  static MTL::SamplerMipFilter filters[NGF_FILTER_COUNT] = {\n      MTL::SamplerMipFilterNearest,\n      MTL::SamplerMipFilterLinear};\n  return filters[f];\n}\n\nstatic uint32_t ngfmtl_get_bytesperpel(const ngf_image_format format) {\n  const mtl_format f = get_mtl_pixel_format(format);\n  assert((f.block_width | f.block_height) == 1);  // invalid op for compressed formats\n  return f.bits_per_block / 8;\n}\n\nstatic uint32_t ngfmtl_get_pitch(const uint32_t width, const ngf_image_format format) {\n  const mtl_format f                    = get_mtl_pixel_format(format);\n  const bool       is_compressed_format = (f.block_width | f.block_height) > 1;\n  return is_compressed_format ? 
(width + f.block_width - 1) / f.block_width * f.bits_per_block / 8\n                              : width * f.bits_per_block / 8;\n}\n\nstatic uint32_t ngfmtl_get_num_rows(const uint32_t height, const ngf_image_format format) {\n  const mtl_format f                    = get_mtl_pixel_format(format);\n  const bool       is_compressed_format = (f.block_width | f.block_height) > 1;\n  return is_compressed_format ? (height + f.block_height - 1) / f.block_height : height;\n}\n\n#pragma mark ngf_struct_definitions\n\nenum ngf_id_init_types { id_default };\n\n// Shared pointer for managed objects\ntemplate<typename T> class ngf_id {\n  public:\n  // Create an ngf_id with an additional retain count\n  // Useful for keeping AutoReleasePool managed objects alive beyond\n  // Their pool lifetime\n  static ngf_id add_retain(T* ptr) {\n    ngf_id res = ptr;\n    if (res) { res->retain(); }\n    return res;\n  }\n\n  ngf_id() : ptr_(nullptr) {\n  }\n  ngf_id(const ngf_id_init_types& type) : ptr_(T::alloc()->init()) {\n  }\n  // Note: Does NOT increment ref count. 
You can use this directly after calling\n  // alloc()->init()\n  ngf_id(T* starting_ptr) : ptr_(starting_ptr) {\n  }\n  ~ngf_id() {\n    destroy_if_necessary();\n  }\n\n  ngf_id(const ngf_id&) = delete;\n  ngf_id& operator=(const ngf_id&) = delete;\n  ngf_id(ngf_id&& other) : ptr_(nullptr) {\n    *this = ngfi::move(other);\n  }\n  ngf_id& operator=(ngf_id&& other) {\n    destroy_if_necessary();\n    ptr_       = other.ptr_;\n    other.ptr_ = nullptr;\n    return *this;\n  }\n\n  T* get() const {\n    return ptr_;\n  }\n  T* operator->() const {\n    return ptr_;\n  }\n  operator bool() const {\n    return ptr_ != nullptr;\n  }\n\n  private:\n  void destroy_if_necessary() {\n    if (ptr_) { ptr_->release(); }\n  }\n\n  T* ptr_;\n};\n\nstruct ngf_render_target_t {\n  static ngfi::maybe_ngfptr<ngf_render_target_t>\n  make(const ngf_render_target_info& info) NGF_NOEXCEPT;\n\n  static ngfi::maybe_ngfptr<ngf_render_target_t> make(\n      const ngf_attachment_descriptions& attachment_descs,\n      const ngf_image_ref*               img_refs,\n      uint32_t                           rt_width,\n      uint32_t                           rt_height) NGF_NOEXCEPT;\n\n  ~ngf_render_target_t() NGF_NOEXCEPT {\n    if (attachment_descs.descs) { NGFI_FREEN(attachment_descs.descs, attachment_descs.ndescs); }\n  }\n\n  ngf_attachment_descriptions      attachment_descs;\n  ngfi::fixed_array<ngf_image_ref> render_image_refs;\n  ngfi::fixed_array<ngf_image_ref> resolve_image_refs;\n  uint32_t                         nrender_attachments  = 0u;\n  uint32_t                         nresolve_attachments = 0u;\n  bool                             is_default           = false;\n  NS::UInteger                     width;\n  NS::UInteger                     height;\n};\n\nstruct ngf_cmd_buffer_t {\n  ngfi::cmd_buffer_state      state                     = ngfi::CMD_BUFFER_STATE_NEW;\n  bool                        renderpass_active         = false;\n  bool                        compute_pass_active  
     = false;\n  bool                        xfer_pass_active          = false;\n  MTL::CommandBuffer*         mtl_cmd_buffer            = nullptr;\n  MTL::RenderCommandEncoder*  active_rce                = nullptr;\n  MTL::BlitCommandEncoder*    active_bce                = nullptr;\n  MTL::ComputeCommandEncoder* active_cce                = nullptr;\n  ngf_graphics_pipeline       active_gfx_pipe           = nullptr;\n  ngf_compute_pipeline        active_compute_pipe       = nullptr;\n  ngf_render_target           active_rt                 = nullptr;\n  ngf_id<MTL::Buffer>         bound_index_buffer        = nullptr;\n  MTL::IndexType              bound_index_buffer_type   = MTL::IndexTypeUInt16;\n  size_t                      bound_index_buffer_offset = 0u;\n\n  ngf_id<MTL::RenderPassSampleBufferAttachmentDescriptor>\n      sample_buf_attachment_for_next_render_pass = nullptr;\n  ngf_id<MTL::ComputePassSampleBufferAttachmentDescriptor>\n      sample_buf_attachment_for_next_compute_pass = nullptr;\n\n  // Re-applied via setBytes on every pipeline bind in this encoder; 0 size = none pending.\n  uint32_t pending_pc_size = 0u;\n  uint8_t  pending_pc_data[NGF_MAX_ENCODER_INLINE_BYTES] = {};\n\n  static ngfi::maybe_ngfptr<ngf_cmd_buffer_t> make(const ngf_cmd_buffer_info&) NGF_NOEXCEPT {\n    return ngfi::unique_ptr<ngf_cmd_buffer_t>::make();\n  }\n};\n#define NGFMTL_ENC2CMDBUF(enc) ((ngf_cmd_buffer)((void*)enc.pvt_data_donotuse.d0))\n\nstruct ngfmtl_niceshade_metadata {\n  ngfi::array<ngfi::array<uint32_t>> native_binding_map;\n  uint32_t                           threadgroup_size[3];\n  // Metal buffer slot for the push-constant block; ~0u if the shader has none.\n  uint32_t                           push_const_native_binding = ~0u;\n};\n\nstruct ngf_shader_stage_t {\n  ngf_id<MTL::Library>    func_lib = nullptr;\n  ngf_stage_type          type;\n  ngfi::fixed_array<char> entry_point_name;\n  ngfi::fixed_array<char> source_code;\n\n  static 
ngfi::maybe_ngfptr<ngf_shader_stage_t> make(const ngf_shader_stage_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_graphics_pipeline_t {\n  ngf_id<MTL::RenderPipelineState>    pipeline           = nullptr;\n  ngf_id<MTL::DepthStencilState>      depth_stencil      = nullptr;\n  ngf_id<MTL::DepthStencilDescriptor> depth_stencil_desc = nullptr;\n\n  uint32_t front_stencil_reference = 0u;\n  uint32_t back_stencil_reference  = 0u;\n\n  MTL::PrimitiveType primitive_type = MTL::PrimitiveTypeTriangle;\n  MTL::Winding       winding        = MTL::WindingCounterClockwise;\n  MTL::CullMode      culling        = MTL::CullModeBack;\n  float              blend_color[4] {0};\n\n  ngfmtl_niceshade_metadata niceshade_metadata;\n\n  static ngfi::maybe_ngfptr<ngf_graphics_pipeline_t>\n  make(const ngf_graphics_pipeline_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_compute_pipeline_t {\n  ngf_id<MTL::ComputePipelineState> pipeline = nullptr;\n  ngfmtl_niceshade_metadata         niceshade_metadata;\n\n  static ngfi::maybe_ngfptr<ngf_compute_pipeline_t>\n  make(const ngf_compute_pipeline_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_buffer_t {\n  ngf_id<MTL::Buffer> mtl_buffer    = nullptr;\n  size_t              mapped_offset = 0;\n\n  static ngfi::maybe_ngfptr<ngf_buffer_t> make(const ngf_buffer_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_texel_buffer_view_t {\n  ngf_id<MTL::Texture> mtl_buffer_view = nullptr;\n\n  static ngfi::maybe_ngfptr<ngf_texel_buffer_view_t>\n  make(const ngf_texel_buffer_view_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_sampler_t {\n  ngf_id<MTL::SamplerState> sampler = nullptr;\n\n  static ngfi::maybe_ngfptr<ngf_sampler_t> make(const ngf_sampler_info&) NGF_NOEXCEPT;\n};\n\nstruct ngf_image_t {\n  ngf_id<MTL::Texture> texture = nullptr;\n\n  // Workaround for binding srgb images as writeable storage images.\n  ngf_id<MTL::Texture> non_srgb_view = nullptr;\n\n  ngf_image_format format;\n  uint32_t         usage_flags = 0u;\n\n  static ngfi::maybe_ngfptr<ngf_image_t> make(const ngf_image_info&) 
NGF_NOEXCEPT;\n};\n\nstruct ngf_image_view_t {\n  ngf_id<MTL::Texture> view = nullptr;\n\n  static ngfi::maybe_ngfptr<ngf_image_view_t> make(const ngf_image_view_info& info) NGF_NOEXCEPT;\n};\n\nCA::MetalLayer* ngf_layer_add_to_view(\n    MTL::Device*     device,\n    uint32_t         width,\n    uint32_t         height,\n    MTL::PixelFormat pixel_format,\n    ngf_colorspace   colorspace,\n    uint32_t         capacity_hint,\n    bool             display_sync_enabled,\n    bool             compute_access_enabled,\n    uintptr_t        native_handle);\n\nCA::MetalDrawable* ngf_layer_next_drawable(CA::MetalLayer* layer);\n\nvoid ngf_resize_swapchain(\n    CA::MetalLayer* layer,\n    uint32_t        width,\n    uint32_t        height,\n    uintptr_t       native_handle);\n\n// Manages the final presentation surfaces.\nclass ngfmtl_swapchain {\n  public:\n  struct frame {\n    MTL::Texture* color_attachment_texture() {\n      return multisample_texture ? multisample_texture : color_drawable->texture();\n    }\n\n    MTL::Texture* resolve_attachment_texture() {\n      return multisample_texture ? 
color_drawable->texture() : nullptr;\n    }\n\n    MTL::Texture* depth_attachment_texture() {\n      return depth_texture;\n    }\n\n    CA::MetalDrawable* color_drawable      = nullptr;\n    MTL::Texture*      depth_texture       = nullptr;\n    MTL::Texture*      multisample_texture = nullptr;\n    ngf_image_t        img_wrapper;\n  };\n\n  ngfmtl_swapchain() = default;\n  ngfmtl_swapchain(ngfmtl_swapchain&& other) {\n    *this = ngfi::move(other);\n  }\n  ngfmtl_swapchain& operator=(ngfmtl_swapchain&& other) {\n    layer_        = other.layer_;\n    other.layer_  = nullptr;\n    depth_images_ = ngfi::move(other.depth_images_);\n    capacity_     = other.capacity_;\n    img_idx_      = other.img_idx_;\n\n    return *this;\n  }\n\n  // Delete copy ctor and copy assignment to make this type move-only.\n  ngfmtl_swapchain& operator=(const ngfmtl_swapchain&) = delete;\n  ngfmtl_swapchain(const ngfmtl_swapchain&)            = delete;\n\n  ngf_error initialize(const ngf_swapchain_info& swapchain_info, MTL::Device* device) noexcept {\n    // Initialize the Metal layer.\n    pixel_format_ = get_mtl_pixel_format(swapchain_info.color_format).format;\n    if (pixel_format_ == MTL::PixelFormatInvalid) {\n      NGFI_DIAG_ERROR(\"Image format not supported by Metal backend\");\n      return NGF_ERROR_INVALID_FORMAT;\n    }\n\n    layer_ = ngf_layer_add_to_view(\n        device,\n        swapchain_info.width,\n        swapchain_info.height,\n        pixel_format_,\n        swapchain_info.colorspace,\n        swapchain_info.capacity_hint,\n        (swapchain_info.present_mode == NGF_PRESENTATION_MODE_FIFO),\n        swapchain_info.enable_compute_access,\n        swapchain_info.native_handle);\n\n    // Remember the number of images in the swapchain.\n    capacity_ = swapchain_info.capacity_hint;\n\n    // Initialize depth attachments if necessary.\n    initialize_depth_attachments(swapchain_info);\n    initialize_multisample_images(swapchain_info);\n\n    compute_access_enabled_ 
= swapchain_info.enable_compute_access;

    return NGF_ERROR_OK;
  }

  // True when the swapchain was created with compute-shader access enabled.
  bool compute_access_enabled() const noexcept {
    return compute_access_enabled_;
  }

  // Resizes the underlying layer and recreates all size-dependent resources.
  ngf_error resize(const ngf_swapchain_info& swapchain_info) {
    ngf_resize_swapchain(
        layer_,
        swapchain_info.width,
        swapchain_info.height,
        swapchain_info.native_handle);

    // Reinitialize depth attachments & multisample images if necessary.
    initialize_depth_attachments(swapchain_info);
    initialize_multisample_images(swapchain_info);

    return NGF_ERROR_OK;
  }

  // Advances to the next swapchain slot and returns its drawable plus the
  // per-slot depth / multisample textures (nullptr for whichever is unused).
  frame next_frame() {
    img_idx_ = (img_idx_ + 1u) % capacity_;
    return {
        ngf_layer_next_drawable(layer_),
        depth_images_.size() > 0 ? depth_images_[img_idx_].get() : nullptr,
        is_multisampled() ? multisample_images_[img_idx_]->texture.get() : nullptr};
  }

  MTL::PixelFormat get_pixel_format() const {
    return pixel_format_;
  }

  // True once initialize() has successfully created the layer.
  operator bool() {
    return layer_;
  }

  bool is_multisampled() const {
    return !multisample_images_.empty();
  }

  private:
  // (Re)creates one private depth texture per swapchain slot when a depth
  // format is requested; any previously created attachments are released.
  void initialize_depth_attachments(const ngf_swapchain_info& swapchain_info) {
    destroy_depth_attachments();
    if (swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED) {
      depth_images_.resize(swapchain_info.capacity_hint);
      MTL::PixelFormat depth_format = get_mtl_pixel_format(swapchain_info.depth_format).format;
      // assert(depth_format != MTL::PixelFormatInvalid);
      for (uint32_t i = 0u; i < swapchain_info.capacity_hint; ++i) {
        ngf_id<MTL::TextureDescriptor> depth_texture_desc = id_default;
        depth_texture_desc->setTextureType(
            swapchain_info.sample_count > 1u ?
MTL::TextureType2DMultisample : MTL::TextureType2D);\n        depth_texture_desc->setWidth(swapchain_info.width);\n        depth_texture_desc->setHeight(swapchain_info.height);\n        depth_texture_desc->setPixelFormat(depth_format);\n        depth_texture_desc->setDepth(1u);\n        depth_texture_desc->setSampleCount((NS::UInteger)swapchain_info.sample_count);\n        depth_texture_desc->setMipmapLevelCount(1u);\n        depth_texture_desc->setArrayLength(1u);\n        depth_texture_desc->setUsage(MTL::TextureUsageRenderTarget);\n        depth_texture_desc->setStorageMode(MTL::StorageModePrivate);\n        depth_texture_desc->setResourceOptions(MTL::ResourceStorageModePrivate);\n        if (__builtin_available(macOS 10.14, *)) {\n          depth_texture_desc->setAllowGPUOptimizedContents(true);\n        }\n        depth_images_[i] = MTL_DEVICE->newTexture(depth_texture_desc.get());\n      }\n    }\n  }\n\n  void destroy_depth_attachments() {\n    depth_images_.resize(0);\n  }\n\n  void initialize_multisample_images(const ngf_swapchain_info& swapchain_info) {\n    destroy_multisample_images();\n    if (swapchain_info.sample_count > NGF_SAMPLE_COUNT_1) {\n      multisample_images_.resize(capacity_);\n      for (size_t i = 0; i < capacity_; ++i) {\n        const ngf_image_info info = {\n            .type   = NGF_IMAGE_TYPE_IMAGE_2D,\n            .extent = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},\n            .nmips  = 1u,\n            .nlayers      = 1u,\n            .format       = swapchain_info.color_format,\n            .sample_count = (ngf_sample_count)swapchain_info.sample_count,\n            .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT};\n        ngf_create_image(&info, &multisample_images_[i]);\n      }\n    }\n  }\n\n  void destroy_multisample_images() {\n    assert(multisample_images_.empty() || capacity_ == multisample_images_.size());\n    for (size_t i = 0; i < multisample_images_.size(); ++i) {\n      
ngf_destroy_image(multisample_images_[i]);\n    }\n    multisample_images_.resize(0);\n  }\n\n  CA::MetalLayer*                   layer_    = nullptr;\n  uint32_t                          img_idx_  = 0u;\n  uint32_t                          capacity_ = 0u;\n  ngfi::array<ngf_id<MTL::Texture>> depth_images_;\n  ngfi::array<ngf_image>            multisample_images_;\n  MTL::PixelFormat                  pixel_format_;\n  bool                              compute_access_enabled_;\n};\n\nstruct ngf_context_t {\n  ngf_id<MTL::Device>        device = nullptr;\n  ngfmtl_swapchain           swapchain;\n  ngfmtl_swapchain::frame    frame;\n  ngf_id<MTL::CommandQueue>  queue      = nullptr;\n  bool                       is_current = false;\n  ngf_swapchain_info         swapchain_info;\n  MTL::CommandBuffer*        pending_cmd_buffer = nullptr;\n  ngf_id<MTL::CommandBuffer> last_cmd_buffer    = nullptr;\n  dispatch_semaphore_t       frame_sync_sem     = nullptr;\n  ngf_render_target          default_rt;\n\n  static ngfi::maybe_ngfptr<ngf_context_t> make(const ngf_context_info&) NGF_NOEXCEPT;\n\n  ~ngf_context_t() NGF_NOEXCEPT {\n    if (last_cmd_buffer) { last_cmd_buffer->waitUntilCompleted(); }\n  }\n};\n\nconstexpr MTL::GPUFamily NGFMTL_GPU_FAMILIES[] = {\n    MTL::GPUFamilyCommon1,\n    MTL::GPUFamilyCommon2,\n    MTL::GPUFamilyApple1,\n    MTL::GPUFamilyApple2,\n    MTL::GPUFamilyApple3,\n    MTL::GPUFamilyApple4,\n    MTL::GPUFamilyApple5,\n    MTL::GPUFamilyApple6,\n    MTL::GPUFamilyApple7,\n    MTL::GPUFamilyMac2,\n    MTL::GPUFamilyMac2,\n    MTL::GPUFamilyMac2,\n    MTL::GPUFamilyMac2,\n};\n\nconstexpr size_t NGFMTL_NUM_GPU_FAMILIES = sizeof(NGFMTL_GPU_FAMILIES) / sizeof(MTL::GPUFamily);\n\nstatic constexpr size_t ngfmtl_gpufam_idx(MTL::GPUFamily fam) {\n  for (size_t i = 0; i < NGFMTL_NUM_GPU_FAMILIES; ++i)\n    if (NGFMTL_GPU_FAMILIES[i] == fam) return i;\n  return 0;\n}\n\nstatic size_t ngfmtl_max_supported_gpu_family(MTL::Device* mtldev) {\n  for (size_t fam_idx 
= NGFMTL_NUM_GPU_FAMILIES - 1; fam_idx >= 0; --fam_idx)\n    if (mtldev->supportsFamily(NGFMTL_GPU_FAMILIES[fam_idx])) return fam_idx;\n  return 0;\n}\n\nvoid             ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks);\nngf_sample_count ngfi_get_highest_sample_count(size_t counts_bitmap);\n\nstatic void ngfmtl_populate_ngf_device(uint32_t handle, ngf_device& ngfdev, MTL::Device* mtldev) {\n  ngfdev.handle = handle;\n#if TARGET_OS_OSX\n  ngfdev.performance_tier =\n      mtldev->lowPower() ? NGF_DEVICE_PERFORMANCE_TIER_LOW : NGF_DEVICE_PERFORMANCE_TIER_HIGH;\n#else\n  ngfdev.performance_tier = NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN;\n#endif\n  const size_t device_name_length = mtldev->name()->length();\n  strncpy(\n      ngfdev.name,\n      mtldev->name()->utf8String(),\n      NGFI_MIN(NGF_DEVICE_NAME_MAX_LENGTH, device_name_length));\n  ngf_device_capabilities& caps                 = ngfdev.capabilities;\n  const size_t             gpu_family_idx       = ngfmtl_max_supported_gpu_family(mtldev);\n  caps.clipspace_z_zero_to_one                  = true;\n  caps.max_vertex_input_attributes_per_pipeline = 31;\n  caps.max_uniform_buffers_per_stage            = 31;\n  caps.max_sampler_anisotropy                   = 16.0f;\n  caps.max_samplers_per_stage                   = 16;\n  caps.max_3d_image_dimension                   = 2048;\n  caps.max_image_layers                         = 2048;\n  caps.max_uniform_buffer_range                 = NGF_DEVICE_LIMIT_UNKNOWN;\n  caps.device_local_memory_is_host_visible      = mtldev->hasUnifiedMemory();\n\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple6)) {\n    caps.max_sampled_images_per_stage = 128;\n  } else if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {\n    caps.max_sampled_images_per_stage = 96;\n  } else {\n    caps.max_sampled_images_per_stage = 31;\n  }\n\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {\n    caps.max_fragment_input_components = 124;\n  
} else {\n    caps.max_fragment_input_components = 60;\n  }\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {\n    caps.max_fragment_inputs = 124;\n  } else {\n    caps.max_fragment_inputs = 60;\n  }\n\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyMac2)) {\n    caps.uniform_buffer_offset_alignment = 32;\n  } else {\n    caps.uniform_buffer_offset_alignment = 4;\n  }\n  caps.storage_buffer_offset_alignment = 64;\n  caps.texel_buffer_offset_alignment   = 64;\n\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple3)) {\n    caps.max_1d_image_dimension   = 16384;\n    caps.max_2d_image_dimension   = 16384;\n    caps.max_cube_image_dimension = 16384;\n  } else {\n    caps.max_1d_image_dimension   = 8192;\n    caps.max_2d_image_dimension   = 8192;\n    caps.max_cube_image_dimension = 8192;\n  }\n\n  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple2)) {\n    caps.max_color_attachments_per_pass = 8;\n  } else {\n    caps.max_color_attachments_per_pass = 4;\n  }\n\n  caps.cubemap_arrays_supported = gpu_family_idx == ngfmtl_gpufam_idx(MTL::GPUFamilyCommon2) ||\n                                  gpu_family_idx == ngfmtl_gpufam_idx(MTL::GPUFamilyCommon3) ||\n                                  gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple3);\n\n  size_t supports_samples_bitmap = (mtldev->supportsTextureSampleCount(1) ? 1 : 0) |\n                                   (mtldev->supportsTextureSampleCount(2) ? 2 : 0) |\n                                   (mtldev->supportsTextureSampleCount(4) ? 4 : 0) |\n                                   (mtldev->supportsTextureSampleCount(8) ? 
8 : 0);\n\n  ngf_sample_count max_supported_sample_count =\n      ngfi_get_highest_sample_count(supports_samples_bitmap);\n\n  caps.texture_color_sample_counts                  = supports_samples_bitmap;\n  caps.max_supported_texture_color_sample_count     = max_supported_sample_count;\n  caps.texture_depth_sample_counts                  = supports_samples_bitmap;\n  caps.max_supported_texture_depth_sample_count     = max_supported_sample_count;\n  caps.framebuffer_color_sample_counts              = supports_samples_bitmap;\n  caps.max_supported_framebuffer_color_sample_count = max_supported_sample_count;\n  caps.framebuffer_depth_sample_counts              = supports_samples_bitmap;\n  caps.max_supported_framebuffer_depth_sample_count = max_supported_sample_count;\n}\n\nNGFI_THREADLOCAL ngf_context CURRENT_CONTEXT = nullptr;\n\nstatic ngf_error ngfmtl_parse_niceshade_metadata(\n    const char*                input,\n    bool                       need_threadgroup_size,\n    ngfmtl_niceshade_metadata* output) {\n  static const char binding_map_tag[]           = \"NGF_NATIVE_BINDING_MAP\";\n  static const char threadgroup_size_tag[]      = \"NGF_THREADGROUP_SIZE\";\n  const char*       serialized_binding_map      = NULL;\n  const char*       serialized_threadgroup_size = NULL;\n  bool              in_comment                  = false;\n\n  for (; *input != '\\0' && (serialized_binding_map == NULL ||\n                            (!need_threadgroup_size || serialized_threadgroup_size == NULL));\n       ++input) {\n    if (!in_comment && *input == '/' && *(input + 1) == '*') {\n      in_comment = true;\n      input++;\n      continue;\n    }\n    if (in_comment && *input == '*' && *(input + 1) == '/') {\n      in_comment = false;\n      input++;\n      continue;\n    }\n    if (!in_comment) continue;\n\n    if (serialized_binding_map == NULL &&\n        strncmp(input, binding_map_tag, sizeof(binding_map_tag) - 1) == 0) {\n      serialized_binding_map = input + 
sizeof(binding_map_tag) - 1;\n    }\n    if (need_threadgroup_size && serialized_threadgroup_size == NULL &&\n        strncmp(input, threadgroup_size_tag, sizeof(threadgroup_size_tag) - 1) == 0) {\n      serialized_threadgroup_size = input + sizeof(threadgroup_size_tag) - 1;\n    }\n  }\n  if (!serialized_binding_map) {\n    NGFI_DIAG_ERROR(\"Failed to find a serialized binding map\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  if (need_threadgroup_size && !serialized_threadgroup_size) {\n    NGFI_DIAG_ERROR(\"Failed to find a serialized threadgroup size\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n\n  // Parse the native binding map.\n  struct ngfmtl_binding_map_entry {\n    uint32_t set;\n    uint32_t binding;\n    uint32_t native_binding;\n  };\n\n  ngfi::array<ngfmtl_binding_map_entry> tmp_binding_map_entries;\n  uint32_t                              consumed_input_bytes;\n  uint32_t                              max_set     = 0u;\n  uint32_t                              max_binding = 0u;\n  ngfmtl_binding_map_entry              current_binding_map_entry;\n  while (sscanf(\n             serialized_binding_map,\n             \" ( %d %d ) : %d%n\",\n             &current_binding_map_entry.set,\n             &current_binding_map_entry.binding,\n             &current_binding_map_entry.native_binding,\n             &consumed_input_bytes) == 3 &&\n         current_binding_map_entry.set != -1 && current_binding_map_entry.binding != -1 &&\n         current_binding_map_entry.native_binding != -1) {\n    serialized_binding_map += consumed_input_bytes;\n    max_set     = NGFI_MAX(max_set, current_binding_map_entry.set);\n    max_binding = NGFI_MAX(max_binding, current_binding_map_entry.binding);\n    tmp_binding_map_entries.emplace_back(current_binding_map_entry);\n  }\n\n  ngfi::array<ngfi::array<uint32_t>> native_binding_map {max_set + 1};\n  for (uint32_t e = 0u; e < tmp_binding_map_entries.size(); ++e) {\n    auto& set_map = 
native_binding_map[tmp_binding_map_entries[e].set];\n    if (set_map.size() == 0) {\n      set_map.resize(max_binding + 1);\n      memset(set_map.data(), ~0, sizeof(set_map[0]) * set_map.size());\n    }\n    set_map[tmp_binding_map_entries[e].binding] = tmp_binding_map_entries[e].native_binding;\n  }\n  output->native_binding_map = ngfi::move(native_binding_map);\n\n  // Skip the binding-map sentinel and read the trailing push-constant slot if any.\n  if (current_binding_map_entry.set == -1) {\n    serialized_binding_map += consumed_input_bytes;\n    int pc_slot = -1;\n    if (sscanf(serialized_binding_map, \" %d\", &pc_slot) == 1 && pc_slot >= 0) {\n      output->push_const_native_binding = static_cast<uint32_t>(pc_slot);\n    }\n  }\n\n  if (need_threadgroup_size && serialized_threadgroup_size) {\n    if (sscanf(\n            serialized_threadgroup_size,\n            \"%d %d %d\",\n            &output->threadgroup_size[0],\n            &output->threadgroup_size[1],\n            &output->threadgroup_size[2]) != 3) {\n      NGFI_DIAG_ERROR(\"Failed to parse threadgroup size\");\n      return NGF_ERROR_INVALID_OPERATION;\n    }\n  }\n\n  return NGF_ERROR_OK;\n}\n\nstatic ngf_id<MTL::Function> ngfmtl_get_shader_main(\n    MTL::Library*                func_lib,\n    const char*                  entry_point_name,\n    MTL::FunctionConstantValues* spec_consts) {\n  NS::Error*  err                 = nullptr;\n  NS::String* ns_entry_point_name = NS::String::string(entry_point_name, NS::UTF8StringEncoding);\n  ngf_id<MTL::Function> result    = func_lib->newFunction(ns_entry_point_name, spec_consts, &err);\n  if (err) {\n    NGFI_DIAG_ERROR(err->localizedDescription()->utf8String());\n    return nullptr;\n  } else {\n    return result;\n  }\n}\n\nstatic ngf_id<MTL::FunctionConstantValues>\nngfmtl_function_consts(const ngf_specialization_info* spec_info) {\n  // Populate specialization constant values.\n  ngf_id<MTL::FunctionConstantValues> spec_consts = id_default;\n  if 
(spec_info != nullptr) {\n    for (uint32_t s = 0u; s < spec_info->nspecializations; ++s) {\n      const ngf_constant_specialization* spec = &spec_info->specializations[s];\n      MTL::DataType                      type = get_mtl_type(spec->type);\n      if (type == MTL::DataTypeNone) { return nullptr; }\n      void* write_ptr = ((uint8_t*)spec_info->value_buffer + spec->offset);\n      spec_consts->setConstantValue(write_ptr, type, spec->constant_id);\n    }\n  }\n  return spec_consts;\n}\n\nstatic ngf_id<MTL::StencilDescriptor>\nngfmtl_create_stencil_descriptor(const ngf_stencil_info& info) {\n  ngf_id<MTL::StencilDescriptor> result = id_default;\n  result->setStencilCompareFunction(get_mtl_compare_function(info.compare_op));\n  result->setStencilFailureOperation(get_mtl_stencil_op(info.fail_op));\n  result->setDepthStencilPassOperation(get_mtl_stencil_op(info.pass_op));\n  result->setDepthFailureOperation(get_mtl_stencil_op(info.depth_fail_op));\n  result->setWriteMask(info.write_mask);\n  result->setReadMask(info.compare_mask);\n  return result;\n}\n\nngfi::maybe_ngfptr<ngf_compute_pipeline_t>\nngf_compute_pipeline_t::make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT {\n  ngfmtl_niceshade_metadata metadata;\n  const ngf_error           metadata_parse_error =\n      ngfmtl_parse_niceshade_metadata(info.shader_stage->source_code.data(), true, &metadata);\n  if (metadata_parse_error != NGF_ERROR_OK) return metadata_parse_error;\n\n  ngf_id<MTL::FunctionConstantValues> func_const_values = ngfmtl_function_consts(info.spec_info);\n  ngf_id<MTL::Function>               function          = ngfmtl_get_shader_main(\n      info.shader_stage->func_lib.get(),\n      info.shader_stage->entry_point_name.data(),\n      func_const_values.get());\n  if (!function) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  ngf_id<MTL::ComputePipelineDescriptor> mtl_compute_desc = id_default;\n  mtl_compute_desc->setComputeFunction(function.get());\n\n  if (info.debug_name != 
nullptr) {\n    mtl_compute_desc->setLabel(NS::String::string(info.debug_name, NS::UTF8StringEncoding));\n  }\n\n  NS::Error*                        err        = nullptr;\n  ngf_id<MTL::ComputePipelineState> computePSO = CURRENT_CONTEXT->device->newComputePipelineState(\n      mtl_compute_desc.get(),\n      MTL::PipelineOptionNone,\n      nullptr,\n      &err);\n\n  if (err) {\n    NGFI_DIAG_ERROR(err->localizedDescription()->utf8String());\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n  auto compute_pipeline                = ngfi::unique_ptr<ngf_compute_pipeline_t>::make();\n  compute_pipeline->pipeline           = ngfi::move(computePSO);\n  compute_pipeline->niceshade_metadata = ngfi::move(metadata);\n  return ngfi::move(compute_pipeline);\n}\n\nngfi::maybe_ngfptr<ngf_graphics_pipeline_t>\nngf_graphics_pipeline_t::make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT {\n  ngf_id<MTL::RenderPipelineDescriptor> mtl_pipe_desc      = id_default;\n  const ngf_attachment_descriptions&    attachment_descs   = *info.compatible_rt_attachment_descs;\n  uint32_t                              ncolor_attachments = 0u;\n  for (uint32_t i = 0u; i < attachment_descs.ndescs; ++i) {\n    const ngf_attachment_description& attachment_desc = attachment_descs.descs[i];\n    if (attachment_desc.is_resolve) continue;\n    if (attachment_desc.type == NGF_ATTACHMENT_COLOR) {\n      const ngf_blend_info                          blend = info.color_attachment_blend_states\n                                                                ? 
info.color_attachment_blend_states[ncolor_attachments]\n                                                                : ngf_blend_info {};\n      MTL::RenderPipelineColorAttachmentDescriptor* mtl_attachment_desc =\n          mtl_pipe_desc->colorAttachments()->object(ncolor_attachments++);\n      mtl_attachment_desc->setPixelFormat(get_mtl_pixel_format(attachment_desc.format).format);\n      mtl_attachment_desc->setBlendingEnabled(blend.enable);\n      if (blend.enable) {\n        mtl_attachment_desc->setSourceRGBBlendFactor(\n            get_mtl_blend_factor(blend.src_color_blend_factor));\n        mtl_attachment_desc->setDestinationRGBBlendFactor(\n            get_mtl_blend_factor(blend.dst_color_blend_factor));\n        mtl_attachment_desc->setSourceAlphaBlendFactor(\n            get_mtl_blend_factor(blend.src_alpha_blend_factor));\n        mtl_attachment_desc->setDestinationAlphaBlendFactor(\n            get_mtl_blend_factor(blend.dst_alpha_blend_factor));\n        mtl_attachment_desc->setRgbBlendOperation(get_mtl_blend_operation(blend.blend_op_color));\n        mtl_attachment_desc->setAlphaBlendOperation(get_mtl_blend_operation(blend.blend_op_alpha));\n      }\n      if (info.color_attachment_blend_states) {\n        mtl_attachment_desc->setWriteMask(\n            (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_R ? MTL::ColorWriteMaskRed : 0) |\n            (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_G ? MTL::ColorWriteMaskGreen : 0) |\n            (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_B ? MTL::ColorWriteMaskBlue : 0) |\n            (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_A ? 
MTL::ColorWriteMaskAlpha : 0));\n      }\n    } else if (\n        attachment_desc.type == NGF_ATTACHMENT_DEPTH ||\n        attachment_desc.type == NGF_ATTACHMENT_DEPTH_STENCIL) {\n      mtl_pipe_desc->setDepthAttachmentPixelFormat(\n          get_mtl_pixel_format(attachment_desc.format).format);\n    }\n  }\n\n  mtl_pipe_desc->setRasterSampleCount(info.multisample->sample_count);\n  mtl_pipe_desc->setAlphaToCoverageEnabled(info.multisample->alpha_to_coverage);\n\n  mtl_pipe_desc->setStencilAttachmentPixelFormat(MTL::PixelFormatInvalid);\n\n  if (mtl_pipe_desc->depthAttachmentPixelFormat() == MTL::PixelFormatDepth32Float_Stencil8) {\n    mtl_pipe_desc->setStencilAttachmentPixelFormat(MTL::PixelFormatDepth32Float_Stencil8);\n  }\n\n  // Populate specialization constant values.\n  ngf_id<MTL::FunctionConstantValues> spec_consts = ngfmtl_function_consts(info.spec_info);\n\n  // Set stage functions.\n  bool                      have_niceshade_metadata = false;\n  ngfmtl_niceshade_metadata metadata;\n  for (uint32_t s = 0u; s < info.nshader_stages; ++s) {\n    const ngf_shader_stage stage = info.shader_stages[s];\n    if (!have_niceshade_metadata) {\n      const ngf_error metadata_parse_result = ngfmtl_parse_niceshade_metadata(\n          stage->source_code.data(),\n          stage->type == NGF_STAGE_COMPUTE,\n          &metadata);\n      have_niceshade_metadata = (metadata_parse_result == NGF_ERROR_OK);\n    }\n    if (stage->type == NGF_STAGE_VERTEX) {\n      assert(!mtl_pipe_desc->vertexFunction());\n      mtl_pipe_desc->setVertexFunction(ngfmtl_get_shader_main(\n                                           stage->func_lib.get(),\n                                           stage->entry_point_name.data(),\n                                           spec_consts.get())\n                                           .get());\n    } else if (stage->type == NGF_STAGE_FRAGMENT) {\n      assert(!mtl_pipe_desc->fragmentFunction());\n      
mtl_pipe_desc->setFragmentFunction(ngfmtl_get_shader_main(\n                                             stage->func_lib.get(),\n                                             stage->entry_point_name.data(),\n                                             spec_consts.get())\n                                             .get());\n    }\n  }\n  if (!have_niceshade_metadata) {\n    NGFI_DIAG_ERROR(\"Native binding map not found.\");\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n\n  // Configure vertex input.\n  const ngf_vertex_input_info& vertex_input_info = *info.input_info;\n  MTL::VertexDescriptor*       vert_desc         = mtl_pipe_desc->vertexDescriptor();\n  for (uint32_t a = 0u; a < vertex_input_info.nattribs; ++a) {\n    MTL::VertexAttributeDescriptor* attr_desc = vert_desc->attributes()->object(a);\n    const ngf_vertex_attrib_desc&   attr_info = vertex_input_info.attribs[a];\n    attr_desc->setOffset(vertex_input_info.attribs[a].offset);\n    attr_desc->setBufferIndex(MAX_BUFFER_BINDINGS - vertex_input_info.attribs[a].binding);\n    attr_desc->setFormat(\n        get_mtl_attrib_format(attr_info.type, attr_info.size, attr_info.normalized));\n    if (attr_desc->format() == MTL::VertexFormatInvalid) {\n      NGFI_DIAG_ERROR(\"Vertex attrib format not supported by Metal backend.\");\n      return NGF_ERROR_INVALID_FORMAT;\n    }\n  }\n  for (uint32_t b = 0u; b < vertex_input_info.nvert_buf_bindings; ++b) {\n    MTL::VertexBufferLayoutDescriptor* binding_desc =\n        vert_desc->layouts()->object(MAX_BUFFER_BINDINGS - b);\n    const ngf_vertex_buf_binding_desc& binding_info = vertex_input_info.vert_buf_bindings[b];\n    binding_desc->setStride(binding_info.stride);\n    binding_desc->setStepFunction(get_mtl_step_function(binding_info.input_rate));\n  }\n\n  // Set primitive topology.\n  mtl_pipe_desc->setInputPrimitiveTopology(\n      get_mtl_primitive_topology_class(info.input_assembly_info->primitive_topology));\n  if 
(mtl_pipe_desc->inputPrimitiveTopology() == MTL::PrimitiveTopologyClassUnspecified) {\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n\n  auto pipeline                = ngfi::unique_ptr<ngf_graphics_pipeline_t>::make();\n  pipeline->niceshade_metadata = ngfi::move(metadata);\n  memcpy(pipeline->blend_color, info.blend_consts, sizeof(pipeline->blend_color));\n\n  if (info.debug_name != nullptr) {\n    mtl_pipe_desc->setLabel(NS::String::string(info.debug_name, NS::UTF8StringEncoding));\n  }\n\n  NS::Error* err     = nullptr;\n  pipeline->pipeline = CURRENT_CONTEXT->device->newRenderPipelineState(mtl_pipe_desc.get(), &err);\n  pipeline->primitive_type = get_mtl_primitive_type(info.input_assembly_info->primitive_topology);\n\n  // Set winding order and culling mode.\n  pipeline->winding = get_mtl_winding(info.rasterization->front_face);\n  pipeline->culling = get_mtl_culling(info.rasterization->cull_mode);\n\n  // Set up depth and stencil state.\n\n  pipeline->depth_stencil_desc                     = id_default;\n  const ngf_depth_stencil_info& depth_stencil_info = *info.depth_stencil;\n  pipeline->depth_stencil_desc->setDepthCompareFunction(\n      depth_stencil_info.depth_test ? 
get_mtl_compare_function(depth_stencil_info.depth_compare)\n                                    : MTL::CompareFunctionAlways);\n  pipeline->depth_stencil_desc->setDepthWriteEnabled(info.depth_stencil->depth_write);\n\n  ngf_id<MTL::StencilDescriptor> backface_descriptor =\n      ngfmtl_create_stencil_descriptor(depth_stencil_info.back_stencil);\n  ngf_id<MTL::StencilDescriptor> frontface_descriptor =\n      ngfmtl_create_stencil_descriptor(depth_stencil_info.front_stencil);\n  pipeline->depth_stencil_desc->setBackFaceStencil(backface_descriptor.get());\n  pipeline->depth_stencil_desc->setFrontFaceStencil(frontface_descriptor.get());\n  pipeline->front_stencil_reference = depth_stencil_info.front_stencil.reference;\n  pipeline->back_stencil_reference  = depth_stencil_info.back_stencil.reference;\n  pipeline->depth_stencil =\n      CURRENT_CONTEXT->device->newDepthStencilState(pipeline->depth_stencil_desc.get());\n\n  if (err) {\n    NGFI_DIAG_ERROR(err->localizedDescription()->utf8String());\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  } else {\n    return ngfi::move(pipeline);\n  }\n}\n\nngfi::maybe_ngfptr<ngf_buffer_t> ngf_buffer_t::make(const ngf_buffer_info& info) NGF_NOEXCEPT {\n  MTL::ResourceOptions options = 0u;\n  switch (info.storage_type) {\n  case NGF_BUFFER_STORAGE_HOST_READABLE:\n  case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE:\n  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE:\n    options = MTL::ResourceCPUCacheModeDefaultCache | MTL::ResourceStorageModeShared;\n    break;\n  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE:\n  case NGF_BUFFER_STORAGE_HOST_WRITEABLE:\n    options = MTL::ResourceCPUCacheModeWriteCombined | MTL::ResourceStorageModeShared;\n    break;\n  case NGF_BUFFER_STORAGE_DEVICE_LOCAL:\n    options = MTL::ResourceStorageModePrivate;\n    break;\n  default:\n    assert(false);\n  }\n  auto result        = ngfi::unique_ptr<ngf_buffer_t>::make();\n  result->mtl_buffer = ngf_id<MTL::Buffer> 
{CURRENT_CONTEXT->device->newBuffer(info.size, options)};\n  if (!result->mtl_buffer) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  return ngfi::move(result);\n}\n\nngfi::maybe_ngfptr<ngf_texel_buffer_view_t>\nngf_texel_buffer_view_t::make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT {\n  auto                           view = ngfi::unique_ptr<ngf_texel_buffer_view_t>::make();\n  ngf_id<MTL::TextureDescriptor> texel_buf_descriptor = id_default;\n\n  texel_buf_descriptor->setDepth(1);\n  texel_buf_descriptor->setMipmapLevelCount(1);\n  texel_buf_descriptor->setPixelFormat(get_mtl_pixel_format(info.texel_format).format);\n  texel_buf_descriptor->setTextureType(MTL::TextureTypeTextureBuffer);\n  texel_buf_descriptor->setArrayLength(1);\n  texel_buf_descriptor->setSampleCount(1);\n  texel_buf_descriptor->setUsage(MTL::TextureUsageShaderRead);\n  texel_buf_descriptor->setStorageMode(info.buffer->mtl_buffer->storageMode());\n  texel_buf_descriptor->setWidth(info.size / ngfmtl_get_bytesperpel(info.texel_format));\n  texel_buf_descriptor->setHeight(1);\n  view->mtl_buffer_view =\n      info.buffer->mtl_buffer->newTexture(texel_buf_descriptor.get(), info.offset, info.size);\n  return ngfi::move(view);\n}\n\nstatic ngf_sample_count ngfmtl_get_ngf_sample_count(NS::UInteger sc) {\n  switch (sc) {\n  case 0:\n  case 1:\n    return NGF_SAMPLE_COUNT_1;\n\n  case 2:\n    return NGF_SAMPLE_COUNT_2;\n  case 4:\n    return NGF_SAMPLE_COUNT_4;\n  case 8:\n    return NGF_SAMPLE_COUNT_8;\n  case 16:\n    return NGF_SAMPLE_COUNT_16;\n  case 32:\n    return NGF_SAMPLE_COUNT_32;\n  case 64:\n    return NGF_SAMPLE_COUNT_64;\n  }\n  return NGF_SAMPLE_COUNT_1;\n}\n\nngfi::maybe_ngfptr<ngf_image_view_t>\nngf_image_view_t::make(const ngf_image_view_info& info) NGF_NOEXCEPT {\n  const auto maybe_texture_type = get_mtl_texture_type(\n      info.view_type,\n      info.nlayers,\n      ngfmtl_get_ngf_sample_count(info.src_image->texture->sampleCount()));\n  if (!maybe_texture_type) { return 
NGF_ERROR_OBJECT_CREATION_FAILED; }\n  MTL::Texture* view = info.src_image->texture->newTextureView(\n      get_mtl_pixel_format(info.view_format).format,\n      maybe_texture_type.value(),\n      NS::Range(info.base_mip_level, info.nmips),\n      NS::Range(info.base_layer, info.nlayers));\n  if (!view) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  auto image_view  = ngfi::unique_ptr<ngf_image_view_t>::make();\n  image_view->view = view;\n  return ngfi::move(image_view);\n}\n\nngfi::maybe_ngfptr<ngf_image_t> ngf_image_t::make(const ngf_image_info& info) NGF_NOEXCEPT {\n  ngf_id<MTL::TextureDescriptor> mtl_img_desc = id_default;\n\n  const MTL::PixelFormat fmt = get_mtl_pixel_format(info.format).format;\n  if (fmt == MTL::PixelFormatInvalid) {\n    NGFI_DIAG_ERROR(\"Image format %d not supported by Metal backend.\", info.format);\n    return NGF_ERROR_INVALID_FORMAT;\n  }\n\n  ngfi::value_or_ngferr<MTL::TextureType> maybe_texture_type =\n      get_mtl_texture_type(info.type, info.nlayers, info.sample_count);\n  if (!maybe_texture_type.has_value()) {\n    NGFI_DIAG_ERROR(\"Image type %d not supported by Metal backend.\", info.type);\n    return NGF_ERROR_INVALID_ENUM;\n  }\n  mtl_img_desc->setTextureType(maybe_texture_type.value());\n  mtl_img_desc->setPixelFormat(fmt);\n  mtl_img_desc->setWidth(info.extent.width);\n  mtl_img_desc->setHeight(info.extent.height);\n  mtl_img_desc->setDepth(info.extent.depth);\n  mtl_img_desc->setArrayLength(info.nlayers);\n  mtl_img_desc->setMipmapLevelCount(info.nmips);\n  mtl_img_desc->setStorageMode(MTL::StorageModePrivate);\n  mtl_img_desc->setSampleCount(info.sample_count);\n  if (info.usage_hint & NGF_IMAGE_USAGE_ATTACHMENT) {\n    mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageRenderTarget);\n  }\n  if (info.usage_hint & NGF_IMAGE_USAGE_SAMPLE_FROM) {\n    mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageShaderRead);\n  }\n  if (info.usage_hint & NGF_IMAGE_USAGE_STORAGE) {\n    
mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageShaderWrite);\n  }\n  auto image         = ngfi::unique_ptr<ngf_image_t>::make();\n  image->texture     = MTL_DEVICE->newTexture(mtl_img_desc.get());\n  image->usage_flags = info.usage_hint;\n  image->format      = info.format;\n  return ngfi::move(image);\n}\n\nngfi::maybe_ngfptr<ngf_sampler_t> ngf_sampler_t::make(const ngf_sampler_info& info) NGF_NOEXCEPT {\n  ngf_id<MTL::SamplerDescriptor> sampler_desc = id_default;\n  auto s = get_mtl_address_mode(info.wrap_u), t = get_mtl_address_mode(info.wrap_v),\n       r = get_mtl_address_mode(info.wrap_w);\n  if (!(s && t && r)) { return NGF_ERROR_INVALID_ENUM; }\n  sampler_desc->setSAddressMode(s.value());\n  sampler_desc->setTAddressMode(t.value());\n  sampler_desc->setRAddressMode(r.value());\n  sampler_desc->setMinFilter(get_mtl_minmag_filter(info.min_filter));\n  sampler_desc->setMagFilter(get_mtl_minmag_filter(info.mag_filter));\n  sampler_desc->setMipFilter(get_mtl_mip_filter(info.mip_filter));\n  sampler_desc->setMaxAnisotropy(info.enable_anisotropy ? 
(NS::UInteger)info.max_anisotropy : 1);\n  sampler_desc->setLodMinClamp(info.lod_min);\n  sampler_desc->setLodMaxClamp(info.lod_max);\n  if (info.compare_op != NGF_COMPARE_OP_NEVER) {\n    sampler_desc->setCompareFunction(get_mtl_compare_function(info.compare_op));\n  }\n\n  auto sampler     = ngfi::unique_ptr<ngf_sampler_t>::make();\n  sampler->sampler = CURRENT_CONTEXT->device->newSamplerState(sampler_desc.get());\n  return ngfi::move(sampler);\n}\n\nngfi::maybe_ngfptr<ngf_render_target_t>\nngf_render_target_t::make(const ngf_render_target_info& info) NGF_NOEXCEPT {\n  return ngf_render_target_t::make(\n      *info.attachment_descriptions,\n      info.attachment_image_refs,\n      (uint32_t)info.attachment_image_refs[0].image->texture->width(),\n      (uint32_t)info.attachment_image_refs[0].image->texture->height());\n}\n\nngfi::maybe_ngfptr<ngf_render_target_t> ngf_render_target_t::make(\n    const ngf_attachment_descriptions& attachment_descs,\n    const ngf_image_ref*               img_refs,\n    uint32_t                           rt_width,\n    uint32_t                           rt_height) NGF_NOEXCEPT {\n  auto rt    = ngfi::unique_ptr<ngf_render_target_t>::make();\n  rt->width  = rt_width;\n  rt->height = rt_height;\n\n  ngf_attachment_description* attachment_descs_copy =\n      NGFI_ALLOCN(ngf_attachment_description, attachment_descs.ndescs);\n  rt->attachment_descs.descs = attachment_descs_copy;\n  if (!rt->attachment_descs.descs) { return NGF_ERROR_OUT_OF_MEM; }\n  rt->attachment_descs.ndescs = attachment_descs.ndescs;\n  for (uint32_t i = 0; i < rt->attachment_descs.ndescs; ++i) {\n    if (attachment_descs.descs[i].is_resolve) {\n      ++rt->nresolve_attachments;\n    } else {\n      ++rt->nrender_attachments;\n    }\n    attachment_descs_copy[i] = attachment_descs.descs[i];\n  }\n\n  if (img_refs) {\n    rt->render_image_refs = ngfi::fixed_array<ngf_image_ref> {rt->nrender_attachments};\n    if (rt->nresolve_attachments > 0u) {\n      
rt->resolve_image_refs = ngfi::fixed_array<ngf_image_ref> {rt->nresolve_attachments};\n    }\n\n    uint32_t image_ref_idx         = 0u;\n    uint32_t resolve_image_ref_idx = 0u;\n    for (uint32_t i = 0; i < rt->attachment_descs.ndescs; ++i) {\n      if (!rt->attachment_descs.descs[i].is_resolve) {\n        rt->render_image_refs[image_ref_idx++] = img_refs[i];\n      } else if (rt->nresolve_attachments > 0u) {\n        rt->resolve_image_refs[resolve_image_ref_idx++] = img_refs[i];\n      } else {\n        assert(0);\n      }\n    }\n  }\n  return ngfi::move(rt);\n}\n\nngfi::maybe_ngfptr<ngf_context_t> ngf_context_t::make(const ngf_context_info& info) NGF_NOEXCEPT {\n  auto ctx = ngfi::unique_ptr<ngf_context_t>::make();\n  if (!ctx) { return NGF_ERROR_OUT_OF_MEM; }\n\n  ctx->device = MTL_DEVICE;\n  ctx->queue  = ctx->device->newCommandQueue();\n\n  if (info.swapchain_info) {\n    ctx->swapchain_info = *(info.swapchain_info);\n    ngf_error err       = ctx->swapchain.initialize(ctx->swapchain_info, ctx->device.get());\n    if (err != NGF_ERROR_OK) return err;\n    ngf_attachment_descriptions attachment_descs;\n    ngf_attachment_description  desc_array[3];\n    attachment_descs.descs     = desc_array;\n    attachment_descs.ndescs    = 1;\n    desc_array[0].format       = ctx->swapchain_info.color_format;\n    desc_array[0].type         = NGF_ATTACHMENT_COLOR;\n    desc_array[0].sample_count = ctx->swapchain_info.sample_count;\n    desc_array[0].is_resolve   = false;\n    if (ctx->swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED) {\n      attachment_descs.ndescs++;\n      desc_array[1].format = ctx->swapchain_info.depth_format;\n      desc_array[1].type   = ctx->swapchain_info.depth_format == NGF_IMAGE_FORMAT_DEPTH24_STENCIL8\n                                 ? 
NGF_ATTACHMENT_DEPTH_STENCIL\n                                 : NGF_ATTACHMENT_DEPTH;\n      desc_array[1].sample_count = ctx->swapchain_info.sample_count;\n      desc_array[1].is_resolve   = false;\n    }\n\n    auto maybe_default_rt = ngf_render_target_t::make(\n        attachment_descs,\n        nullptr,\n        info.swapchain_info->width,\n        info.swapchain_info->height);\n    if (maybe_default_rt.has_error()) { return maybe_default_rt.error(); }\n    ctx->default_rt             = maybe_default_rt.value().release();\n    ctx->default_rt->is_default = true;\n  }\n  ctx->frame_sync_sem = dispatch_semaphore_create(ctx->swapchain_info.capacity_hint);\n  return ngfi::move(ctx);\n}\n\nngfi::maybe_ngfptr<ngf_shader_stage_t>\nngf_shader_stage_t::make(const ngf_shader_stage_info& info) NGF_NOEXCEPT {\n  auto stage = ngfi::unique_ptr<ngf_shader_stage_t>::make();\n  if (!stage) { return NGF_ERROR_OUT_OF_MEM; }\n\n  stage->type        = info.type;\n  stage->source_code = ngfi::fixed_array {(const char*)info.content, info.content_length};\n\n  // Create a MTLLibrary for this stage.\n  ngf_id<NS::String> source = NS::String::alloc()->init(\n      (void*)info.content,\n      info.content_length,\n      NS::UTF8StringEncoding,\n      false);\n  ngf_id<MTL::CompileOptions> opts = id_default;\n  NS::Error*                  err  = nullptr;\n  stage->func_lib = CURRENT_CONTEXT->device->newLibrary(source.get(), opts.get(), &err);\n  if (!stage->func_lib) {\n    NGFI_DIAG_ERROR(err->localizedDescription()->utf8String());\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n\n  // Set debug name.\n  if (info.debug_name != nullptr) {\n    stage->func_lib->setLabel(\n        ngf_id<NS::String>(NS::String::alloc()->init(info.debug_name, NS::UTF8StringEncoding))\n            .get());\n  }\n  stage->entry_point_name =\n      ngfi::fixed_array {info.entry_point_name, strlen(info.entry_point_name) + 1};\n  return ngfi::move(stage);\n}\n\nngfi::array<ngf_device, 
ngfi::system_alloc_callbacks> NGFMTL_DEVICES_LIST;\nconst NS::Array*                                      NGFMTL_MTL_DEVICES;\n\n#pragma mark ngf_function_implementations\n\nngf_error ngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT {\n  if (NGFMTL_DEVICES_LIST.empty()) {\n#if TARGET_OS_OSX\n    NGFMTL_MTL_DEVICES = MTL::CopyAllDevices();\n    NGFMTL_DEVICES_LIST.resize(NGFMTL_MTL_DEVICES->count());\n    for (uint32_t d = 0u; d < NGFMTL_MTL_DEVICES->count(); ++d) {\n      ngfmtl_populate_ngf_device(\n          d,\n          NGFMTL_DEVICES_LIST[d],\n          static_cast<MTL::Device*>(NGFMTL_MTL_DEVICES->object(d)));\n    }\n#else\n    NGFMTL_MTL_DEVICES = NS::Array::array(MTLCreateSystemDefaultDevice());\n    NGFMTL_DEVICES_LIST.resize(1);\n    ngfmtl_populate_ngf_device(\n        0,\n        NGFMTL_DEVICES_LIST[0],\n        (MTL::Device*)NGFMTL_MTL_DEVICES->object(0));\n#endif\n  }\n  if (devices) { *devices = NGFMTL_DEVICES_LIST.data(); }\n  if (ndevices) { *ndevices = (uint32_t)NGFMTL_DEVICES_LIST.size(); }\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT {\n  if (MTL_DEVICE != nullptr || init_info->device >= NGFMTL_DEVICES_LIST.size()) {\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  if (init_info->diag_info != NULL) {\n    ngfi_diag_info = *init_info->diag_info;\n  } else {\n    ngfi_diag_info.callback  = NULL;\n    ngfi_diag_info.userdata  = NULL;\n    ngfi_diag_info.verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT;\n  }\n  ngfi_set_allocation_callbacks(init_info->allocation_callbacks);\n\n  MTL_DEVICE = static_cast<MTL::Device*>(NGFMTL_MTL_DEVICES->object(init_info->device));\n\n  // Initialize device capabilities.\n  DEVICE_CAPS = NGFMTL_DEVICES_LIST[init_info->device].capabilities;\n\n  return (MTL_DEVICE != nullptr) ? 
NGF_ERROR_OK : NGF_ERROR_INVALID_OPERATION;\n}\n\nvoid ngf_shutdown() NGF_NOEXCEPT {\n  NGFI_DIAG_INFO(\"Shutting down nicegraf.\");\n}\n\nconst ngf_device_capabilities* ngf_get_device_capabilities() NGF_NOEXCEPT {\n  return &DEVICE_CAPS;\n}\n\nextern \"C\" {\nvoid* objc_autoreleasePoolPush(void);\nvoid  objc_autoreleasePoolPop(void* pool);\n}\n\nngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT {\n  *token = (uintptr_t)objc_autoreleasePoolPush();\n  dispatch_semaphore_wait(CURRENT_CONTEXT->frame_sync_sem, DISPATCH_TIME_FOREVER);\n  CURRENT_CONTEXT->frame = CURRENT_CONTEXT->swapchain.next_frame();\n  if (CURRENT_CONTEXT->frame.color_drawable &&\n      CURRENT_CONTEXT->swapchain.compute_access_enabled()) {\n    CURRENT_CONTEXT->frame.img_wrapper.texture =\n        CURRENT_CONTEXT->frame.color_drawable->texture()->newTextureView(\n            CURRENT_CONTEXT->swapchain.get_pixel_format());\n  }\n  return (!CURRENT_CONTEXT->frame.color_drawable) ? NGF_ERROR_INVALID_OPERATION : NGF_ERROR_OK;\n}\n\nngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT {\n  ngf_context ctx = CURRENT_CONTEXT;\n  if (CURRENT_CONTEXT->frame.color_drawable && CURRENT_CONTEXT->pending_cmd_buffer) {\n    CURRENT_CONTEXT->pending_cmd_buffer->addCompletedHandler(\n        [ctx](MTL::CommandBuffer*) { dispatch_semaphore_signal(ctx->frame_sync_sem); });\n    CURRENT_CONTEXT->pending_cmd_buffer->presentDrawable(CURRENT_CONTEXT->frame.color_drawable);\n    CURRENT_CONTEXT->last_cmd_buffer =\n        ngf_id<MTL::CommandBuffer>::add_retain(CURRENT_CONTEXT->pending_cmd_buffer);\n    CURRENT_CONTEXT->pending_cmd_buffer->commit();\n    CURRENT_CONTEXT->pending_cmd_buffer = nullptr;\n    CURRENT_CONTEXT->frame              = ngfmtl_swapchain::frame {};\n  } else {\n    dispatch_semaphore_signal(ctx->frame_sync_sem);\n  }\n  objc_autoreleasePoolPop((void*)token);\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT 
{\n  assert(CURRENT_CONTEXT);\n  *result = &CURRENT_CONTEXT->frame.img_wrapper;\n  return NGF_ERROR_OK;\n}\n\nngf_render_target ngf_default_render_target() NGF_NOEXCEPT {\n  return CURRENT_CONTEXT->default_rt;\n}\n\nconst ngf_attachment_descriptions* ngf_default_render_target_attachment_descs() NGF_NOEXCEPT {\n  return &CURRENT_CONTEXT->default_rt->attachment_descs;\n}\n\nngf_error\nngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT {\n  assert(ctx);\n  ctx->swapchain_info.width  = new_width;\n  ctx->swapchain_info.height = new_height;\n  ctx->default_rt->width     = new_width;\n  ctx->default_rt->height    = new_height;\n  return ctx->swapchain.resize(ctx->swapchain_info);\n}\n\nngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT {\n  CURRENT_CONTEXT = ctx;\n  ctx->is_current = true;\n  return NGF_ERROR_OK;\n}\n\nngf_context ngf_get_context() NGF_NOEXCEPT {\n  return CURRENT_CONTEXT;\n}\n\nvoid ngfmtl_attachment_set_common(\n    MTL::RenderPassAttachmentDescriptor* attachment,\n    uint32_t                             render_image_idx,\n    ngf_attachment_type                  type,\n    const ngf_render_target              rt,\n    ngf_attachment_load_op               load_op,\n    ngf_attachment_store_op              store_op) NGF_NOEXCEPT {\n  if (!rt->is_default) {\n    attachment->setTexture(rt->render_image_refs[render_image_idx].image->texture.get());\n    attachment->setLevel(rt->render_image_refs[render_image_idx].mip_level);\n    attachment->setSlice(rt->render_image_refs[render_image_idx].layer);\n  } else {\n    attachment->setTexture(\n        type == NGF_ATTACHMENT_COLOR ? 
CURRENT_CONTEXT->frame.color_attachment_texture()\n                                     : CURRENT_CONTEXT->frame.depth_attachment_texture());\n    attachment->setLevel(0);\n    attachment->setSlice(0);\n  }\n  attachment->setLoadAction(get_mtl_load_action(load_op));\n  attachment->setStoreAction(get_mtl_store_action(store_op));\n}\n\nuint8_t* ngf_map_buffer(MTL::Buffer* buffer, size_t offset, [[maybe_unused]] size_t size) {\n  return (uint8_t*)buffer->contents() + offset;\n}\n\nvoid* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT {\n  buf->mapped_offset = offset;\n  return (void*)ngf_map_buffer(buf->mtl_buffer.get(), offset, size);\n}\n\nvoid ngf_buffer_flush_range(\n    [[maybe_unused]] ngf_buffer buf,\n    [[maybe_unused]] size_t     offset,\n    [[maybe_unused]] size_t     size) NGF_NOEXCEPT {\n}\n\nvoid ngf_buffer_unmap(ngf_buffer) NGF_NOEXCEPT {\n}\n\nngf_error ngf_start_cmd_buffer(ngf_cmd_buffer cmd_buffer, ngf_frame_token) NGF_NOEXCEPT {\n  assert(cmd_buffer);\n  cmd_buffer->mtl_cmd_buffer = CURRENT_CONTEXT->queue->commandBuffer();\n  assert(!cmd_buffer->active_rce);\n  assert(!cmd_buffer->active_bce);\n  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_READY);\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_submit_cmd_buffers(uint32_t n, ngf_cmd_buffer* cmd_buffers) NGF_NOEXCEPT {\n  if (CURRENT_CONTEXT->pending_cmd_buffer) {\n    CURRENT_CONTEXT->pending_cmd_buffer->commit();\n    CURRENT_CONTEXT->pending_cmd_buffer = nullptr;\n  }\n  for (uint32_t b = 0u; b < n; ++b) {\n    NGFI_TRANSITION_CMD_BUF(cmd_buffers[b], ngfi::CMD_BUFFER_STATE_PENDING);\n    if (b < n - 1u) {\n      cmd_buffers[b]->mtl_cmd_buffer->commit();\n    } else {\n      CURRENT_CONTEXT->pending_cmd_buffer = cmd_buffers[b]->mtl_cmd_buffer;\n    }\n    cmd_buffers[b]->mtl_cmd_buffer = nullptr;\n    NGFI_TRANSITION_CMD_BUF(cmd_buffers[b], ngfi::CMD_BUFFER_STATE_SUBMITTED);\n  }\n  return NGF_ERROR_OK;\n}\n\nvoid 
ngfmtl_finish_pending_encoders(ngf_cmd_buffer cmd_buffer) {\n  /* End any current Metal encoders.*/\n  if (cmd_buffer->active_rce) {\n    cmd_buffer->active_rce->endEncoding();\n    cmd_buffer->active_rce = nullptr;\n  } else if (cmd_buffer->active_bce) {\n    cmd_buffer->active_bce->endEncoding();\n    cmd_buffer->active_bce = nullptr;\n  } else if (cmd_buffer->active_cce) {\n    cmd_buffer->active_cce->endEncoding();\n    cmd_buffer->active_cce = nullptr;\n  }\n}\n\nngf_error ngf_cmd_begin_render_pass_simple(\n    ngf_cmd_buffer      cmd_buf,\n    ngf_render_target   rt,\n    float               clear_color_r,\n    float               clear_color_g,\n    float               clear_color_b,\n    float               clear_color_a,\n    float               clear_depth,\n    uint32_t            clear_stencil,\n    ngf_render_encoder* enc) NGF_NOEXCEPT {\n  ngfi::tmp_arena().reset();\n  const uint32_t nattachments = rt->attachment_descs.ndescs;\n  auto           load_ops     = ngfi::tmp_alloc<ngf_attachment_load_op>(nattachments);\n  auto           store_ops    = ngfi::tmp_alloc<ngf_attachment_store_op>(nattachments);\n  auto           clears       = ngfi::tmp_alloc<ngf_clear>(nattachments);\n\n  for (size_t i = 0u; i < nattachments; ++i) {\n    load_ops[i] = NGF_LOAD_OP_CLEAR;\n    if (rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_COLOR) {\n      clears[i].clear_color[0] = clear_color_r;\n      clears[i].clear_color[1] = clear_color_g;\n      clears[i].clear_color[2] = clear_color_b;\n      clears[i].clear_color[3] = clear_color_a;\n    } else if (rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_DEPTH ||\n               rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_DEPTH_STENCIL) {\n      clears[i].clear_depth_stencil.clear_depth   = clear_depth;\n      clears[i].clear_depth_stencil.clear_stencil = clear_stencil;\n    } else {\n      assert(false);\n    }\n    const bool needs_resolve = rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_COLOR &&\n  
                             rt->attachment_descs.descs[i].sample_count > NGF_SAMPLE_COUNT_1 &&\n                               (rt->resolve_image_refs.data() || rt->is_default);\n    store_ops[i] = (needs_resolve) ? NGF_STORE_OP_RESOLVE : NGF_STORE_OP_STORE;\n  }\n  const ngf_render_pass_info pass_info =\n      {.render_target = rt, .load_ops = load_ops, .store_ops = store_ops, .clears = clears};\n  return ngf_cmd_begin_render_pass(cmd_buf, &pass_info, enc);\n}\n\nngf_error ngf_cmd_begin_render_pass(\n    ngf_cmd_buffer              cmd_buffer,\n    const ngf_render_pass_info* pass_info,\n    ngf_render_encoder*         enc) NGF_NOEXCEPT {\n  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_RECORDING);\n  assert(pass_info);\n  const ngf_render_target rt = pass_info->render_target;\n  assert(rt);\n  assert(cmd_buffer);\n\n  ngfmtl_finish_pending_encoders(cmd_buffer);\n  cmd_buffer->renderpass_active = true;\n\n  uint32_t                          color_attachment_idx   = 0u;\n  uint32_t                          resolve_attachment_idx = 0u;\n  uint32_t                          render_image_idx       = 0u;\n  ngf_id<MTL::RenderPassDescriptor> pass_descriptor        = id_default;\n  pass_descriptor->setRenderTargetWidth(rt->width);\n  pass_descriptor->setRenderTargetHeight(rt->height);\n  pass_descriptor->setDepthAttachment(nullptr);\n  pass_descriptor->setStencilAttachment(nullptr);\n\n  if (cmd_buffer->sample_buf_attachment_for_next_render_pass) {\n    const auto& attachment_descriptor = cmd_buffer->sample_buf_attachment_for_next_render_pass;\n    const auto  attachment            = pass_descriptor->sampleBufferAttachments()->object(0);\n\n    attachment->setSampleBuffer(attachment_descriptor->sampleBuffer());\n\n    if (attachment_descriptor->startOfVertexSampleIndex() <\n        attachment_descriptor->endOfVertexSampleIndex()) {\n      attachment->setStartOfVertexSampleIndex(attachment_descriptor->startOfVertexSampleIndex());\n      
attachment->setEndOfVertexSampleIndex(attachment_descriptor->endOfVertexSampleIndex());\n    }\n\n    if (attachment_descriptor->startOfFragmentSampleIndex() <\n        attachment_descriptor->endOfFragmentSampleIndex()) {\n      attachment->setStartOfFragmentSampleIndex(\n          attachment_descriptor->startOfFragmentSampleIndex());\n      attachment->setEndOfFragmentSampleIndex(attachment_descriptor->endOfFragmentSampleIndex());\n    }\n\n    cmd_buffer->sample_buf_attachment_for_next_render_pass = nullptr;\n  }\n\n  for (uint32_t i = 0u; i < rt->attachment_descs.ndescs; ++i) {\n    const ngf_attachment_description& attachment_desc = rt->attachment_descs.descs[i];\n    if (attachment_desc.is_resolve) { continue; }\n    const ngf_attachment_load_op  load_op  = pass_info->load_ops[i];\n    const ngf_attachment_store_op store_op = pass_info->store_ops[i];\n    const ngf_clear_info*         clear_info =\n        load_op == NGF_LOAD_OP_CLEAR && pass_info->clears ? &pass_info->clears[i] : nullptr;\n    switch (attachment_desc.type) {\n    case NGF_ATTACHMENT_COLOR: {\n      ngf_id<MTL::RenderPassColorAttachmentDescriptor> mtl_desc = id_default;\n      ngfmtl_attachment_set_common(\n          mtl_desc.get(),\n          render_image_idx++,\n          attachment_desc.type,\n          rt,\n          load_op,\n          store_op);\n      if (clear_info) {\n        mtl_desc->setClearColor(MTL::ClearColor::Make(\n            clear_info->clear_color[0],\n            clear_info->clear_color[1],\n            clear_info->clear_color[2],\n            clear_info->clear_color[3]));\n      }\n\n      if (attachment_desc.sample_count > NGF_SAMPLE_COUNT_1) {\n        if (rt->is_default) {\n          mtl_desc->setResolveTexture(CURRENT_CONTEXT->frame.resolve_attachment_texture());\n        } else if (rt->resolve_image_refs.data()) {\n          mtl_desc->setResolveTexture(\n              rt->resolve_image_refs[resolve_attachment_idx++].image->texture.get());\n        }\n      }\n\n      
pass_descriptor->colorAttachments()->setObject(mtl_desc.get(), color_attachment_idx++);\n      break;\n    }\n    case NGF_ATTACHMENT_DEPTH: {\n      ngf_id<MTL::RenderPassDepthAttachmentDescriptor> mtl_desc = id_default;\n      ngfmtl_attachment_set_common(\n          mtl_desc.get(),\n          render_image_idx++,\n          attachment_desc.type,\n          rt,\n          load_op,\n          store_op);\n      if (clear_info) { mtl_desc->setClearDepth(clear_info->clear_depth_stencil.clear_depth); }\n      pass_descriptor->setDepthAttachment(mtl_desc.get());\n      break;\n    }\n    case NGF_ATTACHMENT_DEPTH_STENCIL: {\n      const uint32_t ds_image_idx = render_image_idx++;\n      ngf_id<MTL::RenderPassDepthAttachmentDescriptor> mtl_depth_desc = id_default;\n      ngfmtl_attachment_set_common(\n          mtl_depth_desc.get(),\n          ds_image_idx,\n          attachment_desc.type,\n          rt,\n          load_op,\n          store_op);\n      if (clear_info) {\n        mtl_depth_desc->setClearDepth(clear_info->clear_depth_stencil.clear_depth);\n      }\n      pass_descriptor->setDepthAttachment(mtl_depth_desc.get());\n      ngf_id<MTL::RenderPassStencilAttachmentDescriptor> mtl_stencil_desc = id_default;\n      ngfmtl_attachment_set_common(\n          mtl_stencil_desc.get(),\n          ds_image_idx,\n          attachment_desc.type,\n          rt,\n          load_op,\n          store_op);\n      if (clear_info) {\n        mtl_stencil_desc->setClearStencil(clear_info->clear_depth_stencil.clear_stencil);\n      }\n      pass_descriptor->setStencilAttachment(mtl_stencil_desc.get());\n      break;\n    }\n    }\n  }\n\n  assert(!cmd_buffer->active_rce);\n  cmd_buffer->active_rce = cmd_buffer->mtl_cmd_buffer->renderCommandEncoder(pass_descriptor.get());\n  cmd_buffer->active_rt  = rt;\n\n  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buffer;\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT {\n  auto cmd_buffer = 
NGFMTL_ENC2CMDBUF(enc);\n  if (cmd_buffer->active_rce) {\n    cmd_buffer->active_rce->endEncoding();\n    cmd_buffer->active_rce      = nullptr;\n    cmd_buffer->active_gfx_pipe = nullptr;\n  }\n  cmd_buffer->renderpass_active = false;\n  cmd_buffer->active_rt         = nullptr;\n  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n\n  return NGF_ERROR_OK;\n}\n\nngf_error\nngf_cmd_begin_xfer_pass(ngf_cmd_buffer cmd_buf, const ngf_xfer_pass_info*, ngf_xfer_encoder* enc)\n    NGF_NOEXCEPT {\n  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_RECORDING);\n  ngfmtl_finish_pending_encoders(cmd_buf);\n  cmd_buf->xfer_pass_active = true;\n  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buf;\n  cmd_buf->active_bce       = cmd_buf->mtl_cmd_buffer->blitCommandEncoder();\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT {\n  auto cmd_buf              = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->xfer_pass_active = false;\n  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  if (cmd_buf->active_bce) {\n    cmd_buf->active_bce->endEncoding();\n    cmd_buf->active_bce = nullptr;\n  }\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_cmd_begin_compute_pass(\n    ngf_cmd_buffer               cmd_buffer,\n    const ngf_compute_pass_info* pass_info,\n    ngf_compute_encoder*         enc) NGF_NOEXCEPT {\n  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_RECORDING);\n  cmd_buffer->compute_pass_active                    = true;\n  ngf_id<MTL::ComputePassDescriptor> pass_descriptor = id_default;\n\n  if (cmd_buffer->sample_buf_attachment_for_next_compute_pass) {\n    const auto& attachment_descriptor = cmd_buffer->sample_buf_attachment_for_next_compute_pass;\n    const auto  attachment            = pass_descriptor->sampleBufferAttachments()->object(0);\n\n    attachment->setSampleBuffer(attachment_descriptor->sampleBuffer());\n\n    assert(\n        
attachment_descriptor->startOfEncoderSampleIndex() <\n        attachment_descriptor->endOfEncoderSampleIndex());\n    attachment->setStartOfEncoderSampleIndex(attachment_descriptor->startOfEncoderSampleIndex());\n    attachment->setEndOfEncoderSampleIndex(attachment_descriptor->endOfEncoderSampleIndex());\n\n    cmd_buffer->sample_buf_attachment_for_next_compute_pass = nullptr;\n  }\n\n  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buffer;\n  cmd_buffer->active_cce = cmd_buffer->mtl_cmd_buffer->computeCommandEncoder(pass_descriptor.get());\n\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(cmd_buf);\n  cmd_buf->compute_pass_active = false;\n  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  if (cmd_buf->active_cce) {\n    cmd_buf->active_cce->endEncoding();\n    cmd_buf->active_cce          = nullptr;\n    cmd_buf->active_compute_pipe = nullptr;\n  }\n  return NGF_ERROR_OK;\n}\n\nstatic void ngfmtl_apply_set_bytes_gfx(ngf_cmd_buffer cmd_buf) {\n  if (cmd_buf->pending_pc_size == 0u || !cmd_buf->active_rce || !cmd_buf->active_gfx_pipe) return;\n  const uint32_t slot = cmd_buf->active_gfx_pipe->niceshade_metadata.push_const_native_binding;\n  if (slot == ~0u) return;\n  cmd_buf->active_rce->setVertexBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);\n  cmd_buf->active_rce->setFragmentBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);\n}\n\nstatic void ngfmtl_apply_set_bytes_compute(ngf_cmd_buffer cmd_buf) {\n  if (cmd_buf->pending_pc_size == 0u || !cmd_buf->active_cce || !cmd_buf->active_compute_pipe) return;\n  const uint32_t slot =\n      cmd_buf->active_compute_pipe->niceshade_metadata.push_const_native_binding;\n  if (slot == ~0u) return;\n  cmd_buf->active_cce->setBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);\n}\n\nvoid ngf_cmd_bind_compute_pipeline(ngf_compute_encoder enc, 
ngf_compute_pipeline pipeline)\n    NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(cmd_buf);\n  assert(cmd_buf->active_cce);\n  if (!cmd_buf->active_cce) {\n    NGFI_DIAG_ERROR(\"Attempt to bind compute pipeline without an active compute encoder\");\n    return;\n  }\n  cmd_buf->active_cce->setComputePipelineState(pipeline->pipeline.get());\n  cmd_buf->active_compute_pipe = pipeline;\n  ngfmtl_apply_set_bytes_compute(cmd_buf);\n}\n\nvoid ngf_cmd_dispatch(\n    ngf_compute_encoder enc,\n    uint32_t            x_threadgroups,\n    uint32_t            y_threadgroups,\n    uint32_t            z_threadgroups) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(cmd_buf->active_cce);\n  if (!cmd_buf->active_cce) {\n    NGFI_DIAG_ERROR(\"Attempt to perform a compute dispatch without an active \"\n                    \"compute encoder.\");\n    return;\n  }\n  assert(cmd_buf->active_compute_pipe);\n  if (!cmd_buf->active_compute_pipe) {\n    NGFI_DIAG_ERROR(\"Attempt to perform a compute dispatch without a bound \"\n                    \"compute pipeline.\");\n    return;\n  }\n  const uint32_t* threadgroup_size =\n      cmd_buf->active_compute_pipe->niceshade_metadata.threadgroup_size;\n      cmd_buf->active_cce->dispatchThreadgroups(MTL::Size::Make(x_threadgroups, y_threadgroups, z_threadgroups),\n                                                MTL::Size::Make(threadgroup_size[0], threadgroup_size[1], threadgroup_size[2]));\n}\n\nvoid ngf_cmd_bind_gfx_pipeline(ngf_render_encoder enc, const ngf_graphics_pipeline pipeline)\n    NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(enc);\n  buf->active_rce->setRenderPipelineState(pipeline->pipeline.get());\n  buf->active_rce->setCullMode(pipeline->culling);\n  buf->active_rce->setFrontFacingWinding(pipeline->winding);\n\n  buf->active_rce->setBlendColor(\n      pipeline->blend_color[0],\n      pipeline->blend_color[1],\n      pipeline->blend_color[2],\n      pipeline->blend_color[3]);\n  if 
(pipeline->depth_stencil) {\n    buf->active_rce->setDepthStencilState(pipeline->depth_stencil.get());\n  }\n  buf->active_rce->setStencilReferenceValues(\n      pipeline->front_stencil_reference,\n      pipeline->back_stencil_reference);\n  buf->active_gfx_pipe = pipeline;\n  ngfmtl_apply_set_bytes_gfx(buf);\n}\n\nvoid ngf_cmd_viewport(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {\n  auto          buf = NGFMTL_ENC2CMDBUF(enc);\n  MTL::Viewport viewport;\n  viewport.originX = r->x;\n  viewport.originY = r->y + (int32_t)r->height;\n  viewport.width   = r->width;\n  viewport.height  = -1.0 * r->height;\n\n  // TODO: fix\n  viewport.znear = 0.0f;\n  viewport.zfar  = 1.0f;\n\n  buf->active_rce->setViewport(viewport);\n}\n\nvoid ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {\n  auto             buf = NGFMTL_ENC2CMDBUF(enc);\n  MTL::ScissorRect scissor;\n  scissor.x      = (uint32_t)r->x;\n  scissor.y      = (uint32_t)r->y;\n  scissor.width  = r->width;\n  scissor.height = r->height;\n  buf->active_rce->setScissorRect(scissor);\n}\n\nvoid ngf_cmd_draw(\n    ngf_render_encoder enc,\n    bool               indexed,\n    uint32_t           first_element,\n    uint32_t           nelements,\n    uint32_t           ninstances) NGF_NOEXCEPT {\n  auto               buf       = NGFMTL_ENC2CMDBUF(enc);\n  MTL::PrimitiveType prim_type = buf->active_gfx_pipe->primitive_type;\n  if (!indexed) {\n    buf->active_rce->drawPrimitives(prim_type, first_element, nelements, ninstances, 0);\n  } else {\n    buf->active_rce->drawIndexedPrimitives(\n        prim_type,\n        nelements,\n        buf->bound_index_buffer_type,\n        buf->bound_index_buffer.get(),\n        buf->bound_index_buffer_offset +\n            first_element * (buf->bound_index_buffer_type == MTL::IndexTypeUInt16 ? 
2 : 4),\n        ninstances,\n        0,\n        0);\n  }\n}\n\nvoid ngf_cmd_bind_attrib_buffer(\n    ngf_render_encoder enc,\n    const ngf_buffer   buf,\n    uint32_t           binding,\n    size_t             offset) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->active_rce->setVertexBuffer(\n      buf->mtl_buffer.get(),\n      offset,\n      MAX_BUFFER_BINDINGS - binding);\n}\n\nvoid ngf_cmd_bind_index_buffer(\n    ngf_render_encoder enc,\n    const ngf_buffer   buf,\n    size_t             offset,\n    ngf_type           type) NGF_NOEXCEPT {\n  auto cmd_buf                       = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->bound_index_buffer        = ngf_id<MTL::Buffer>::add_retain(buf->mtl_buffer.get());\n  cmd_buf->bound_index_buffer_type   = get_mtl_index_type(type);\n  cmd_buf->bound_index_buffer_offset = offset;\n}\n\nvoid ngf_cmd_bind_resources(\n    ngf_render_encoder          enc,\n    const ngf_resource_bind_op* bind_ops,\n    uint32_t                    nbind_ops) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(cmd_buf);\n  for (uint32_t o = 0u; o < nbind_ops; ++o) {\n    const ngf_resource_bind_op& bind_op = bind_ops[o];\n    assert(cmd_buf->active_gfx_pipe);\n    if (!cmd_buf->active_gfx_pipe) {\n      NGFI_DIAG_ERROR(\"Attempt to bind resources without a bound graphics pipeline.\");\n      return;\n    }\n    assert(cmd_buf->active_rce);\n    if (!cmd_buf->active_rce) {\n      NGFI_DIAG_ERROR(\"Attempt to bind resources without an active render \"\n                      \"command encoder.\");\n      return;\n    }\n    const uint32_t native_binding =\n        cmd_buf->active_gfx_pipe->niceshade_metadata\n            .native_binding_map[bind_op.target_set][bind_op.target_binding] +\n        bind_op.array_index;\n    if (native_binding == ~0) {\n      NGFI_DIAG_ERROR(\n          \"Failed to find native binding for set %d binding %d\",\n          bind_op.target_set,\n          bind_op.target_binding);\n      
continue;\n    }\n    switch (bind_op.type) {\n    case NGF_DESCRIPTOR_TEXEL_BUFFER: {\n      cmd_buf->active_rce->setVertexTexture(\n          bind_op.info.texel_buffer_view->mtl_buffer_view.get(),\n          native_binding);\n      cmd_buf->active_rce->setFragmentTexture(\n          bind_op.info.texel_buffer_view->mtl_buffer_view.get(),\n          native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_STORAGE_BUFFER:\n    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {\n      const ngf_buffer_bind_info& buf_bind_op = bind_op.info.buffer;\n      const ngf_buffer            buf         = buf_bind_op.buffer;\n      size_t                      offset      = buf_bind_op.offset;\n      cmd_buf->active_rce->setVertexBuffer(buf->mtl_buffer.get(), offset, native_binding);\n      cmd_buf->active_rce->setFragmentBuffer(buf->mtl_buffer.get(), offset, native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      MTL::Texture* t = img_bind_op.is_image_view ? img_bind_op.resource.view->view.get()\n                                                  : img_bind_op.resource.image->texture.get();\n      cmd_buf->active_rce->setVertexTexture(t, native_binding);\n      cmd_buf->active_rce->setVertexSamplerState(\n          img_bind_op.sampler->sampler.get(),\n          native_binding);\n      cmd_buf->active_rce->setFragmentTexture(t, native_binding);\n      cmd_buf->active_rce->setFragmentSamplerState(\n          img_bind_op.sampler->sampler.get(),\n          native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_IMAGE: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      MTL::Texture* t = img_bind_op.is_image_view ? 
img_bind_op.resource.view->view.get()\n                                                  : img_bind_op.resource.image->texture.get();\n      cmd_buf->active_rce->setVertexTexture(t, native_binding);\n      cmd_buf->active_rce->setFragmentTexture(t, native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_SAMPLER: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      cmd_buf->active_rce->setVertexSamplerState(\n          img_bind_op.sampler->sampler.get(),\n          native_binding);\n      cmd_buf->active_rce->setFragmentSamplerState(\n          img_bind_op.sampler->sampler.get(),\n          native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_STORAGE_IMAGE:\n      NGFI_DIAG_ERROR(\"Binding storage images to non-compute shader is \"\n                      \"currently unsupported.\");\n      break;\n    case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:\n      cmd_buf->active_rce->setVertexAccelerationStructure(\n          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure,\n          native_binding);\n      cmd_buf->active_rce->setFragmentAccelerationStructure(\n          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure,\n          native_binding);\n      break;\n    case NGF_DESCRIPTOR_TYPE_COUNT:\n      assert(false);\n    }\n  }\n}\n\nstatic ngfi::value_or_ngferr<ngf_image_format>\nget_regular_format_from_srgb(const ngf_image_format f) {\n  switch (f) {\n  case NGF_IMAGE_FORMAT_SRGB8:\n    return NGF_IMAGE_FORMAT_RGB8;\n  case NGF_IMAGE_FORMAT_SRGBA8:\n    return NGF_IMAGE_FORMAT_RGBA8;\n  case NGF_IMAGE_FORMAT_BGR8_SRGB:\n    return NGF_IMAGE_FORMAT_BGR8;\n  case NGF_IMAGE_FORMAT_BGRA8_SRGB:\n    return NGF_IMAGE_FORMAT_BGRA8;\n  default:\n    return NGF_ERROR_INVALID_ENUM;\n  }\n}\n\nvoid ngf_cmd_bind_compute_resources(\n    ngf_compute_encoder         enc,\n    const ngf_resource_bind_op* bind_ops,\n    uint32_t                    nbind_ops) NGF_NOEXCEPT {\n  auto cmd_buf = 
NGFMTL_ENC2CMDBUF(enc);\n  assert(cmd_buf);\n  for (uint32_t o = 0u; o < nbind_ops; ++o) {\n    const ngf_resource_bind_op& bind_op = bind_ops[o];\n    assert(cmd_buf->active_compute_pipe);\n    if (!cmd_buf->active_compute_pipe) {\n      NGFI_DIAG_ERROR(\"Attempt to bind resources without a bound compute pipeline.\");\n      return;\n    }\n    assert(cmd_buf->active_cce);\n    if (!cmd_buf->active_cce) {\n      NGFI_DIAG_ERROR(\"Attempt to bind resources without an active compute \"\n                      \"command encoder.\");\n      return;\n    }\n    const uint32_t native_binding =\n        cmd_buf->active_compute_pipe->niceshade_metadata\n            .native_binding_map[bind_op.target_set][bind_op.target_binding] +\n        bind_op.array_index;\n    if (native_binding == ~0) {\n      NGFI_DIAG_ERROR(\n          \"Failed to find native binding for set %d binding %d\",\n          bind_op.target_set,\n          bind_op.target_binding);\n      continue;\n    }\n    switch (bind_op.type) {\n    case NGF_DESCRIPTOR_TEXEL_BUFFER: {\n      cmd_buf->active_cce->setTexture(\n          bind_op.info.texel_buffer_view->mtl_buffer_view.get(),\n          native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_STORAGE_BUFFER:\n    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {\n      const ngf_buffer_bind_info& buf_bind_op = bind_op.info.buffer;\n      const ngf_buffer            buf         = buf_bind_op.buffer;\n      size_t                      offset      = buf_bind_op.offset;\n      cmd_buf->active_cce->setBuffer(buf->mtl_buffer.get(), offset, native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      MTL::Texture* t = img_bind_op.is_image_view ? 
img_bind_op.resource.view->view.get()\n                                                  : img_bind_op.resource.image->texture.get();\n      cmd_buf->active_cce->setTexture(t, native_binding);\n      cmd_buf->active_cce->setSamplerState(img_bind_op.sampler->sampler.get(), native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_STORAGE_IMAGE:\n    case NGF_DESCRIPTOR_IMAGE: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      if (img_bind_op.is_image_view) {\n        cmd_buf->active_cce->setTexture(img_bind_op.resource.view->view.get(), native_binding);\n      } else {\n        if (const auto maybe_format =\n                get_regular_format_from_srgb(img_bind_op.resource.image->format)) {\n          if (!img_bind_op.resource.image->non_srgb_view)\n            img_bind_op.resource.image->non_srgb_view =\n                img_bind_op.resource.image->texture.get()->newTextureView(\n                    get_mtl_pixel_format(maybe_format.value()).format);\n          cmd_buf->active_cce->setTexture(\n              img_bind_op.resource.image->non_srgb_view.get(),\n              native_binding);\n        } else {\n          cmd_buf->active_cce->setTexture(\n              img_bind_op.resource.image->texture.get(),\n              native_binding);\n        }\n      }\n      break;\n    }\n    case NGF_DESCRIPTOR_SAMPLER: {\n      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;\n      cmd_buf->active_cce->setSamplerState(img_bind_op.sampler->sampler.get(), native_binding);\n      break;\n    }\n    case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:\n      cmd_buf->active_cce->setAccelerationStructure(\n          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure,\n          native_binding);\n      break;\n    case NGF_DESCRIPTOR_TYPE_COUNT:\n      assert(false);\n    }\n  }\n}\n\nvoid ngfmtl_cmd_copy_buffer(\n    ngf_xfer_encoder enc,\n    MTL::Buffer*     src,\n    MTL::Buffer*     dst,\n    
size_t           size,\n    size_t           src_offset,\n    size_t           dst_offset) {\n  auto buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(buf->active_rce == nullptr);\n  buf->active_bce->copyFromBuffer(src, src_offset, dst, dst_offset, size);\n}\n\nvoid ngf_cmd_copy_buffer(\n    ngf_xfer_encoder enc,\n    const ngf_buffer src,\n    ngf_buffer       dst,\n    size_t           size,\n    size_t           src_offset,\n    size_t           dst_offset) NGF_NOEXCEPT {\n  ngfmtl_cmd_copy_buffer(\n      enc,\n      src->mtl_buffer.get(),\n      dst->mtl_buffer.get(),\n      size,\n      src_offset,\n      dst_offset);\n}\n\nvoid ngf_cmd_write_image(\n    ngf_xfer_encoder       enc,\n    ngf_buffer             src,\n    ngf_image              dst,\n    const ngf_image_write* writes,\n    uint32_t               nwrites) NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(buf->active_rce == nil);\n  for (size_t i = 0u; i < nwrites; ++i) {\n    const ngf_image_write* w = &writes[i];\n    for (uint32_t l = 0u; l < w->nlayers; ++l) {\n      const uint32_t pitch    = ngfmtl_get_pitch(w->extent.width, dst->format);\n      const uint32_t num_rows = ngfmtl_get_num_rows(w->extent.height, dst->format);\n      buf->active_bce->copyFromBuffer(\n          src->mtl_buffer.get(),\n          w->src_offset + (l * pitch * num_rows),\n          pitch,\n          pitch * num_rows,\n          MTL::Size::Make(w->extent.width, w->extent.height, w->extent.depth),\n          dst->texture.get(),\n          w->dst_base_layer + l,\n          w->dst_level,\n          MTL::Origin::Make(\n              (NS::UInteger)w->dst_offset.x,\n              (NS::UInteger)w->dst_offset.y,\n              (NS::UInteger)w->dst_offset.z));\n    }\n  }\n}\n\nvoid ngf_cmd_copy_image_to_buffer(\n    ngf_xfer_encoder    enc,\n    const ngf_image_ref src,\n    ngf_offset3d        src_offset,\n    ngf_extent3d        src_extent,\n    uint32_t            nlayers,\n    ngf_buffer          dst,\n    size_t          
    dst_offset) NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(enc);\n  assert(buf->active_rce == nullptr);\n  const MTL::TextureType texture_type = src.image->texture->textureType();\n  const bool             is_cubemap =\n      texture_type == MTL::TextureTypeCube || texture_type == MTL::TextureTypeCubeArray;\n  const uint32_t src_slice =\n      (is_cubemap ? 6u : 1u) * src.layer + (is_cubemap ? src.cubemap_face : 0);\n  const uint32_t pitch    = ngfmtl_get_pitch(src_extent.width, src.image->format);\n  const uint32_t num_rows = ngfmtl_get_num_rows(src_extent.height, src.image->format);\n  for (uint32_t l = 0; l < nlayers; ++l) {\n    buf->active_bce->copyFromTexture(\n        src.image->texture.get(),\n        src_slice + l,\n        src.mip_level,\n        MTL::Origin::Make(\n            (NS::UInteger)src_offset.x,\n            (NS::UInteger)src_offset.y,\n            (NS::UInteger)src_offset.z),\n        MTL::Size::Make(src_extent.width, src_extent.height, src_extent.depth),\n        dst->mtl_buffer.get(),\n        dst_offset + (l * pitch * num_rows),\n        pitch,\n        pitch * num_rows);\n  }\n}\n\nngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT {\n  if (!(img->usage_flags & NGF_IMAGE_USAGE_MIPMAP_GENERATION)) {\n    NGFI_DIAG_ERROR(\"mipmap generation was requested for an image that was created \"\n                    \"without the NGF_IMAGE_USAGE_MIPMAP_GENERATION flag\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  auto buf = NGFMTL_ENC2CMDBUF(xfenc);\n  assert(buf->active_rce == nullptr);\n  buf->active_bce->generateMipmaps(img->texture.get());\n  return NGF_ERROR_OK;\n}\n\nvoid ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->active_rce->setStencilReferenceValues(front, back);\n}\n\nvoid ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back)\n    NGF_NOEXCEPT {\n  auto cmd_buf 
= NGFMTL_ENC2CMDBUF(enc);\n\n  cmd_buf->active_gfx_pipe->depth_stencil_desc->frontFaceStencil()->setReadMask(front);\n  cmd_buf->active_gfx_pipe->depth_stencil_desc->backFaceStencil()->setReadMask(back);\n  ngf_id<MTL::DepthStencilState> depth_stencil_state =\n      CURRENT_CONTEXT->device->newDepthStencilState(\n          cmd_buf->active_gfx_pipe->depth_stencil_desc.get());\n  cmd_buf->active_rce->setDepthStencilState(depth_stencil_state.get());\n}\n\nvoid ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back)\n    NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->active_gfx_pipe->depth_stencil_desc->frontFaceStencil()->setWriteMask(front);\n  cmd_buf->active_gfx_pipe->depth_stencil_desc->backFaceStencil()->setWriteMask(back);\n  ngf_id<MTL::DepthStencilState> depth_stencil_state =\n      CURRENT_CONTEXT->device->newDepthStencilState(\n          cmd_buf->active_gfx_pipe->depth_stencil_desc.get());\n  cmd_buf->active_rce->setDepthStencilState(depth_stencil_state.get());\n}\n\nvoid ngf_cmd_set_depth_bias(\n    ngf_render_encoder enc,\n    float              const_scale,\n    float              slope_scale,\n    float              clamp) NGF_NOEXCEPT {\n  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  cmd_buf->active_rce->setDepthBias(const_scale, slope_scale, clamp);\n}\n\nvoid ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buf, const char* name) NGF_NOEXCEPT {\n  auto name_nsstr = NS::String::string(name, NS::ASCIIStringEncoding);\n  cmd_buf->mtl_cmd_buffer->pushDebugGroup(name_nsstr);\n}\n\nvoid ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buf) NGF_NOEXCEPT {\n  cmd_buf->mtl_cmd_buffer->popDebugGroup();\n}\n\nvoid ngf_finish() NGF_NOEXCEPT {\n  if (CURRENT_CONTEXT->pending_cmd_buffer) {\n    CURRENT_CONTEXT->last_cmd_buffer =\n        ngf_id<MTL::CommandBuffer>::add_retain(CURRENT_CONTEXT->pending_cmd_buffer);\n    CURRENT_CONTEXT->pending_cmd_buffer->commit();\n    CURRENT_CONTEXT->pending_cmd_buffer = nullptr;\n  
}\n\n  if (CURRENT_CONTEXT->last_cmd_buffer) { CURRENT_CONTEXT->last_cmd_buffer->waitUntilCompleted(); }\n}\n\nstatic ngf_error ngfmtl_capture_set_bytes(\n    ngf_cmd_buffer cmd_buf,\n    const void*    data,\n    size_t         size_bytes) {\n  if (!data || size_bytes == 0u) {\n    cmd_buf->pending_pc_size = 0u;\n    return NGF_ERROR_OK;\n  }\n  if (size_bytes > NGF_MAX_ENCODER_INLINE_BYTES || (size_bytes & 0x3u) != 0u) {\n    NGFI_DIAG_ERROR(\n        \"push-constant size %zu must be <= %u and a multiple of 4\",\n        size_bytes,\n        NGF_MAX_ENCODER_INLINE_BYTES);\n    return NGF_ERROR_INVALID_SIZE;\n  }\n  cmd_buf->pending_pc_size = static_cast<uint32_t>(size_bytes);\n  memcpy(cmd_buf->pending_pc_data, data, size_bytes);\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_set_bytes(\n    ngf_render_encoder enc,\n    const void*        data,\n    size_t             size_bytes) NGF_NOEXCEPT {\n  auto             cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  const ngf_error  err     = ngfmtl_capture_set_bytes(cmd_buf, data, size_bytes);\n  if (err != NGF_ERROR_OK) return err;\n  ngfmtl_apply_set_bytes_gfx(cmd_buf);\n  return NGF_ERROR_OK;\n}\n\nngf_error ngf_set_compute_bytes(\n    ngf_compute_encoder enc,\n    const void*         data,\n    size_t              size_bytes) NGF_NOEXCEPT {\n  auto             cmd_buf = NGFMTL_ENC2CMDBUF(enc);\n  const ngf_error  err     = ngfmtl_capture_set_bytes(cmd_buf, data, size_bytes);\n  if (err != NGF_ERROR_OK) return err;\n  ngfmtl_apply_set_bytes_compute(cmd_buf);\n  return NGF_ERROR_OK;\n}\n\nvoid ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT {\n  NGFI_DIAG_WARNING(\"RenderDoc functionality is not implemented for Metal backend\");\n}\n\nvoid ngf_renderdoc_capture_begin() NGF_NOEXCEPT {\n  NGFI_DIAG_WARNING(\"RenderDoc functionality is not implemented for Metal backend\");\n}\n\nvoid ngf_renderdoc_capture_end() NGF_NOEXCEPT {\n  NGFI_DIAG_WARNING(\"RenderDoc functionality is not implemented for Metal backend\");\n}\n\nuintptr_t 
ngf_get_mtl_image_handle(ngf_image image) NGF_NOEXCEPT {\n  return (uintptr_t)(image->texture.get());\n}\n\nuintptr_t ngf_get_mtl_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT {\n  return (uintptr_t)(buffer->mtl_buffer.get());\n}\n\nuintptr_t ngf_get_mtl_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {\n  return (uintptr_t)(cmd_buffer->mtl_cmd_buffer);\n}\n\nuintptr_t ngf_get_mtl_render_encoder_handle(ngf_render_encoder render_encoder) NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(render_encoder);\n  return (uintptr_t)(buf->active_rce);\n}\n\nuintptr_t ngf_get_mtl_xfer_encoder_handle(ngf_xfer_encoder xfer_encoder) NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(xfer_encoder);\n  return (uintptr_t)(buf->active_bce);\n}\n\nuintptr_t ngf_get_mtl_compute_encoder_handle(ngf_compute_encoder compute_encoder) NGF_NOEXCEPT {\n  auto buf = NGFMTL_ENC2CMDBUF(compute_encoder);\n  return (uintptr_t)(buf->active_cce);\n}\n\nuintptr_t ngf_get_mtl_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT {\n  return (uintptr_t)(sampler->sampler.get());\n}\n\nuint32_t ngf_get_mtl_pixel_format_index(ngf_image_format format) NGF_NOEXCEPT {\n  return (uint32_t)get_mtl_pixel_format(format).format;\n}\n\nuintptr_t ngf_get_mtl_device() NGF_NOEXCEPT {\n  return (uintptr_t)(void*)MTL_DEVICE;\n}\n\nvoid ngf_mtl_set_sample_attachment_for_next_render_pass(\n    ngf_cmd_buffer cmd_buffer,\n    uintptr_t      sample_buf_attachment_descriptor) NGF_NOEXCEPT {\n  cmd_buffer->sample_buf_attachment_for_next_render_pass =\n      ngf_id<MTL::RenderPassSampleBufferAttachmentDescriptor>::add_retain(\n          static_cast<MTL::RenderPassSampleBufferAttachmentDescriptor*>(\n              (void*)sample_buf_attachment_descriptor));\n}\n\nvoid ngf_mtl_set_sample_attachment_for_next_compute_pass(\n    ngf_cmd_buffer cmd_buffer,\n    uintptr_t      sample_buf_attachment_descriptor) NGF_NOEXCEPT {\n  cmd_buffer->sample_buf_attachment_for_next_compute_pass =\n      
ngf_id<MTL::ComputePassSampleBufferAttachmentDescriptor>::add_retain(\n          static_cast<MTL::ComputePassSampleBufferAttachmentDescriptor*>(\n              (void*)sample_buf_attachment_descriptor));\n}\n\n#include \"ngf-common/create-destroy.cpp\"\n"
  },
  {
    "path": "source/ngf-mtl/layer.mm",
    "content": "#include \"nicegraf.h\"\n\n#import <Metal/Metal.h>\n#import <QuartzCore/QuartzCore.h>\n\n#if TARGET_OS_OSX\n#import <AppKit/AppKit.h>\nusing NGFMTL_VIEW_TYPE = NSView;\n#else\n#import <UIKit/UIKit.h>\nusing NGFMTL_VIEW_TYPE = UIView;\n#endif\n\n// Implementation is defined in impl.cpp, header only here\n#include \"MetalSingleHeader.hpp\"\n\nstatic const CFStringRef get_mtl_colorspace(ngf_colorspace colorspace) {\n  const CFStringRef color_spaces[NGF_COLORSPACE_COUNT] = {\n    kCGColorSpaceSRGB,\n    kCGColorSpaceExtendedSRGB,\n    kCGColorSpaceExtendedLinearSRGB,\n    kCGColorSpaceDisplayP3,\n    kCGColorSpaceExtendedLinearDisplayP3,\n    kCGColorSpaceDCIP3,\n    kCGColorSpaceExtendedLinearITUR_2020,\n    kCGColorSpaceITUR_2100_PQ\n  };\n  return color_spaces[colorspace];\n}\n\n// Return type of CA::MetalLayer*\nCA::MetalLayer* ngf_layer_add_to_view(MTL::Device* device,\n                                 uint32_t width,\n                                 uint32_t height,\n                                 MTL::PixelFormat pixel_format,\n                                 ngf_colorspace colorspace,\n                                 uint32_t capacity_hint,\n                                 bool display_sync_enabled,\n                                 bool compute_access_enabled,\n                                 uintptr_t native_handle) {\n    CAMetalLayer* layer_   = [CAMetalLayer layer];\n    layer_.device          = (__bridge id<MTLDevice>)device;\n    layer_.drawableSize    = CGSizeMake(width, height);\n    layer_.pixelFormat     = (MTLPixelFormat)pixel_format; // TODO: Is this cast correct?\n    layer_.colorspace      = CGColorSpaceCreateWithName(get_mtl_colorspace(colorspace));\n    layer_.framebufferOnly = compute_access_enabled ? 
NO : YES;\n    #if TARGET_OS_OSX\n    if (@available(macOS 10.13.2, *)) {\n      layer_.maximumDrawableCount = capacity_hint;\n    }\n    if (@available(macOS 10.13, *)) {\n      layer_.displaySyncEnabled = display_sync_enabled;\n    }\n    #endif\n\n    const bool supports_edr = colorspace == NGF_COLORSPACE_EXTENDED_SRGB_LINEAR ||\n                              colorspace == NGF_COLORSPACE_DISPLAY_P3_LINEAR ||\n                              colorspace == NGF_COLORSPACE_ITUR_BT2020 ||\n                              colorspace == NGF_COLORSPACE_ITUR_BT2100_PQ;\n\n    if (supports_edr) {\n      #if TARGET_OS_OSX\n      if (@available(macOS 10.11, *)) {\n        layer_.wantsExtendedDynamicRangeContent = YES;\n      }\n      #else\n      if (@available(iOS 16.0, *)) {\n        layer_.wantsExtendedDynamicRangeContent = YES;\n      }\n      #endif\n    }\n\n    // Associate the newly created Metal layer with the user-provided View.\n    NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)native_handle);\n    #if TARGET_OS_OSX\n    [view setLayer:layer_];\n    #else\n    [view.layer addSublayer:layer_];\n    [layer_ setContentsScale:view.layer.contentsScale];\n    [layer_ setContentsGravity:kCAGravityResizeAspect];\n    [layer_ setFrame:view.frame];\n    #endif\n    CFBridgingRetain(view);\n    \n    return (__bridge_retained CA::MetalLayer*)layer_;\n}\n\nCA::MetalDrawable* ngf_layer_next_drawable(CA::MetalLayer* layer) {\n    return (__bridge CA::MetalDrawable*)[(__bridge CAMetalLayer*)layer nextDrawable];\n}\n\nvoid ngf_resize_swapchain(CA::MetalLayer* layer,\n                          uint32_t width,\n                          uint32_t height,\n                          uintptr_t native_handle) {\n    CAMetalLayer* bridged_layer = (__bridge CAMetalLayer*)layer;\n    \n    bridged_layer.drawableSize = CGSizeMake(width, height);\n\n    NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)native_handle);\n\n    [bridged_layer setContentsScale:view.layer.contentsScale];\n    
[bridged_layer setFrame:view.frame];\n\n    CFBridgingRetain(view);\n}\n"
  },
  {
    "path": "source/ngf-vk/ca-metal-layer.mm",
    "content": "#if defined(__APPLE__)\n\n#include \"nicegraf.h\"\n#import <QuartzCore/QuartzCore.h>\n#import <Metal/Metal.h>\n#if TARGET_OS_OSX\n#import <AppKit/AppKit.h>\nusing NGFMTL_VIEW_TYPE = NSView;\n#else\n#import <UIKit/UIKit.h>\nusing NGFMTL_VIEW_TYPE = UIView;\n#endif\n\nextern \"C\" {\nvoid* ngfvk_create_ca_metal_layer(const ngf_swapchain_info* swapchain_info) {\n  //const MTLPixelFormat pixel_format = get_mtl_pixel_format(swapchain_info->color_format).format;\n  auto layer = [CAMetalLayer layer];\n  layer.drawableSize    = CGSizeMake(swapchain_info->width, swapchain_info->height);\n  //layer.pixelFormat     = pixel_format;\n  layer.framebufferOnly = YES;\n  #if TARGET_OS_OSX\n      if (@available(macOS 10.13.2, *)) {\n        layer.maximumDrawableCount = swapchain_info->capacity_hint;\n      }\n      if (@available(macOS 10.13, *)) {\n        layer.displaySyncEnabled = (swapchain_info->present_mode == NGF_PRESENTATION_MODE_FIFO);\n      }\n  #endif\n\n      // Associate the newly created Metal layer with the user-provided View.\n      NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)swapchain_info->native_handle);\n  #if TARGET_OS_OSX\n      [view setLayer:layer];\n  #else\n      [view.layer addSublayer:layer];\n      [layer setContentsScale:view.layer.contentsScale];\n      [layer setContentsGravity:kCAGravityResizeAspect];\n      [layer setFrame:view.frame];\n  #endif\n      CFBridgingRetain(view);\n  return layer;\n}\n}\n#endif\n"
  },
  {
    "path": "source/ngf-vk/impl.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n\n#include \"ngf-common/silence.h\"\n#include \"nicegraf.h\"\n\n#include \"ngf-common/arena.h\"\n#include \"ngf-common/array.h\"\n#include \"ngf-common/chunked-list.h\"\n#include \"ngf-common/cmdbuf-state.h\"\n#include \"ngf-common/default-arenas.h\"\n#include \"ngf-common/frame-token.h\"\n#include \"ngf-common/hashtable.h\"\n#include \"ngf-common/macros.h\"\n#include \"ngf-common/unique-ptr.h\"\n#include \"ngf-common/util.h\"\n#include \"ngf-common/value-or-error.h\"\n#include \"vk_10.h\"\n\n#include <assert.h>\n#include <renderdoc_app.h>\n#include <spirv_reflect.h>\n#include <string.h>\n#include <vk_mem_alloc.h>\n\n#pragma region constants\n\nnamespace ngfvk {\nnamespace global {\n\nconstexpr uint32_t invalid_idx                    = ~((uint32_t)0u);\nconstexpr uint32_t max_phys_dev                   = 64u;  // 64 
GPUs oughta be enough for everybody.\nconstexpr uint32_t img_usage_transient_attachment = (1u << 31u);\n\n// Used by every pipeline layout and by ngf_context_t::vk_default_push_layout.\nconstexpr VkPushConstantRange default_push_constant_range = {\n    .stageFlags = VK_SHADER_STAGE_ALL,\n    .offset     = 0u,\n    .size       = NGF_MAX_ENCODER_INLINE_BYTES};\n\n}  // namespace global\n}  // namespace ngfvk\n\n#pragma endregion\n\n#pragma region internal_struct_definitions\n\nstruct ngfvk_dummy_resources {\n  ngf_image                  img;\n  ngf_image                  cube;\n  ngf_buffer                 buf;\n  ngf_texel_buffer_view      tbuf;\n  ngf_sampler                samp;\n  VkAccelerationStructureKHR dummy_accel_struct;\n  VkDescriptorImageInfo      img_info;\n  VkDescriptorImageInfo      cube_info;\n  VkDescriptorImageInfo      img_arr_info;\n  VkDescriptorImageInfo      cube_arr_info;\n  VkDescriptorImageInfo      samp_info;\n  VkDescriptorImageInfo      imgsamp_info;\n  VkDescriptorImageInfo      imgsamp_arr_info;\n  VkDescriptorBufferInfo     buf_info;\n  pthread_mutex_t            img_mu;\n  bool                       image_transitioned;\n};\n\n// Singleton for holding vulkan instance, device and queue handles.\n// This is shared by all contexts.\nstruct {\n  VkInstance               instance;\n  VkPhysicalDevice         phys_dev;\n  VkDevice                 device;\n  VmaAllocator             allocator;\n  VkQueue                  gfx_queue;\n  VkQueue                  present_queue;\n  uint32_t                 gfx_family_idx;\n  uint32_t                 present_family_idx;\n  VkDebugUtilsMessengerEXT debug_messenger;\n#if defined(__linux__)\n  xcb_connection_t* xcb_connection;\n  xcb_visualid_t    xcb_visualid;\n#endif\n  ngfvk_dummy_resources dummy_res;\n} _vk;\n\n// Singleton for holding on to RenderDoc API\nstruct {\n  RENDERDOC_API_1_6_0* api;\n  bool                 capture_next;\n  bool                 is_capturing;\n} _renderdoc;\n\n// 
Swapchain state.\nstruct ngfvk_swapchain {\n  VkSwapchainKHR                                   vk_swapchain;\n  ngfi::fixed_array<VkImage>                       imgs;\n  ngfi::fixed_array<ngfi::unique_ptr<ngf_image_t>> wrapper_imgs;\n  ngfi::fixed_array<ngfi::unique_ptr<ngf_image_t>> multisample_imgs;\n  ngfi::fixed_array<VkImageView>                   multisample_img_views;\n  ngfi::fixed_array<VkSemaphore>                   acquire_sems;\n  ngfi::fixed_array<VkSemaphore>                   submit_sems;\n  ngfi::fixed_array<VkFramebuffer>                 framebufs;\n  ngf_image                                        depth_img;\n  uint32_t         nimgs;      // < Total number of images in the swapchain.\n  uint32_t         image_idx;  // < The index of currently acquired image.\n  uint32_t         width;\n  uint32_t         height;\n  VkPresentModeKHR present_mode;\n\n  static ngfi::maybe_ngfptr<ngfvk_swapchain> make(\n      const ngf_swapchain_info& swapchain_info,\n      ngf_render_target         rt,\n      VkSurfaceKHR              surface) noexcept;\n  ngfvk_swapchain() noexcept                        = default;\n  ngfvk_swapchain(ngfvk_swapchain&& other) noexcept = default;\n  ~ngfvk_swapchain() noexcept;\n};\n\nstruct ngfvk_alloc {\n  uintptr_t     obj_handle  = 0u;\n  VmaAllocation vma_alloc   = VK_NULL_HANDLE;\n  void*         mapped_data = nullptr;\n\n  static ngfi::value_or_ngferr<ngfvk_alloc> make(const ngf_image_info& info) NGF_NOEXCEPT;\n  static ngfi::value_or_ngferr<ngfvk_alloc> make(const ngf_buffer_info& info) NGF_NOEXCEPT;\n  static ngfi::value_or_ngferr<ngfvk_alloc> wrap(VkImage img) NGF_NOEXCEPT {\n    ngfvk_alloc result {};\n    result.obj_handle = (uintptr_t)img;\n    return ngfi::move(result);\n  }\n\n  ngfvk_alloc() NGF_NOEXCEPT = default;\n  ngfvk_alloc(ngfvk_alloc&& other) NGF_NOEXCEPT {\n    *this = ngfi::move(other);\n  }\n  ngfvk_alloc(const ngfvk_alloc&) = delete;\n  ~ngfvk_alloc() NGF_NOEXCEPT {\n    destroy();\n  }\n\n  ngfvk_alloc& 
operator=(ngfvk_alloc&& other) NGF_NOEXCEPT;\n  ngfvk_alloc& operator=(const ngfvk_alloc& other) NGF_NOEXCEPT = delete;\n\n  private:\n  void destroy() NGF_NOEXCEPT;\n};\n\nstruct ngfvk_buffer_view_info {\n  VkBufferViewCreateInfo vk_info;\n  VkBufferView           vk_handle;\n};\n\ntypedef uint32_t ngfvk_desc_count[NGF_DESCRIPTOR_TYPE_COUNT];\n\nstruct ngfvk_desc_pool_capacity {\n  uint32_t         sets;\n  ngfvk_desc_count descriptors;\n};\n\nstruct ngfvk_desc_binding {\n  VkDescriptorType     type;\n  VkPipelineStageFlags stage_accessors;\n  bool                 readonly;\n  bool                 is_multilayered_image;\n  bool                 is_cubemap;\n  uint32_t             ndescs_in_binding;\n};\n\nstruct ngfvk_desc_set_layout {\n  VkDescriptorSetLayout vk_handle;\n  ngfvk_desc_count      counts;\n  uint32_t              nall_descs;  // < Total number of descriptors across all bindings.\n  ngfi::fixed_array<ngfvk_desc_binding> binding_properties;\n};\n\nstruct ngfvk_desc_pool {\n  ngfvk_desc_pool*         next;\n  VkDescriptorPool         vk_pool;\n  ngfvk_desc_pool_capacity capacity;\n  ngfvk_desc_pool_capacity utilization;\n};\n\nstruct ngfvk_desc_pools_list {\n  ngfvk_desc_pool* active_pool;\n  ngfvk_desc_pool* list;\n};\n\nstruct ngfvk_desc_superpool {\n  uint16_t                                 ctx_id;\n  ngfi::fixed_array<ngfvk_desc_pools_list> pools_lists;\n};\n\n// Command buffer with its associated pool.\nstruct ngfvk_cmd_buf_with_pool {\n  VkCommandBuffer cmd_buf;\n  VkCommandPool   cmd_pool;\n};\n\n// Typed chunk lists for retiring Vulkan objects.\ntemplate<class T> struct ngfvk_retire_list {\n  ngfi::chunked_list<T> list;\n};\n\nngfi::arena& current_frame_res_arena();\n\ntemplate<class... Args> struct ngfvk_retire_lists_t : private ngfvk_retire_list<Args>... 
{
  // Appends a retired object of type T to the matching typed list, allocating
  // from the current frame's resource arena.
  template<class T> void append(T&& v) {
    using X = ngfi::remove_reference_t<T>;
    ngfvk_retire_list<X>::list.append(v, current_frame_res_arena());
  }

  // Direct access to the typed list for iteration.
  template<class T> ngfi::chunked_list<T>& list() {
    return ngfvk_retire_list<T>::list;
  }

  // Empties the typed list (objects must already have been destroyed).
  template<class T> void clear() {
    return ngfvk_retire_list<T>::list.clear();
  }
};

// All object types that may be retired at end-of-frame; processed by
// ngfvk_retire_resources.
using ngfvk_retire_lists = ngfvk_retire_lists_t<
    VkPipeline,
    VkPipelineLayout,
    VkDescriptorSetLayout,
    ngfvk_cmd_buf_with_pool,
    VkFramebuffer,
    VkRenderPass,
    VkImageView,
    ngf_image_view,
    ngf_sampler,
    ngf_texel_buffer_view,
    ngf_image,
    ngf_buffer,
    ngfvk_desc_pools_list*>;

// Vulkan resources associated with a given frame.
struct ngfvk_frame_resources {
  ngfi::arena                 res_frame_arena;
  ngfi::array<ngf_cmd_buffer> submitted_cmd_bufs;  // < Submitted ngf command buffers.

  // Resources that should be disposed of at some point after this
  // frame's completion.
  ngfvk_retire_lists retire;

  // Fences that will be signaled at the end of the frame.
  VkFence fences[2];

  // Number of fences to wait on to complete all submissions related to this
  // frame.
  uint32_t nwait_fences;
};

// A set of command pools owned by one context (one pool per slot; see ctor).
// Move-only: copying would double-free the pools.
struct ngfvk_command_superpool {
  ngfi::fixed_array<VkCommandPool> cmd_pools;
  uint16_t                         ctx_id;

  ngfvk_command_superpool() = default;
  ngfvk_command_superpool(uint32_t queue_family_idx, uint32_t capacity, uint16_t ctx_id);
  ~ngfvk_command_superpool();
  ngfvk_command_superpool(const ngfvk_command_superpool&) = delete;
  ngfvk_command_superpool(ngfvk_command_superpool&&)      = default;
};

// Per-attachment parameters used when building a render pass.
struct ngfvk_attachment_pass_desc {
  VkImageLayout       layout;
  VkAttachmentLoadOp  load_op;
  VkAttachmentStoreOp store_op;
  bool                is_resolve;
};

// Cache entry mapping (render target, load/store-op key) to a VkRenderPass.
struct ngfvk_renderpass_cache_entry {
  ngf_render_target rt;
  uint64_t          ops_key;
  VkRenderPass      renderpass;
};

// Extracts the ngf_cmd_buffer stashed in an encoder's private data word.
#define NGFVK_ENC2CMDBUF(enc) ((ngf_cmd_buffer)((void*)enc.pvt_data_donotuse.d0))

// Static data describing a physical device and the feature/extension chain
// requested from it at logical-device creation.
struct ngfvk_device_info {
  uint32_t vendor_id;
  uint32_t device_id;

  ngfi::array<const char*, ngfi::system_alloc_callbacks> enabled_ext_names;
  VkPhysicalDeviceFeatures                               required_features;
  VkPhysicalDeviceShaderFloat16Int8Features              sf16i8_features;
  VkPhysicalDeviceSynchronization2Features               sync2_features;
  VkPhysicalDeviceBufferDeviceAddressFeatures            bda_features;
  VkPhysicalDeviceAccelerationStructureFeaturesKHR       accls_features;
  VkPhysicalDeviceRayQueryFeaturesKHR                    ray_query_features;
  VkPhysicalDeviceFeatures2                              phys_dev_features2;
};

// State shared by graphics and compute pipelines (see the two make overloads).
struct ngfvk_generic_pipeline {
  VkPipeline                         vk_pipeline;
  ngfi::array<ngfvk_desc_set_layout> descriptor_set_layouts;
  VkPipelineLayout                   vk_pipeline_layout;
  VkSpecializationInfo               vk_spec_info;
  VkRenderPass                       compat_render_pass;  // < Render pass the pipeline is compatible with.

  static ngfi::maybe_ngfptr<ngfvk_generic_pipeline>
  make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT;

  static ngfi::maybe_ngfptr<ngfvk_generic_pipeline>
  make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT;

  ~ngfvk_generic_pipeline() NGF_NOEXCEPT;

  private:
  // Shared setup for both pipeline kinds: specialization constants and
  // shader stage create-infos.
  ngf_error common_init(
      const ngf_specialization_info*   spec_info,
      VkPipelineShaderStageCreateInfo* vk_shader_stages,
      const ngf_shader_stage*          shader_stages,
      uint32_t                         nshader_stages) NGF_NOEXCEPT;
};

// Describes how a resource is accessed within a synchronization scope.
struct ngfvk_sync_barrier_masks {
  VkAccessFlags        access_mask;  // < Ways in which the resource is accessed.
  VkPipelineStageFlags stage_mask;   // < Pipeline stages that have access to the resource.
};

// Synchronization request, that describes the 
intent to access a resource.
struct ngfvk_sync_req {
  ngfvk_sync_barrier_masks barrier_masks;  // < Access/stage masks.
  VkImageLayout            layout;         // < For image resources only, current layout.
};

// Synchronization state of a resource within the context of a single command buffer.
struct ngfvk_sync_state {
  ngfvk_sync_barrier_masks last_writer_masks;      // < Masks of the most recent write.
  ngfvk_sync_barrier_masks active_readers_masks;   // < Combined masks of reads since that write.
  uint32_t                 per_stage_readers_mask;
  VkImageLayout            layout;
  bool                     skip_hazard_tracking;  // < When set, hazard tracking ignores this resource.
};

// Type of synchronized resource.
enum ngfvk_sync_res_type { NGFVK_SYNC_RES_BUFFER, NGFVK_SYNC_RES_IMAGE, NGFVK_SYNC_RES_COUNT };

// Tagged union for passing around handles to synchronized GPU resources in a generic way.
struct ngfvk_sync_res {
  union {
    ngf_image  img;
    ngf_buffer buf;
  } data;
  ngfvk_sync_res_type type;  // < Discriminator for `data`.
  uint64_t            hash;
};

// Data associated with a particular synchronized resource within the context of a single cmd
// buffer.
struct ngfvk_sync_res_data {
  ngfvk_sync_req      expected_sync_req;  // < Expected sync state.
  ngfvk_sync_state    sync_state;         // < Latest synchronization state.
  uint32_t            pending_sync_req_idx;
  ngfvk_sync_res_type res_type;
  uintptr_t           res_handle;
  bool                had_barrier;
};

// Typedef for the sync resource data hash table
using ngfvk_sync_res_hashtable = ngfi::hashtable<ngfvk_sync_res_data>;

// A batch of pending synchronization requests; the three parallel pointers
// are indexed together (npending_sync_reqs entries each).
struct ngfvk_sync_req_batch {
  ngfvk_sync_res_hashtable::keyhash* sync_res_data_keys;
  ngfvk_sync_req*                    pending_sync_reqs;
  bool*                              freshness;
  uint32_t                           npending_sync_reqs;
  uint32_t                           nbuffer_sync_reqs;
  uint32_t                           nimage_sync_reqs;
};

// Discriminator for ngfvk_render_cmd::data below.
enum ngfvk_render_cmd_type {
  NGFVK_RENDER_CMD_BIND_PIPELINE,
  NGFVK_RENDER_CMD_SET_VIEWPORT,
  NGFVK_RENDER_CMD_SET_SCISSOR,
  NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE,
  NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK,
  NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK,
  NGFVK_RENDER_CMD_BIND_RESOURCE,
  NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER,
  NGFVK_RENDER_CMD_BIND_INDEX_BUFFER,
  NGFVK_RENDER_CMD_SET_DEPTH_BIAS,
  NGFVK_RENDER_CMD_DRAW,
};

// Fully-resolved parameters for a single pipeline barrier on one resource.
struct ngfvk_barrier_data {
  VkAccessFlags        src_access_mask;
  VkAccessFlags        dst_access_mask;
  VkPipelineStageFlags src_stage_mask;
  VkPipelineStageFlags dst_stage_mask;
  VkImageLayout        src_layout;
  VkImageLayout        dst_layout;
  ngfvk_sync_res       res;
};

// A recorded render command: a tagged union of the payloads for each
// ngfvk_render_cmd_type value.
struct ngfvk_render_cmd {
  union {
    ngf_graphics_pipeline pipeline;
    ngf_irect2d           rect;  // < Viewport or scissor rectangle.
    struct {
      uint32_t front;
      uint32_t back;
    } stencil_values;
    ngf_resource_bind_op bind_resource;
    struct {
      ngf_buffer buffer;
      uint32_t   binding;
      size_t     offset;
    } bind_attrib_buffer;
    struct {
      ngf_buffer buffer;
      size_t     offset;
      ngf_type   type;
    } bind_index_buffer;
    struct {
      uint32_t first_element;
      uint32_t nelements;
      uint32_t ninstances;
      bool     indexed;
    } draw;
    struct {
      float const_factor;
      float slope_factor;
      float clamp;
    } depth_bias;
  } data;
  ngfvk_render_cmd_type type : 8;  // < Packed into 8 bits to keep the struct small.
};

// Barriers accumulated during recording, with per-kind counts so the
// VkImageMemoryBarrier / VkBufferMemoryBarrier arrays can be pre-sized.
struct ngfvk_pending_barrier_list {
  ngfi::chunked_list<ngfvk_barrier_data> barriers;
  uint32_t                               npending_img_bars;
  uint32_t                               npending_buf_bars;
};

// Range of render commands for virtual bind operations.
// Stores a pointer to the first command and the count.
struct ngfvk_virt_bind_range {
  const ngfvk_render_cmd* start;
  uint32_t                count;
};

// A descriptor binding extracted via SPIRV-Reflect plus the pipeline stages
// that access it.
struct ngfvk_reflect_binding_and_stage_mask {
  SpvReflectDescriptorBinding binding_data;
  VkPipelineStageFlags        mask;
};

#pragma 
endregion

#pragma region external_struct_definitions

struct ngf_cmd_buffer_t {
  ngf_frame_token        parent_frame;         // < The frame this cmd buffer is associated with.
  VkCommandBuffer        vk_cmd_buffer;        // < Active vulkan command buffer.
  VkCommandPool          vk_cmd_pool;          // < Active vulkan command pool.
  ngf_graphics_pipeline  active_gfx_pipe;      // < The bound graphics pipeline.
  ngf_compute_pipeline   active_compute_pipe;  // < The bound compute pipeline.
  ngf_render_target      active_rt;            // < Active render target.
  ngf_buffer             active_attr_buf;      // < Currently bound vertex attribute buffer.
  ngf_buffer             active_idx_buf;       // < Currently bound index buffer.
  ngfvk_desc_pools_list* desc_pools_list;  // < List of descriptor pools used in the buffer's frame.
  ngfi::chunked_list<ngf_resource_bind_op>
      pending_bind_ops;  // < Bind ops to be performed before the next draw.
  ngfi::chunked_list<ngfvk_render_cmd>      in_pass_cmd_chnks;     // < Commands recorded within the current pass.
  ngfi::chunked_list<ngfvk_virt_bind_range> virt_bind_ops_ranges;  // < Ranges of virtual bind commands.
  ngfvk_pending_barrier_list                pending_barriers;      // < Barriers to emit before execution.
  ngfvk_sync_res_hashtable                  local_res_states;      // < Per-cmd-buffer resource sync states.
  ngf_render_pass_info   pending_render_pass_info;  // < describes the active render pass
  uint32_t               npending_bind_ops;
  uint32_t               pending_clear_value_count;
  ngfi::cmd_buffer_state state;  // < State of the cmd buffer (i.e. new/recording/etc.)
  bool                   renderpass_active : 1;    // < Has an active renderpass.
  bool                   compute_pass_active : 1;  // < Has an active compute pass.
  bool                   xfer_pass_active : 1;     // < Has an active transfer pass.
  bool                   destroy_on_submit : 1;    // < Destroy after submitting.

  static ngfi::maybe_ngfptr<ngf_cmd_buffer_t> make() noexcept;
  ~ngf_cmd_buffer_t() noexcept;
};

struct ngf_sampler_t {
  VkSampler vksampler;

  static ngfi::maybe_ngfptr<ngf_sampler_t> make(const ngf_sampler_info& info) NGF_NOEXCEPT;
  ~ngf_sampler_t() NGF_NOEXCEPT;
};

// Note: no user-declared dtor here; `alloc`'s own destructor releases the
// underlying VkBuffer/VMA allocation.
struct ngf_buffer_t {
  ngfvk_alloc             alloc;
  size_t                  size;           // < Size of the buffer in bytes.
  size_t                  mapped_offset;  // < Offset of the currently mapped range.
  ngfvk_sync_state        sync_state;
  uint64_t                hash;
  uint32_t                usage_flags;
  ngf_buffer_storage_type storage_type;

  static ngfi::maybe_ngfptr<ngf_buffer_t> make(const ngf_buffer_info& info) NGF_NOEXCEPT;
};

struct ngf_texel_buffer_view_t {
  VkBufferView vk_buf_view;
  ngf_buffer   buffer;  // < Buffer the view was created from.

  static ngfi::maybe_ngfptr<ngf_texel_buffer_view_t>
  make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT;
  ~ngf_texel_buffer_view_t() NGF_NOEXCEPT;
};

struct ngf_image_t {
  ngfvk_alloc      alloc;
  VkImageView      vkview;          // < Default view over the image.
  VkImageView      vkview_arrayed;  // < Arrayed variant of the default view.
  VkFormat         vk_fmt;
  ngf_extent3d     extent;
  ngf_image_type   type;
  ngfvk_sync_state sync_state;
  uint64_t         hash;
  uint32_t         usage_flags;
  uint32_t         nlevels;  // < Number of mip levels.
  uint32_t         nlayers;  // < Number of array layers.

  // Wraps a pre-existing allocation (e.g. a swapchain image).
  static ngfi::maybe_ngfptr<ngf_image_t>
  make(const ngf_image_info& wrapper_info, ngfvk_alloc&& alloc) NGF_NOEXCEPT;
  // Creates a new image and its allocation.
  static ngfi::maybe_ngfptr<ngf_image_t> make(const ngf_image_info& wrapper_info) NGF_NOEXCEPT;

  ~ngf_image_t() NGF_NOEXCEPT;
};

struct ngf_image_view_t {
  VkImageView vk_view;
  ngf_image   src;  // < Image the view was created from.

  static 
ngfi::maybe_ngfptr<ngf_image_view_t> make(const ngf_image_view_info& info) NGF_NOEXCEPT;

  ~ngf_image_view_t() NGF_NOEXCEPT;
};

struct ngf_context_t {
  ngfi::unique_ptr<ngfvk_swapchain>     swapchain;
  ngf_swapchain_info                    swapchain_info;
  VkSurfaceKHR                          surface;
  uint32_t                              frame_id;             // < Index of the in-flight frame slot in use.
  uint32_t                              max_inflight_frames;
  ngf_frame_token                       current_frame_token;
  ngf_attachment_descriptions           default_attachment_descriptions_list;
  ngfi::unique_ptr<ngf_render_target_t> default_render_target;

  ngfi::fixed_array<ngfvk_frame_resources>  frame_res;           // < One entry per in-flight frame.
  ngfi::array<ngfvk_command_superpool>      command_superpools;
  ngfi::array<ngfvk_desc_superpool>         desc_superpools;
  ngfi::array<ngfvk_renderpass_cache_entry> renderpass_cache;

  // Push-constant-compatible with every pipeline layout (all share default_push_constant_range).
  VkPipelineLayout vk_default_push_layout = VK_NULL_HANDLE;

  static ngfi::maybe_ngfptr<ngf_context_t> make(const ngf_context_info& info);
  ~ngf_context_t() noexcept;
};

struct ngf_shader_stage_t {
  VkShaderModule          vk_module;
  VkShaderStageFlagBits   vk_stage_bits;
  SpvReflectShaderModule  spv_reflect_module;  // < Reflection data for the SPIR-V module.
  ngfi::fixed_array<char> entry_point_name;

  static ngfi::maybe_ngfptr<ngf_shader_stage_t>
  make(const ngf_shader_stage_info& info) NGF_NOEXCEPT;
  ~ngf_shader_stage_t() NGF_NOEXCEPT;
};

struct ngf_render_target_t {
  VkFramebuffer                                 frame_buffer;
  VkRenderPass                                  compat_render_pass;
  uint32_t                                      nattachments;
  ngfi::fixed_array<ngf_attachment_description> attachment_descs;
  ngfi::fixed_array<VkImageView>                attachment_image_views; /* unused in default RT. */
  ngfi::fixed_array<ngf_image>                  attachment_images;      /* unused in default RT. */
  ngfi::fixed_array<ngfvk_attachment_pass_desc> attachment_compat_pass_descs;
  bool                                          is_default;  // < True for the swapchain's render target.
  bool                                          have_resolve_attachments;
  uint32_t                                      width;
  uint32_t                                      height;

  static ngfi::maybe_ngfptr<ngf_render_target_t>
  make(const ngf_render_target_info& info) NGF_NOEXCEPT;

  // Overload used for the default (swapchain) render target.
  static ngfi::maybe_ngfptr<ngf_render_target_t>
  make(uint32_t width, uint32_t height, uint32_t nattachment_descs) NGF_NOEXCEPT;

  ~ngf_render_target_t() NGF_NOEXCEPT;
};

#pragma endregion

#pragma region global_vars

NGFI_THREADLOCAL ngf_context CURRENT_CONTEXT = NULL;

namespace ngfvk {
namespace global {

// Enumerated physical devices and their capability data; populated during
// instance initialization (num_phys_devices entries are valid).
ngf_device              phys_devices[ngfvk::global::max_phys_dev];
ngfvk_device_info       phys_device_infos[ngfvk::global::max_phys_dev];
ngf_device_capabilities phys_device_caps;
uint32_t                num_phys_devices = 0;

}  // namespace global
}  // namespace ngfvk

#pragma endregion

#pragma region vk_enum_maps

// NOTE: the lookup tables below are indexed directly by the ngf enum value,
// so each table's entry order must match the ngf enum's declaration order.

static VkFilter get_vk_filter(ngf_sampler_filter filter) {
  static const VkFilter vkfilters[NGF_FILTER_COUNT] = {VK_FILTER_NEAREST, VK_FILTER_LINEAR};
  return vkfilters[filter];
}

static VkSamplerAddressMode get_vk_address_mode(ngf_sampler_wrap_mode mode) {
  static const VkSamplerAddressMode vkmodes[NGF_WRAP_MODE_COUNT] = {
      VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
      VK_SAMPLER_ADDRESS_MODE_REPEAT,
      VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT};
  return vkmodes[mode];
}

static VkSamplerMipmapMode get_vk_mipmode(ngf_sampler_filter filter) {
  static const VkSamplerMipmapMode vkmipmodes[NGF_FILTER_COUNT] = {
      VK_SAMPLER_MIPMAP_MODE_NEAREST,
      VK_SAMPLER_MIPMAP_MODE_LINEAR};
  return vkmipmodes[filter];
}

static VkSampleCountFlagBits 
get_vk_sample_count(ngf_sample_count sample_count) {
  switch (sample_count) {
  case NGF_SAMPLE_COUNT_1:
    return VK_SAMPLE_COUNT_1_BIT;
  case NGF_SAMPLE_COUNT_2:
    return VK_SAMPLE_COUNT_2_BIT;
  case NGF_SAMPLE_COUNT_4:
    return VK_SAMPLE_COUNT_4_BIT;
  case NGF_SAMPLE_COUNT_8:
    return VK_SAMPLE_COUNT_8_BIT;
  case NGF_SAMPLE_COUNT_16:
    return VK_SAMPLE_COUNT_16_BIT;
  case NGF_SAMPLE_COUNT_32:
    return VK_SAMPLE_COUNT_32_BIT;
  case NGF_SAMPLE_COUNT_64:
    return VK_SAMPLE_COUNT_64_BIT;
  default:
    assert(false);  // TODO: return error?
  }
  // Unreachable in debug builds; 1 sample is the fallback when asserts are off.
  return VK_SAMPLE_COUNT_1_BIT;
}

static VkDescriptorType get_vk_descriptor_type(ngf_descriptor_type type) {
  static const VkDescriptorType types[NGF_DESCRIPTOR_TYPE_COUNT] = {
      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
      VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
      VK_DESCRIPTOR_TYPE_SAMPLER,
      VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
      VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
      VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
      VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
      VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR};
  return types[type];
}

static VkImageType get_vk_image_type(ngf_image_type t) {
  static const VkImageType types[NGF_IMAGE_TYPE_COUNT] = {
      VK_IMAGE_TYPE_2D,
      VK_IMAGE_TYPE_3D,
      VK_IMAGE_TYPE_2D  // In Vulkan cubemaps are treated as array of 2D images.
  };
  return types[t];
}
// Picks a view type from the image type and layer count; multi-layer 2D and
// cube images map to the corresponding *_ARRAY view types.
static VkImageViewType get_vk_image_view_type(ngf_image_type t, size_t nlayers) {
  if (t == NGF_IMAGE_TYPE_IMAGE_2D && nlayers == 1u) {
    return VK_IMAGE_VIEW_TYPE_2D;
  } else if (t == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1u) {
    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
  } else if (t == NGF_IMAGE_TYPE_IMAGE_3D) {
    return VK_IMAGE_VIEW_TYPE_3D;
  } else if (t == NGF_IMAGE_TYPE_CUBE && nlayers == 1u) {
    return VK_IMAGE_VIEW_TYPE_CUBE;
  } else if (t == NGF_IMAGE_TYPE_CUBE && nlayers > 1u) {
    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
  } else {
    NGFI_DIAG_ERROR("Invalid image type");
    assert(false);
    return VK_IMAGE_VIEW_TYPE_2D;
  }
}

static VkCompareOp get_vk_compare_op(ngf_compare_op op) {
  static const VkCompareOp ops[NGF_COMPARE_OP_COUNT] = {
      VK_COMPARE_OP_NEVER,
      VK_COMPARE_OP_LESS,
      VK_COMPARE_OP_LESS_OR_EQUAL,
      VK_COMPARE_OP_EQUAL,
      VK_COMPARE_OP_GREATER_OR_EQUAL,
      VK_COMPARE_OP_GREATER,
      VK_COMPARE_OP_NOT_EQUAL,
      VK_COMPARE_OP_ALWAYS};
  return ops[op];
}

static VkStencilOp get_vk_stencil_op(ngf_stencil_op op) {
  static const VkStencilOp ops[NGF_STENCIL_OP_COUNT] = {
      VK_STENCIL_OP_KEEP,
      VK_STENCIL_OP_ZERO,
      VK_STENCIL_OP_REPLACE,
      VK_STENCIL_OP_INCREMENT_AND_CLAMP,
      VK_STENCIL_OP_INCREMENT_AND_WRAP,
      VK_STENCIL_OP_DECREMENT_AND_CLAMP,
      VK_STENCIL_OP_DECREMENT_AND_WRAP,
      VK_STENCIL_OP_INVERT};
  return ops[op];
}

static VkAttachmentLoadOp get_vk_load_op(ngf_attachment_load_op op) {
  static const VkAttachmentLoadOp ops[NGF_LOAD_OP_COUNT] = {
      VK_ATTACHMENT_LOAD_OP_DONT_CARE,
      VK_ATTACHMENT_LOAD_OP_LOAD,
      VK_ATTACHMENT_LOAD_OP_CLEAR};
  return ops[op];
}

static VkAttachmentStoreOp get_vk_store_op(ngf_attachment_store_op op) {
  static const VkAttachmentStoreOp ops[NGF_STORE_OP_COUNT] = {
      VK_ATTACHMENT_STORE_OP_DONT_CARE,
      VK_ATTACHMENT_STORE_OP_STORE,
      // NOTE(review): the third ngf store op also maps to DONT_CARE here —
      // presumably a resolve-related op handled elsewhere; confirm intent.
      VK_ATTACHMENT_STORE_OP_DONT_CARE,
  };
  return ops[op];
}

static VkBlendFactor get_vk_blend_factor(ngf_blend_factor f) {
  static const VkBlendFactor factors[NGF_BLEND_FACTOR_COUNT] = {
      VK_BLEND_FACTOR_ZERO,
      VK_BLEND_FACTOR_ONE,
      VK_BLEND_FACTOR_SRC_COLOR,
      VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
      VK_BLEND_FACTOR_DST_COLOR,
      VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
      VK_BLEND_FACTOR_SRC_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
      VK_BLEND_FACTOR_DST_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
      
VK_BLEND_FACTOR_CONSTANT_COLOR,
      VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
      VK_BLEND_FACTOR_CONSTANT_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA};
  return factors[f];
}

static VkBlendOp get_vk_blend_op(ngf_blend_op op) {
  static const VkBlendOp ops[NGF_BLEND_OP_COUNT] = {
      VK_BLEND_OP_ADD,
      VK_BLEND_OP_SUBTRACT,
      VK_BLEND_OP_REVERSE_SUBTRACT,
      VK_BLEND_OP_MIN,
      VK_BLEND_OP_MAX};
  return ops[op];
}

// Maps ngf_image_format to VkFormat; table order must mirror the
// ngf_image_format enum declaration order.
static VkFormat get_vk_image_format(ngf_image_format f) {
  static const VkFormat formats[NGF_IMAGE_FORMAT_COUNT] = {
      VK_FORMAT_R8_UNORM,
      VK_FORMAT_R8G8_UNORM,
      VK_FORMAT_R8G8_SNORM,
      VK_FORMAT_R8G8B8_UNORM,
      VK_FORMAT_R8G8B8A8_UNORM,
      VK_FORMAT_R8G8B8_SRGB,
      VK_FORMAT_R8G8B8A8_SRGB,
      VK_FORMAT_B8G8R8_UNORM,
      VK_FORMAT_B8G8R8A8_UNORM,
      VK_FORMAT_B8G8R8_SRGB,
      VK_FORMAT_B8G8R8A8_SRGB,
      VK_FORMAT_A2B10G10R10_UNORM_PACK32,
      VK_FORMAT_R32_SFLOAT,
      VK_FORMAT_R32G32_SFLOAT,
      VK_FORMAT_R32G32B32_SFLOAT,
      VK_FORMAT_R32G32B32A32_SFLOAT,
      VK_FORMAT_R16_SFLOAT,
      VK_FORMAT_R16G16_SFLOAT,
      VK_FORMAT_R16G16B16_SFLOAT,
      VK_FORMAT_R16G16B16A16_SFLOAT,
      VK_FORMAT_B10G11R11_UFLOAT_PACK32,
      VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
      VK_FORMAT_R16_UNORM,
      VK_FORMAT_R16_SNORM,
      VK_FORMAT_R16G16_UNORM,
      VK_FORMAT_R16G16_SNORM,
      VK_FORMAT_R16G16B16A16_UNORM,
      VK_FORMAT_R16G16B16A16_SNORM,
      VK_FORMAT_R8_UINT,
      VK_FORMAT_R8_SINT,
      VK_FORMAT_R16_UINT,
      VK_FORMAT_R16_SINT,
      VK_FORMAT_R16G16_UINT,
      VK_FORMAT_R16G16B16_UINT,
      VK_FORMAT_R16G16B16A16_UINT,
      VK_FORMAT_R32_UINT,
      VK_FORMAT_R32G32_UINT,
      VK_FORMAT_R32G32B32_UINT,
      VK_FORMAT_R32G32B32A32_UINT,
      VK_FORMAT_BC7_UNORM_BLOCK,
      VK_FORMAT_BC7_SRGB_BLOCK,
      VK_FORMAT_BC6H_SFLOAT_BLOCK,
      VK_FORMAT_BC6H_UFLOAT_BLOCK,
      VK_FORMAT_BC5_UNORM_BLOCK,
      VK_FORMAT_BC5_SNORM_BLOCK,
      VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
      VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
      VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
      VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
      VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
      VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
      VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
      VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
      VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
      VK_FORMAT_D32_SFLOAT,
      VK_FORMAT_D16_UNORM,
      VK_FORMAT_D24_UNORM_S8_UINT,
      VK_FORMAT_UNDEFINED};
  return formats[f];
}

static VkPolygonMode get_vk_polygon_mode(ngf_polygon_mode m) {
  static const VkPolygonMode modes[NGF_POLYGON_MODE_COUNT] = {
      VK_POLYGON_MODE_FILL,
      VK_POLYGON_MODE_LINE,
      VK_POLYGON_MODE_POINT};
  return modes[m];
}

static VkCullModeFlags get_vk_cull_mode(ngf_cull_mode m) {
  static const VkCullModeFlagBits modes[NGF_CULL_MODE_COUNT] = {
      VK_CULL_MODE_BACK_BIT,
      VK_CULL_MODE_FRONT_BIT,
      VK_CULL_MODE_FRONT_AND_BACK};
  return (VkCullModeFlags)modes[m];
}

static VkFrontFace get_vk_front_face(ngf_front_face_mode f) {
  static const VkFrontFace modes[NGF_FRONT_FACE_COUNT] = {
      VK_FRONT_FACE_COUNTER_CLOCKWISE,
      VK_FRONT_FACE_CLOCKWISE};
  return modes[f];
}

static VkPrimitiveTopology get_vk_primitive_type(ngf_primitive_topology p) {
  static const VkPrimitiveTopology topos[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {
      VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
      VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
      VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
      VK_PRIMITIVE_TOPOLOGY_LINE_STRIP};
  return topos[p];
}

// Maps (ngf_type, component count, normalized?) to a vertex attribute VkFormat.
// Rows are indexed by the ngf_type value, columns by component count - 1.
// Normalized formats exist only for the 8/16-bit integer types
// (type <= NGF_TYPE_UINT16); anything else yields VK_FORMAT_UNDEFINED.
static VkFormat get_vk_vertex_format(ngf_type type, uint32_t size, bool norm) {
  static const VkFormat normalized_formats[4][4] = {
      {VK_FORMAT_R8_SNORM, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8B8_SNORM, VK_FORMAT_R8G8B8A8_SNORM},
      {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8A8_UNORM},
      {VK_FORMAT_R16_SNORM,
       VK_FORMAT_R16G16_SNORM,
       VK_FORMAT_R16G16B16_SNORM,
       VK_FORMAT_R16G16B16A16_SNORM},
      {VK_FORMAT_R16_UNORM,
       VK_FORMAT_R16G16_UNORM,
       VK_FORMAT_R16G16B16_UNORM,
       VK_FORMAT_R16G16B16A16_UNORM}};
  static const VkFormat formats[9][4] = {
      {VK_FORMAT_R8_SINT, VK_FORMAT_R8G8_SINT, VK_FORMAT_R8G8B8_SINT, VK_FORMAT_R8G8B8A8_SINT},
      {VK_FORMAT_R8_UINT, VK_FORMAT_R8G8_UINT, VK_FORMAT_R8G8B8_UINT, VK_FORMAT_R8G8B8A8_UINT},
      {VK_FORMAT_R16_SINT,
       VK_FORMAT_R16G16_SINT,
       VK_FORMAT_R16G16B16_SINT,
       VK_FORMAT_R16G16B16A16_SINT},
      {VK_FORMAT_R16_UINT,
       VK_FORMAT_R16G16_UINT,
       VK_FORMAT_R16G16B16_UINT,
       VK_FORMAT_R16G16B16A16_UINT},
      {VK_FORMAT_R32_SINT,
       VK_FORMAT_R32G32_SINT,
       VK_FORMAT_R32G32B32_SINT,
       VK_FORMAT_R32G32B32A32_SINT},
      {VK_FORMAT_R32_UINT,
       VK_FORMAT_R32G32_UINT,
       VK_FORMAT_R32G32B32_UINT,
       VK_FORMAT_R32G32B32A32_UINT},
      {VK_FORMAT_R32_SFLOAT,
       VK_FORMAT_R32G32_SFLOAT,
       VK_FORMAT_R32G32B32_SFLOAT,
       VK_FORMAT_R32G32B32A32_SFLOAT},
      {VK_FORMAT_R16_SFLOAT,
       VK_FORMAT_R16G16_SFLOAT,
       
VK_FORMAT_R16G16B16_SFLOAT,
       VK_FORMAT_R16G16B16A16_SFLOAT},
      {VK_FORMAT_R64_SFLOAT,
       VK_FORMAT_R64G64_SFLOAT,
       VK_FORMAT_R64G64B64_SFLOAT,
       VK_FORMAT_R64G64B64A64_SFLOAT}};

  if ((size < 1 || size > 4) || (norm && type > NGF_TYPE_UINT16)) {
    return VK_FORMAT_UNDEFINED;
  } else if (norm) {
    return normalized_formats[type][size - 1];
  } else {
    return formats[type][size - 1];
  }
}

static VkVertexInputRate get_vk_input_rate(ngf_vertex_input_rate r) {
  static const VkVertexInputRate rates[NGF_VERTEX_INPUT_RATE_COUNT] = {
      VK_VERTEX_INPUT_RATE_VERTEX,
      VK_VERTEX_INPUT_RATE_INSTANCE};
  return rates[r];
}

static VkShaderStageFlagBits get_vk_shader_stage(ngf_stage_type s) {
  static const VkShaderStageFlagBits stages[NGF_STAGE_COUNT] = {
      VK_SHADER_STAGE_VERTEX_BIT,
      VK_SHADER_STAGE_FRAGMENT_BIT,
      VK_SHADER_STAGE_COMPUTE_BIT};
  return stages[s];
}

// Translates an ngf buffer usage bitmask into the Vulkan equivalent.
static VkBufferUsageFlags get_vk_buffer_usage(uint32_t usage) {
  VkBufferUsageFlags flags = 0u;
  if (usage & NGF_BUFFER_USAGE_XFER_DST) flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  if (usage & NGF_BUFFER_USAGE_XFER_SRC) flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  if (usage & NGF_BUFFER_USAGE_UNIFORM_BUFFER) flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_INDEX_BUFFER) flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_VERTEX_BUFFER) flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_TEXEL_BUFFER) flags |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_STORAGE_BUFFER) flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)
    flags |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
  if (usage & NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT)
    flags |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR;
  if (usage & NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT)
    flags |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR;
  return flags;
}

// Memory property flags requested for each ngf buffer storage type.
static VkMemoryPropertyFlags get_vk_memory_flags(ngf_buffer_storage_type s) {
  switch (s) {
  case NGF_BUFFER_STORAGE_HOST_READABLE:
    return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  case NGF_BUFFER_STORAGE_HOST_WRITEABLE:
  case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE:
    return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
           VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  }
  return 0;
}

// VMA allocation-create flags for each ngf buffer storage type. Host-visible
// storage types are persistently mapped (CREATE_MAPPED_BIT).
static VmaAllocatorCreateFlags ngfvk_get_vma_alloc_flags(ngf_buffer_storage_type storage_type) {
  switch (storage_type) {
  case NGF_BUFFER_STORAGE_HOST_WRITEABLE:
    return VMA_ALLOCATION_CREATE_MAPPED_BIT |
           VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
  case NGF_BUFFER_STORAGE_HOST_READABLE:
  case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE:
    return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL:
    return 0;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE:
    return VMA_ALLOCATION_CREATE_MAPPED_BIT |
           VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
           VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE:
    return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
           VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT;
  }
  return 0;
}

// Only 16- and 32-bit unsigned index types are supported; anything else maps
// to VK_INDEX_TYPE_MAX_ENUM.
static VkIndexType get_vk_index_type(ngf_type t) {
  switch (t) {
  case NGF_TYPE_UINT16:
    return VK_INDEX_TYPE_UINT16;
  case NGF_TYPE_UINT32:
    return VK_INDEX_TYPE_UINT32;
  default:
    return VK_INDEX_TYPE_MAX_ENUM;
  }
}

static bool ngfvk_format_is_depth(VkFormat image_format) {
  return image_format == VK_FORMAT_D16_UNORM || image_format == VK_FORMAT_D16_UNORM_S8_UINT ||
         image_format == VK_FORMAT_D24_UNORM_S8_UINT || image_format == VK_FORMAT_D32_SFLOAT ||
         image_format == VK_FORMAT_D32_SFLOAT_S8_UINT;
}

static bool ngfvk_format_is_stencil(VkFormat image_format) {
  return image_format == VK_FORMAT_D24_UNORM_S8_UINT ||
         image_format == VK_FORMAT_D16_UNORM_S8_UINT ||
         image_format == VK_FORMAT_D32_SFLOAT_S8_UINT;
}

static VkColorSpaceKHR get_vk_color_space(ngf_colorspace colorspace) {
  static VkColorSpaceKHR color_spaces[NGF_COLORSPACE_COUNT] = {
      VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
      VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT,
      VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT,
      VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT,
      VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT,
      VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT,
      VK_COLOR_SPACE_BT2020_LINEAR_EXT,
      VK_COLOR_SPACE_HDR10_ST2084_EXT};
  return color_spaces[colorspace];
}

#pragma endregion  // vk_enum_maps

#pragma region internal_funcs

ngf_sample_count ngfi_get_highest_sample_count(size_t counts_bitmap);

// Arena backing per-frame resources of the current context's current frame.
ngfi::arena& current_frame_res_arena() {
  return CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena;
}

// Handler for messages from validation layers, etc.
// All messages are forwarded to the user-provided debug callback.
static VKAPI_ATTR VkBool32 VKAPI_CALL ngfvk_debug_message_callback(
    VkDebugUtilsMessageSeverityFlagBitsEXT severity,
    VkDebugUtilsMessageTypeFlagsEXT,
    const 
VkDebugUtilsMessengerCallbackDataEXT* data,\n    void*) {\n  ngf_diagnostic_message_type ngf_msg_type;\n  switch (severity) {\n  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:\n  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:\n    ngf_msg_type = NGF_DIAGNOSTIC_INFO;\n    break;\n  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:\n    ngf_msg_type = NGF_DIAGNOSTIC_WARNING;\n    break;\n  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:\n  default:\n    ngf_msg_type = NGF_DIAGNOSTIC_ERROR;\n    break;\n  }\n  if (ngfi_diag_info.callback) {\n    ngfi_diag_info.callback(ngf_msg_type, ngfi_diag_info.userdata, data->pMessage);\n  }\n  return VK_FALSE;\n}\n\nstatic bool\nngfvk_query_presentation_support(VkPhysicalDevice phys_dev, uint32_t queue_family_index) {\n#if defined(_WIN32) || defined(_WIN64)\n  return vkGetPhysicalDeviceWin32PresentationSupportKHR(phys_dev, queue_family_index);\n#elif defined(__ANDROID__)\n  return true;  // All Android queues surfaces support present.\n#elif defined(__APPLE__)\n  return true;\n#else\n\n  if (_vk.xcb_connection == NULL) {\n    int                screen_idx = 0;\n    xcb_screen_t*      screen     = NULL;\n    xcb_connection_t*  connection = xcb_connect(NULL, &screen_idx);\n    const xcb_setup_t* setup      = xcb_get_setup(connection);\n    for (xcb_screen_iterator_t it = xcb_setup_roots_iterator(setup); screen_idx >= 0 && it.rem;\n         xcb_screen_next(&it)) {\n      if (screen_idx-- == 0) { screen = it.data; }\n    }\n    assert(screen);\n    _vk.xcb_connection = connection;\n    _vk.xcb_visualid   = screen->root_visual;\n  }\n  return vkGetPhysicalDeviceXcbPresentationSupportKHR(\n      phys_dev,\n      queue_family_index,\n      _vk.xcb_connection,\n      _vk.xcb_visualid);\n#endif\n}\n\nstatic void ngfvk_reset_renderpass_cache(ngf_context ctx) {\n  for (size_t p = 0; p < ctx->renderpass_cache.size(); ++p) {\n    ctx->frame_res[ctx->frame_id].retire.append(ctx->renderpass_cache[p].renderpass);\n  }\n  
ctx->renderpass_cache.clear();
}

// Forward declaration for use in ngfvk_retire_resources
static void ngfvk_reset_desc_pools_list(ngfvk_desc_pools_list* superpool);

// Waits for the frame's submission fences (polling with a 0x3B9ACA00 ns ==
// 1 second timeout until they signal), then destroys/frees every resource
// that was retired into this frame's lists. The per-type order below matters:
// e.g. command buffers are freed before their pools are reset.
static void ngfvk_retire_resources(ngfvk_frame_resources* frame_res) {
  if (frame_res->nwait_fences > 0u) {
    VkResult wait_status = VK_SUCCESS;
    do {
      wait_status = vkWaitForFences(
          _vk.device,
          frame_res->nwait_fences,
          frame_res->fences,
          VK_TRUE,
          0x3B9ACA00ul);
    } while (wait_status == VK_TIMEOUT);
    vkResetFences(_vk.device, frame_res->nwait_fences, frame_res->fences);
    frame_res->nwait_fences = 0;
  }

  // Destroy retired pipelines
  for (VkPipeline p : frame_res->retire.list<VkPipeline>()) {
    vkDestroyPipeline(_vk.device, p, NULL);
  }
  frame_res->retire.clear<VkPipeline>();

  // Destroy retired pipeline layouts
  for (VkPipelineLayout l : frame_res->retire.list<VkPipelineLayout>()) {
    vkDestroyPipelineLayout(_vk.device, l, NULL);
  }
  frame_res->retire.clear<VkPipelineLayout>();

  // Destroy retired descriptor set layouts
  for (VkDescriptorSetLayout l : frame_res->retire.list<VkDescriptorSetLayout>()) {
    vkDestroyDescriptorSetLayout(_vk.device, l, NULL);
  }
  frame_res->retire.clear<VkDescriptorSetLayout>();

  // Free retired command buffers
  for (const ngfvk_cmd_buf_with_pool& cb : frame_res->retire.list<ngfvk_cmd_buf_with_pool>()) {
    vkFreeCommandBuffers(_vk.device, cb.cmd_pool, 1u, &cb.cmd_buf);
  }

  // Reset command pools
  // NOTE(review): a pool shared by several retired command buffers gets reset
  // once per buffer here; resetting an already-reset pool appears redundant —
  // confirm intentional.
  for (const ngfvk_cmd_buf_with_pool& cb : frame_res->retire.list<ngfvk_cmd_buf_with_pool>()) {
    vkResetCommandPool(_vk.device, cb.cmd_pool, 0);
  }
  frame_res->retire.clear<ngfvk_cmd_buf_with_pool>();

  // Destroy retired framebuffers
  for (VkFramebuffer fb : frame_res->retire.list<VkFramebuffer>()) {
    vkDestroyFramebuffer(_vk.device, fb, NULL);
  }
  frame_res->retire.clear<VkFramebuffer>();

  // Destroy retired render passes
  for (VkRenderPass rp : frame_res->retire.list<VkRenderPass>()) {
    vkDestroyRenderPass(_vk.device, rp, NULL);
  }
  frame_res->retire.clear<VkRenderPass>();

  // Destroy retired samplers
  for (ngf_sampler s : frame_res->retire.list<ngf_sampler>()) { NGFI_FREE(s); }
  frame_res->retire.clear<ngf_sampler>();

  // Destroy retired image views
  // NOTE(review): image views use list<T>().clear() while every other type
  // uses retire.clear<T>() — confirm the two are equivalent.
  for (VkImageView v : frame_res->retire.list<VkImageView>()) {
    vkDestroyImageView(_vk.device, v, nullptr);
  }
  frame_res->retire.list<VkImageView>().clear();
  for (ngf_image_view v : frame_res->retire.list<ngf_image_view>()) { NGFI_FREE(v); }
  frame_res->retire.list<ngf_image_view>().clear();

  // Destroy retired buffer views
  for (ngf_texel_buffer_view v : frame_res->retire.list<ngf_texel_buffer_view>()) { NGFI_FREE(v); }
  frame_res->retire.clear<ngf_texel_buffer_view>();

  // Destroy retired images
  for (ngf_image img : frame_res->retire.list<ngf_image>()) { NGFI_FREE(img); }
  frame_res->retire.clear<ngf_image>();

  // Destroy retired buffers
  for (ngf_buffer buf : frame_res->retire.list<ngf_buffer>()) { NGFI_FREE(buf); }
  frame_res->retire.clear<ngf_buffer>();

  // Reset retired descriptor pool lists
  for (ngfvk_desc_pools_list* dpl : frame_res->retire.list<ngfvk_desc_pools_list*>()) {
    ngfvk_reset_desc_pools_list(dpl);
  }
  frame_res->retire.clear<ngfvk_desc_pools_list*>();
}

// Initializes a descriptor superpool with `pools_lists` zero-initialized
// per-frame pool lists, tagged with the owning context's id.
static ngf_error
ngfvk_create_desc_superpool(ngfvk_desc_superpool* superpool, uint8_t pools_lists, uint16_t ctx_id) {
  superpool->ctx_id      = ctx_id;
  superpool->pools_lists = ngfi::fixed_array<ngfvk_desc_pools_list> {pools_lists};
  memset(superpool->pools_lists.data(), 0, pools_lists * sizeof(ngfvk_desc_pools_list));
  return NGF_ERROR_OK;
}

// Destroys every VkDescriptorPool in every pool list of the superpool and
// frees the linked-list nodes, then releases the list storage itself.
static void ngfvk_destroy_desc_superpool(ngfvk_desc_superpool* superpool) {
  for (auto& pool_list : superpool->pools_lists) {
    ngfvk_desc_pool* p = pool_list.list;
    while (p) {
      vkDestroyDescriptorPool(_vk.device, p->vk_pool, NULL);
      ngfvk_desc_pool* next = p->next;
      NGFI_FREE(p);
      p = next;
    }
  }
  superpool->pools_lists = ngfi::fixed_array<ngfvk_desc_pools_list> {};
}

// Finds (or lazily creates) the descriptor superpool for the context encoded
// in the frame token, and returns the pool list for that token's frame slot.
static ngfvk_desc_pools_list* ngfvk_find_desc_pools_list(ngf_frame_token token) {
  const uint16_t ctx_id   = ngfi_frame_ctx_id(token);
  const uint8_t  nframes  = ngfi_frame_max_inflight_frames(token);
  const uint8_t  frame_id = ngfi_frame_id(token);

  ngfvk_desc_superpool* superpool = NULL;
  for (size_t i = 0; i < CURRENT_CONTEXT->desc_superpools.size(); ++i) {
    if (CURRENT_CONTEXT->desc_superpools[i].ctx_id == ctx_id) {
      superpool = &CURRENT_CONTEXT->desc_superpools[i];
      break;
    }
  }

  // No superpool for this context yet — create one in place.
  if (superpool == NULL) {
    ngfvk_desc_superpool new_superpool = {
        .ctx_id      = (uint16_t)~0,
        .pools_lists = ngfi::fixed_array<ngfvk_desc_pools_list> {}};
    CURRENT_CONTEXT->desc_superpools.emplace_back(ngfi::move(new_superpool));
    superpool = &CURRENT_CONTEXT->desc_superpools.back();
    ngfvk_create_desc_superpool(superpool, nframes, ctx_id);
  }

  return &superpool->pools_lists[frame_id];
}

// Allocates a VkDescriptorSet matching `set_layout` from the given pool list,
// growing the list with a new VkDescriptorPool when the active one cannot fit
// the request, and pre-binds dummy resources to every binding.
// Returns VK_NULL_HANDLE on allocation failure.
static VkDescriptorSet ngfvk_desc_pools_list_allocate_set(
    ngfvk_desc_pools_list*       pools,
    const ngfvk_desc_set_layout* set_layout) {
  // Ensure we have an active descriptor pool that is able to service the
  // request.
  const bool have_active_pool    = (pools->active_pool != NULL);
  bool       fresh_pool_required = !have_active_pool;

  if (have_active_pool) {
    // Check if the active descriptor pool can fit the required descriptor
    // set.
    ngfvk_desc_pool*                pool     = pools->active_pool;
    const ngfvk_desc_pool_capacity* capacity = &pool->capacity;
    ngfvk_desc_pool_capacity*       usage    = &pool->utilization;
    for (unsigned i = 0; !fresh_pool_required && i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
      fresh_pool_required |=
(usage->descriptors[i] + set_layout->counts[i] >= capacity->descriptors[i]);
    }
    fresh_pool_required |= (usage->sets + 1u >= capacity->sets);
  }
  if (fresh_pool_required) {
    // Either no pool exists yet, or the active pool is exhausted. Reuse the
    // next pool in the chain if one was created previously; otherwise create
    // a brand new pool and link it in.
    if (!have_active_pool || pools->active_pool->next == NULL) {
      // TODO: make this tweakable
      ngfvk_desc_pool_capacity capacity;
      capacity.sets = 100u;
      for (int i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) capacity.descriptors[i] = 100u;

      // Prepare descriptor counts.
      auto vk_pool_sizes = ngfi::tmp_alloc<VkDescriptorPoolSize>(NGF_DESCRIPTOR_TYPE_COUNT);
      for (unsigned i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
        vk_pool_sizes[i].descriptorCount = capacity.descriptors[i];
        vk_pool_sizes[i].type            = get_vk_descriptor_type((ngf_descriptor_type)i);
      }

      // Prepare a createinfo structure for the new pool.
      const VkDescriptorPoolCreateInfo vk_pool_ci = {
          .sType         = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
          .pNext         = NULL,
          .flags         = 0u,
          .maxSets       = capacity.sets,
          .poolSizeCount = NGF_DESCRIPTOR_TYPE_COUNT,
          .pPoolSizes    = vk_pool_sizes};

      // Create the new pool.
      ngfvk_desc_pool* new_pool = NGFI_ALLOC(ngfvk_desc_pool);
      new_pool->next            = NULL;
      new_pool->capacity        = capacity;
      memset(&new_pool->utilization, 0, sizeof(new_pool->utilization));
      const VkResult vk_pool_create_result =
          vkCreateDescriptorPool(_vk.device, &vk_pool_ci, NULL, &new_pool->vk_pool);
      if (vk_pool_create_result == VK_SUCCESS) {
        // Link the new pool at the end of the chain (or make it the list
        // head if this is the very first pool).
        if (pools->active_pool != NULL && pools->active_pool->next == NULL) {
          pools->active_pool->next = new_pool;
        } else if (pools->active_pool == NULL) {
          pools->list = new_pool;
        } else {  // shouldn't happen
          assert(false);
        }
        pools->active_pool = new_pool;
      } else {
        NGFI_FREE(new_pool);
        assert(false);
      }
    } else {
      // A previously created (and since reset) pool is available — reuse it.
      pools->active_pool = pools->active_pool->next;
    }
  }

  // Allocate the new descriptor set from the pool.
  ngfvk_desc_pool* pool = pools->active_pool;

  const VkDescriptorSetAllocateInfo vk_desc_set_info = {
      .sType              = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
      .pNext              = NULL,
      .descriptorPool     = pool->vk_pool,
      .descriptorSetCount = 1u,
      .pSetLayouts        = &set_layout->vk_handle};
  VkDescriptorSet result = VK_NULL_HANDLE;
  const VkResult  desc_set_alloc_result =
      vkAllocateDescriptorSets(_vk.device, &vk_desc_set_info, &result);
  if (desc_set_alloc_result != VK_SUCCESS) { return VK_NULL_HANDLE; }

  // Update usage counters for the active descriptor pool.
  for (unsigned i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
    pool->utilization.descriptors[i] += set_layout->counts[i];
  }
  pool->utilization.sets++;

  // Bind dummy resources.
  // Every descriptor of every binding gets a placeholder write so the set is
  // fully valid even before the application binds real resources.
  auto     dummy_writes = ngfi::tmp_alloc<VkWriteDescriptorSet>(set_layout->nall_descs);
  uint32_t num_writes   = 0u;
  for (uint32_t b = 0u; b < set_layout->binding_properties.size(); ++b) {
    if (set_layout->binding_properties[b].type == VK_DESCRIPTOR_TYPE_MAX_ENUM) continue;
    for (uint32_t array_idx = 0u; array_idx < set_layout->binding_properties[b].ndescs_in_binding;
         ++array_idx) {
      VkWriteDescriptorSet* desc_w = &dummy_writes[num_writes++];
      desc_w->sType                = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
      desc_w->pNext                = NULL;
      desc_w->descriptorCount      = 1u;
      desc_w->descriptorType       = set_layout->binding_properties[b].type;
      desc_w->dstArrayElement      = array_idx;
      desc_w->dstBinding           = b;
      desc_w->dstSet               = result;

      const bool is_multilayered_image = set_layout->binding_properties[b].is_multilayered_image;
      const bool is_cubemap            = set_layout->binding_properties[b].is_cubemap;

      // Pick the dummy resource matching the descriptor type (and, for
      // images, the layered/cubemap flavor).
      switch (desc_w->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
        desc_w->pImageInfo = &_vk.dummy_res.samp_info;
        break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        desc_w->pImageInfo =
            is_multilayered_image ? &_vk.dummy_res.imgsamp_arr_info : &_vk.dummy_res.imgsamp_info;
        break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        if (!is_cubemap) {
          desc_w->pImageInfo =
              is_multilayered_image ? &_vk.dummy_res.img_arr_info : &_vk.dummy_res.img_info;
        } else {
          desc_w->pImageInfo =
              is_multilayered_image ? &_vk.dummy_res.cube_arr_info : &_vk.dummy_res.cube_info;
        }
        break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        desc_w->pBufferInfo = &_vk.dummy_res.buf_info;
        break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        desc_w->pTexelBufferView = &_vk.dummy_res.tbuf->vk_buf_view;
        break;
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
        // Acceleration structures are written through a pNext extension
        // struct; it is allocated from the tmp arena so it stays alive until
        // vkUpdateDescriptorSets below.
        auto dummy_accel_info   = ngfi::tmp_alloc<VkWriteDescriptorSetAccelerationStructureKHR>();
        dummy_accel_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR;
        dummy_accel_info->pNext = NULL;
        dummy_accel_info->accelerationStructureCount = 1;
        dummy_accel_info->pAccelerationStructures    = &_vk.dummy_res.dummy_accel_struct;
        desc_w->pNext                                = dummy_accel_info;
        break;
      }
      default:
        assert(false);
      }
    }
  }
  vkUpdateDescriptorSets(_vk.device, num_writes, dummy_writes, 0, NULL);

  return result;
}
// Creates a VkImageView covering the first `nmips` mip levels and `nlayers`
// array layers of the given image, inferring the aspect mask from the format.
static ngf_error ngfvk_create_vk_image_view(
    VkImage         image,
    VkImageViewType image_type,
    VkFormat
image_format,
    uint32_t        nmips,
    uint32_t        nlayers,
    VkImageView*    result) {
  // Derive the aspect mask from the format: depth (+ stencil when present)
  // for depth formats, color otherwise.
  const bool is_depth    = ngfvk_format_is_depth(image_format);
  const bool is_stencil  = ngfvk_format_is_stencil(image_format);
  const auto stencil_bit = is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : ((VkImageAspectFlagBits)0);
  const auto aspect_mask = (VkImageAspectFlags)(is_depth ? (VK_IMAGE_ASPECT_DEPTH_BIT | stencil_bit)
                                                         : VK_IMAGE_ASPECT_COLOR_BIT);

  const VkImageViewCreateInfo image_view_info = {
      .sType    = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
      .pNext    = NULL,
      .flags    = 0u,
      .image    = image,
      .viewType = image_type,
      .format   = image_format,
      .components =
          {.r = VK_COMPONENT_SWIZZLE_IDENTITY,
           .g = VK_COMPONENT_SWIZZLE_IDENTITY,
           .b = VK_COMPONENT_SWIZZLE_IDENTITY,
           .a = VK_COMPONENT_SWIZZLE_IDENTITY},
      .subresourceRange = {
          .aspectMask     = aspect_mask,
          .baseMipLevel   = 0u,
          .levelCount     = nmips,
          .baseArrayLayer = 0u,
          .layerCount     = nlayers}};
  const VkResult create_view_vkerr = vkCreateImageView(_vk.device, &image_view_info, NULL, result);
  if (create_view_vkerr != VK_SUCCESS) {
    return NGF_ERROR_INVALID_OPERATION;
  } else {
    return NGF_ERROR_OK;
  }
}

// Builds a single-subpass VkRenderPass from the given ngf attachment
// descriptions and their per-pass parameters (layout, load/store ops,
// resolve flag). Color attachments are split into color vs. resolve
// references; at most one depth/stencil attachment is allowed, and the
// number of resolve attachments (if any) must equal the number of color
// attachments — violations yield VK_ERROR_UNKNOWN.
static VkResult ngfvk_renderpass_from_attachment_descs(
    uint32_t                          nattachments,
    const ngf_attachment_description* attachment_descs,
    const ngfvk_attachment_pass_desc* attachment_compat_pass_descs,
    VkRenderPass*                     result) {
  auto     vk_attachment_descs        = ngfi::tmp_alloc<VkAttachmentDescription>(nattachments);
  auto     vk_color_attachment_refs   = ngfi::tmp_alloc<VkAttachmentReference>(nattachments);
  auto     vk_resolve_attachment_refs = ngfi::tmp_alloc<VkAttachmentReference>(nattachments);
  uint32_t ncolor_attachments         = 0u;
  uint32_t nresolve_attachments       = 0u;
  VkAttachmentReference depth_stencil_attachment_ref;
  bool                  have_depth_stencil_attachment = false;

  for (uint32_t a = 0u; a < nattachments; ++a) {
    const ngf_attachment_description* ngf_attachment_desc  = &attachment_descs[a];
    const ngfvk_attachment_pass_desc* attachment_pass_desc = &attachment_compat_pass_descs[a];
    const bool has_stencil = ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH_STENCIL;
    VkAttachmentDescription*          vk_attachment_desc   = &vk_attachment_descs[a];

    vk_attachment_desc->flags   = 0u;
    vk_attachment_desc->format  = get_vk_image_format(ngf_attachment_desc->format);
    vk_attachment_desc->samples = get_vk_sample_count(ngf_attachment_desc->sample_count);
    vk_attachment_desc->loadOp  = attachment_pass_desc->load_op;
    vk_attachment_desc->storeOp = attachment_pass_desc->store_op;
    // Stencil ops mirror the depth ops only when a stencil aspect exists.
    vk_attachment_desc->stencilLoadOp = has_stencil ? attachment_pass_desc->load_op : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    vk_attachment_desc->stencilStoreOp = has_stencil ? attachment_pass_desc->store_op : VK_ATTACHMENT_STORE_OP_DONT_CARE;
    // The image stays in the same layout for the whole pass.
    vk_attachment_desc->initialLayout = attachment_pass_desc->layout;
    vk_attachment_desc->finalLayout   = attachment_pass_desc->layout;

    if (ngf_attachment_desc->type == NGF_ATTACHMENT_COLOR) {
      if (!attachment_pass_desc->is_resolve) {
        VkAttachmentReference* vk_color_attachment_reference =
            &vk_color_attachment_refs[ncolor_attachments++];
        vk_color_attachment_reference->attachment = a;
        vk_color_attachment_reference->layout     = attachment_pass_desc->layout;
      } else {
        VkAttachmentReference* vk_resolve_attachment_reference =
            &vk_resolve_attachment_refs[nresolve_attachments++];
        vk_resolve_attachment_reference->attachment = a;
        vk_resolve_attachment_reference->layout     = attachment_pass_desc->layout;
      }
    }
    if (ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH ||
        ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH_STENCIL) {
      if (have_depth_stencil_attachment) {
        // TODO: insert diag. log here
        return VK_ERROR_UNKNOWN;
      } else {
        have_depth_stencil_attachment           = true;
        depth_stencil_attachment_ref.attachment = a;
        depth_stencil_attachment_ref.layout     = attachment_pass_desc->layout;
      }
    }
  }
  if (nresolve_attachments > 0u && nresolve_attachments != ncolor_attachments) {
    // TODO: insert diag. log here.
    return VK_ERROR_UNKNOWN;
  }

  const VkSubpassDescription subpass_desc = {
      .flags                = 0u,
      .pipelineBindPoint    = VK_PIPELINE_BIND_POINT_GRAPHICS,
      .inputAttachmentCount = 0u,
      .pInputAttachments    = NULL,
      .colorAttachmentCount = ncolor_attachments,
      .pColorAttachments    = vk_color_attachment_refs,
      .pResolveAttachments  = nresolve_attachments > 0u ?
vk_resolve_attachment_refs : NULL,\n      .pDepthStencilAttachment =\n          have_depth_stencil_attachment ? &depth_stencil_attachment_ref : NULL,\n      .preserveAttachmentCount = 0u,\n      .pPreserveAttachments    = NULL};\n\n  const VkRenderPassCreateInfo renderpass_ci = {\n      .sType           = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,\n      .pNext           = NULL,\n      .flags           = 0u,\n      .attachmentCount = nattachments,\n      .pAttachments    = vk_attachment_descs,\n      .subpassCount    = 1u,\n      .pSubpasses      = &subpass_desc,\n      .dependencyCount = 0u,\n      .pDependencies   = NULL};\n\n  return vkCreateRenderPass(_vk.device, &renderpass_ci, NULL, result);\n}\nstatic inline uint64_t ngfvk_ptr_hash(void* data) {\n  uint64_t mmh3_out[2] = {0, 0};\n  ngfi::detail::mmh3_x64_128(reinterpret_cast<uintptr_t>(data), 0x9e3779b9, mmh3_out);\n  return mmh3_out[0] ^ mmh3_out[1];\n}\n\nngfi::maybe_ngfptr<ngf_context_t> ngf_context_t::make(const ngf_context_info& info) {\n  auto ctx = ngfi::unique_ptr<ngf_context_t>::make();\n  if (!ctx) { return NGF_ERROR_OUT_OF_MEM; }\n\n  ngf_error                 err            = NGF_ERROR_OK;\n  VkResult                  vk_err         = VK_SUCCESS;\n  const ngf_swapchain_info* swapchain_info = info.swapchain_info;\n\n  // Create swapchain if necessary.\n  if (swapchain_info != NULL) {\n    // Begin by creating the window surface.\n#if defined(_WIN32) || defined(_WIN64)\n    const VkWin32SurfaceCreateInfoKHR surface_info = {\n        .sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,\n        .pNext     = NULL,\n        .flags     = 0,\n        .hinstance = GetModuleHandle(NULL),\n        .hwnd      = (HWND)swapchain_info->native_handle};\n#elif defined(__ANDROID__)\n    const VkAndroidSuraceCreateInfoKHR surface_info = {\n        .sType  = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR,\n        .pNext  = NULL,\n        .flags  = 0,\n        .window = 
swapchain_info->native_handle};\n#elif defined(__APPLE__)\n    const VkMetalSurfaceCreateInfoEXT surface_info = {\n        .sType  = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT,\n        .pNext  = NULL,\n        .flags  = 0,\n        .pLayer = (const CAMetalLayer*)ngfvk_create_ca_metal_layer(swapchain_info)};\n#else\n    const VkXcbSurfaceCreateInfoKHR surface_info = {\n        .sType      = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR,\n        .pNext      = NULL,\n        .flags      = 0,\n        .connection = _vk.xcb_connection,\n        .window     = (xcb_window_t)swapchain_info->native_handle};\n#endif\n    vk_err = VK_CREATE_SURFACE_FN(_vk.instance, &surface_info, NULL, &ctx->surface);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n    VkBool32 surface_supported = false;\n    vkGetPhysicalDeviceSurfaceSupportKHR(\n        _vk.phys_dev,\n        _vk.present_family_idx,\n        ctx->surface,\n        &surface_supported);\n    if (!surface_supported) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n    // Create the default rendertarget object.\n    const bool default_rt_has_depth = swapchain_info->depth_format != NGF_IMAGE_FORMAT_UNDEFINED;\n    const bool default_rt_is_multisampled = (unsigned int)swapchain_info->sample_count > 1u;\n    const bool default_rt_no_stencil = swapchain_info->depth_format == NGF_IMAGE_FORMAT_DEPTH32 ||\n                                       swapchain_info->depth_format == NGF_IMAGE_FORMAT_DEPTH16;\n\n    const uint32_t nattachment_descs =\n        1u + (default_rt_has_depth ? 1u : 0u) + (default_rt_is_multisampled ? 
1u : 0u);\n\n    ctx->default_render_target = ngfi::move(\n        ngf_render_target_t::make(swapchain_info->width, swapchain_info->height, nattachment_descs)\n            .value());\n\n    uint32_t                    attachment_desc_idx = 0u;\n    ngf_attachment_description* color_attachment_desc =\n        &ctx->default_render_target->attachment_descs[attachment_desc_idx];\n    color_attachment_desc->format       = swapchain_info->color_format;\n    color_attachment_desc->sample_count = swapchain_info->sample_count;\n    color_attachment_desc->type         = NGF_ATTACHMENT_COLOR;\n    color_attachment_desc->is_resolve   = false;\n\n    ngfvk_attachment_pass_desc* color_attachment_pass_desc =\n        &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];\n    color_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;\n    color_attachment_pass_desc->is_resolve = false;\n    color_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;\n    color_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;\n\n    if (default_rt_has_depth) {\n      ++attachment_desc_idx;\n\n      ngf_attachment_description* depth_attachment_desc =\n          &ctx->default_render_target->attachment_descs[attachment_desc_idx];\n      depth_attachment_desc->format       = swapchain_info->depth_format;\n      depth_attachment_desc->sample_count = swapchain_info->sample_count;\n      depth_attachment_desc->type =\n          default_rt_no_stencil ? 
NGF_ATTACHMENT_DEPTH : NGF_ATTACHMENT_DEPTH_STENCIL;\n      depth_attachment_desc->is_resolve = false;\n\n      ngfvk_attachment_pass_desc* depth_attachment_pass_desc =\n          &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];\n      depth_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;\n      depth_attachment_pass_desc->is_resolve = false;\n      depth_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;\n      depth_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;\n    }\n\n    if (default_rt_is_multisampled) {\n      ++attachment_desc_idx;\n\n      ngf_attachment_description* resolve_attachment_desc =\n          &ctx->default_render_target->attachment_descs[attachment_desc_idx];\n      resolve_attachment_desc->format       = swapchain_info->color_format;\n      resolve_attachment_desc->sample_count = NGF_SAMPLE_COUNT_1;\n      resolve_attachment_desc->type         = NGF_ATTACHMENT_COLOR;\n      resolve_attachment_desc->is_resolve   = true;\n\n      ngfvk_attachment_pass_desc* resolve_attachment_pass_desc =\n          &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];\n      resolve_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;\n      resolve_attachment_pass_desc->is_resolve = true;\n      resolve_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;\n      resolve_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;\n\n      ctx->default_render_target->have_resolve_attachments = true;\n    }\n\n    ngfvk_renderpass_from_attachment_descs(\n        nattachment_descs,\n        ctx->default_render_target->attachment_descs.data(),\n        ctx->default_render_target->attachment_compat_pass_descs.data(),\n        &ctx->default_render_target->compat_render_pass);\n\n    // Create the swapchain itself.\n    auto maybe_swapchain =\n        ngfvk_swapchain::make(*swapchain_info, 
ctx->default_render_target.get(), ctx->surface);\n    if (maybe_swapchain.has_error()) return maybe_swapchain.error();\n    ctx->swapchain = ngfi::move(maybe_swapchain.value());\n    if (err != NGF_ERROR_OK) { return err; }\n    ctx->swapchain_info = *swapchain_info;\n  } else {\n    ctx->default_render_target = NULL;\n  }\n\n  // Create frame resource holders.\n  const uint32_t max_inflight_frames = swapchain_info ? ctx->swapchain->nimgs : 3u;\n  ctx->max_inflight_frames           = max_inflight_frames;\n  ctx->frame_res = ngfi::fixed_array<ngfvk_frame_resources> {max_inflight_frames};\n  if (ctx->frame_res.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n  for (uint32_t f = 0u; f < max_inflight_frames; ++f) {\n    ctx->frame_res[f].res_frame_arena.set_block_size(1024);\n    ctx->frame_res[f].submitted_cmd_bufs.reserve(8u);\n    const VkFenceCreateInfo fence_info = {\n        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,\n        .pNext = NULL,\n        .flags = 0u};\n    ctx->frame_res[f].nwait_fences = 0;\n    for (uint32_t i = 0u; i < sizeof(ctx->frame_res[f].fences) / sizeof(VkFence); ++i) {\n      vk_err = vkCreateFence(_vk.device, &fence_info, NULL, &ctx->frame_res[f].fences[i]);\n      if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n    }\n  }\n\n  ctx->frame_id            = 0u;\n  ctx->current_frame_token = ~0u;\n\n  ctx->command_superpools.reserve(3);\n  ctx->desc_superpools.reserve(3);\n  ctx->renderpass_cache.reserve(8);\n\n  {\n    const VkPipelineLayoutCreateInfo default_push_layout_info = {\n        .sType                  = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n        .pNext                  = NULL,\n        .flags                  = 0u,\n        .setLayoutCount         = 0u,\n        .pSetLayouts            = NULL,\n        .pushConstantRangeCount = 1u,\n        .pPushConstantRanges    = &ngfvk::global::default_push_constant_range};\n    vk_err = vkCreatePipelineLayout(\n        _vk.device, 
&default_push_layout_info, NULL, &ctx->vk_default_push_layout);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  }\n\n  return ngfi::move(ctx);\n}\n\nngf_context_t::~ngf_context_t() noexcept {\n  vkDeviceWaitIdle(_vk.device);\n\n  if (vk_default_push_layout != VK_NULL_HANDLE) {\n    vkDestroyPipelineLayout(_vk.device, vk_default_push_layout, NULL);\n  }\n\n  if (default_render_target) {\n    swapchain =\n        ngfi::unique_ptr<ngfvk_swapchain> {};  // swapchain must be destroyed before surface.\n    if (surface != VK_NULL_HANDLE) { vkDestroySurfaceKHR(_vk.instance, surface, NULL); }\n  }\n\n  default_render_target =\n      ngfi::unique_ptr<ngf_render_target_t> {};  // explicitly destroy default RT here.\n  for (ngfvk_frame_resources& fr : frame_res) {\n    ngfvk_retire_resources(&fr);\n    for (uint32_t i = 0u; i < sizeof(fr.fences) / sizeof(VkFence); ++i) {\n      vkDestroyFence(_vk.device, fr.fences[i], NULL);\n    }\n  }\n\n  for (size_t p = 0; p < desc_superpools.size(); ++p) {\n    ngfvk_destroy_desc_superpool(&desc_superpools[p]);\n  }\n\n  ngfvk_reset_renderpass_cache(this);\n\n  if (CURRENT_CONTEXT == this) CURRENT_CONTEXT = nullptr;\n}\n\nngfi::maybe_ngfptr<ngf_render_target_t>\nngf_render_target_t::make(const ngf_render_target_info& info) NGF_NOEXCEPT {\n  auto rt = ngfi::unique_ptr<ngf_render_target_t>::make();\n  if (!rt) return NGF_ERROR_OUT_OF_MEM;\n\n  uint32_t ncolor_attachments   = 0u;\n  uint32_t nresolve_attachments = 0u;\n  for (uint32_t a = 0u; a < info.attachment_descriptions->ndescs; ++a) {\n    if (info.attachment_descriptions->descs[a].type == NGF_ATTACHMENT_COLOR) {\n      if (info.attachment_descriptions->descs[a].is_resolve) {\n        ++nresolve_attachments;\n      } else {\n        ++ncolor_attachments;\n      }\n    }\n  }\n  if (nresolve_attachments > 0 && ncolor_attachments != nresolve_attachments) {\n    NGFI_DIAG_ERROR(\"the same number of resolve and color attachments must be provided\");\n    
return NGF_ERROR_INVALID_OPERATION;
  }

  ngfi::fixed_array<ngfvk_attachment_pass_desc> vk_attachment_pass_descs {
      info.attachment_descriptions->ndescs};
  ngfi::fixed_array<VkImageView> attachment_views {info.attachment_descriptions->ndescs};
  ngfi::fixed_array<ngf_image>   attachment_images {info.attachment_descriptions->ndescs};

  // Build per-attachment pass parameters and create one image view per
  // attachment image reference.
  for (uint32_t a = 0u; a < info.attachment_descriptions->ndescs; ++a) {
    const ngf_attachment_description* ngf_attachment_desc = &info.attachment_descriptions->descs[a];
    ngfvk_attachment_pass_desc*       attachment_pass_desc = &vk_attachment_pass_descs[a];
    const ngf_attachment_type         attachment_type      = ngf_attachment_desc->type;

    rt->have_resolve_attachments |= ngf_attachment_desc->is_resolve;

    // Pick the image layout the attachment is used in during a pass.
    switch (attachment_type) {
    case NGF_ATTACHMENT_COLOR:
      attachment_pass_desc->layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
      break;
    case NGF_ATTACHMENT_DEPTH:
    case NGF_ATTACHMENT_DEPTH_STENCIL:
      attachment_pass_desc->layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
      break;
    default:
      assert(false);
    }

    const ngf_image_ref* attachment_img_ref = &info.attachment_image_refs[a];
    const ngf_image      attachment_img     = attachment_img_ref->image;
    attachment_pass_desc->is_resolve        = ngf_attachment_desc->is_resolve;

    // These are needed just to create a compatible render pass, load/store ops don't affect
    // render pass compatibility.
    const ngf_attachment_load_op  load_op  = NGF_LOAD_OP_DONTCARE;
    const ngf_attachment_store_op store_op = NGF_STORE_OP_DONTCARE;
    attachment_pass_desc->load_op          = get_vk_load_op(load_op);
    attachment_pass_desc->store_op         = get_vk_store_op(store_op);
    const bool attachment_is_cubemap       = attachment_img_ref->image->type == NGF_IMAGE_TYPE_CUBE;

    const VkImageAspectFlags subresource_aspect_flags =
        (attachment_type == NGF_ATTACHMENT_COLOR ? VK_IMAGE_ASPECT_COLOR_BIT : 0u) |
        (attachment_type == NGF_ATTACHMENT_DEPTH ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) |
        (attachment_type == NGF_ATTACHMENT_DEPTH_STENCIL
             ? VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT
             : 0u);
    // A 2D view over exactly one mip level and one layer; for cubemaps the
    // layer index is 6 * layer + face.
    const VkImageViewCreateInfo image_view_create_info = {
        .sType    = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext    = NULL,
        .flags    = 0u,
        .image    = (VkImage)attachment_img->alloc.obj_handle,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .format   = attachment_img->vk_fmt,
        .components =
            {
                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
            },
        .subresourceRange = {
            .aspectMask     = subresource_aspect_flags,
            .baseMipLevel   = attachment_img_ref->mip_level,
            .levelCount     = 1u,
            .baseArrayLayer = attachment_is_cubemap ? 6u * attachment_img_ref->layer +
                                                          attachment_img_ref->cubemap_face
                                                    : attachment_img_ref->layer,
            .layerCount     = 1u,
        }};
    VkResult vk_err =
        vkCreateImageView(_vk.device, &image_view_create_info, NULL, &attachment_views[a]);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
    attachment_images[a] = attachment_img;
  }
  rt->attachment_image_views = ngfi::move(attachment_views);
  rt->attachment_images      = ngfi::move(attachment_images);

  const VkResult renderpass_create_result = ngfvk_renderpass_from_attachment_descs(
      info.attachment_descriptions->ndescs,
      info.attachment_descriptions->descs,
      vk_attachment_pass_descs.data(),
      &rt->compat_render_pass);
  if (renderpass_create_result != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }

  // The render target inherits its dimensions from the first attachment.
  rt->width            = info.attachment_image_refs[0].image->extent.width;
  rt->height           = info.attachment_image_refs[0].image->extent.height;
  rt->nattachments     = info.attachment_descriptions->ndescs;
  rt->attachment_descs = ngfi::fixed_array<ngf_attachment_description> {rt->nattachments};
  rt->attachment_compat_pass_descs = ngfi::move(vk_attachment_pass_descs);
  memcpy(
      &rt->attachment_descs[0],
      info.attachment_descriptions->descs,
      sizeof(rt->attachment_descs[0]) * info.attachment_descriptions->ndescs);

  // Create a framebuffer.
  const VkFramebufferCreateInfo fb_info = {
      .sType           = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
      .pNext           = NULL,
      .flags           = 0u,
      .renderPass      = rt->compat_render_pass,
      .attachmentCount = info.attachment_descriptions->ndescs,
      .pAttachments    = rt->attachment_image_views.data(),
      .width           = rt->width,
      .height          = rt->height,
      .layers          = 1u};
  VkResult vk_err = vkCreateFramebuffer(_vk.device, &fb_info, NULL, &rt->frame_buffer);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }

  return rt;
}

// Creates an empty "default" render target shell of the given size with room
// for `nattachment_descs` attachment descriptions. The attachments and
// compatibility render pass are filled in later by the caller (see
// ngf_context_t::make); no framebuffer is created for default targets.
ngfi::maybe_ngfptr<ngf_render_target_t>
ngf_render_target_t::make(uint32_t width, uint32_t height, uint32_t nattachment_descs)
    NGF_NOEXCEPT {
  auto rt = ngfi::unique_ptr<ngf_render_target_t>::make();
  if (!rt) return NGF_ERROR_OUT_OF_MEM;

  rt->is_default       = true;
  rt->width            = width;
  rt->height           = height;
  rt->frame_buffer     = VK_NULL_HANDLE;
  rt->nattachments     = nattachment_descs;
  rt->attachment_descs = ngfi::fixed_array<ngf_attachment_description> {nattachment_descs};
  rt->attachment_compat_pass_descs =
      ngfi::fixed_array<ngfvk_attachment_pass_desc> {nattachment_descs};

  return rt;
}

// Retires the render target's framebuffer (non-default targets only),
// compatibility render pass, and attachment image views into the current
// frame's retirement lists, then invalidates the render pass cache.
ngf_render_target_t::~ngf_render_target_t() NGF_NOEXCEPT {
  if (CURRENT_CONTEXT) {
    ngfvk_frame_resources* res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];
    if (!is_default) {
      if (frame_buffer != VK_NULL_HANDLE) { res->retire.append(frame_buffer); }
    }
    if (compat_render_pass != VK_NULL_HANDLE) { res->retire.append(compat_render_pass); }
    for (VkImageView v : attachment_image_views) { res->retire.append(v); }
    // clear out the entire renderpass cache to make sure the entries associated
    // with this target don't stick around.
    // TODO: clear out all caches across all contexts.
    ngfvk_reset_renderpass_cache(CURRENT_CONTEXT);
  }
}

// Creates a VkBufferView over a sub-range of the given buffer, reinterpreted
// with the requested texel format.
ngfi::maybe_ngfptr<ngf_texel_buffer_view_t>
ngf_texel_buffer_view_t::make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT {
  auto buf_view = ngfi::unique_ptr<ngf_texel_buffer_view_t>::make();
  if (!buf_view) return NGF_ERROR_OUT_OF_MEM;
  const VkBufferViewCreateInfo vk_buf_view_ci = {
      .sType  = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
      .pNext  = NULL,
      .flags  = 0u,
      .buffer = (VkBuffer)info.buffer->alloc.obj_handle,
      .format = get_vk_image_format(info.texel_format),
      .offset = info.offset,
      .range  = info.size};
  const VkResult vk_result =
      vkCreateBufferView(_vk.device, &vk_buf_view_ci, NULL, &buf_view->vk_buf_view);
  if (vk_result != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;
  buf_view->buffer = info.buffer;
  return buf_view;
}

ngf_texel_buffer_view_t::~ngf_texel_buffer_view_t() NGF_NOEXCEPT {
  vkDestroyBufferView(_vk.device, vk_buf_view, nullptr);
}

// Creates a graphics pipeline wrapper. At most 5 shader stages are accepted.
// NOTE: this definition continues past the end of this chunk.
ngfi::maybe_ngfptr<ngfvk_generic_pipeline>
ngfvk_generic_pipeline::make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT {
  ngfi::tmp_arena().reset();
  auto pipeline = ngfi::unique_ptr<ngfvk_generic_pipeline>::make();
  if (!pipeline) return NGF_ERROR_OUT_OF_MEM;

  VkPipelineShaderStageCreateInfo vk_shader_stages[5];
  if (info.nshader_stages > 5) return NGF_ERROR_OBJECT_CREATION_FAILED;
  ngf_error err = pipeline->common_init(
      info.spec_info,
      vk_shader_stages,
      info.shader_stages,
      info.nshader_stages);
  if (err != NGF_ERROR_OK) return err;

  // Prepare vertex input.
  auto vk_binding_descs =
      ngfi::tmp_alloc<VkVertexInputBindingDescription>(info.input_info->nvert_buf_bindings);
  auto vk_attrib_descs =
      ngfi::tmp_alloc<VkVertexInputAttributeDescription>(info.input_info->nattribs);

  if ((vk_binding_descs == nullptr && info.input_info->nvert_buf_bindings > 0) ||
      (vk_attrib_descs == nullptr && info.input_info->nattribs > 0)) {
    return NGF_ERROR_OUT_OF_MEM;
  }

  for (uint32_t i = 0u; i < info.input_info->nvert_buf_bindings; ++i) {
    VkVertexInputBindingDescription*   vk_binding_desc = &vk_binding_descs[i];
    const ngf_vertex_buf_binding_desc* binding_desc    = &info.input_info->vert_buf_bindings[i];
    vk_binding_desc->binding                           = binding_desc->binding;
    vk_binding_desc->stride                            =
binding_desc->stride;\n    vk_binding_desc->inputRate = get_vk_input_rate(binding_desc->input_rate);\n  }\n\n  for (uint32_t i = 0u; i < info.input_info->nattribs; ++i) {\n    VkVertexInputAttributeDescription* vk_attrib_desc = &vk_attrib_descs[i];\n    const ngf_vertex_attrib_desc*      attrib_desc    = &info.input_info->attribs[i];\n    vk_attrib_desc->location                          = attrib_desc->location;\n    vk_attrib_desc->binding                           = attrib_desc->binding;\n    vk_attrib_desc->offset                            = attrib_desc->offset;\n    vk_attrib_desc->format =\n        get_vk_vertex_format(attrib_desc->type, attrib_desc->size, attrib_desc->normalized);\n  }\n\n  VkPipelineVertexInputStateCreateInfo vertex_input = {\n      .sType                           = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n      .pNext                           = NULL,\n      .flags                           = 0u,\n      .vertexBindingDescriptionCount   = info.input_info->nvert_buf_bindings,\n      .pVertexBindingDescriptions      = vk_binding_descs,\n      .vertexAttributeDescriptionCount = info.input_info->nattribs,\n      .pVertexAttributeDescriptions    = vk_attrib_descs};\n\n  // Prepare input assembly.\n  VkPipelineInputAssemblyStateCreateInfo input_assembly = {\n      .sType                  = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n      .pNext                  = NULL,\n      .flags                  = 0u,\n      .topology               = get_vk_primitive_type(info.input_assembly_info->primitive_topology),\n      .primitiveRestartEnable = info.input_assembly_info->enable_primitive_restart};\n\n  // Prepare tessellation state.\n  VkPipelineTessellationStateCreateInfo tess = {\n      .sType              = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,\n      .pNext              = NULL,\n      .flags              = 0u,\n      .patchControlPoints = 1u};\n\n  // Prepare viewport/scissor state.\n  const 
VkViewport dummy_viewport =\n      {.x = .0f, .y = .0f, .width = .0f, .height = .0f, .minDepth = .0f, .maxDepth = .0f};\n  const VkRect2D dummy_scissor = {.offset = {.x = 0, .y = 0}, .extent = {.width = 0, .height = 0}};\n  VkPipelineViewportStateCreateInfo viewport_state = {\n      .sType         = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n      .pNext         = NULL,\n      .flags         = 0u,\n      .viewportCount = 1u,\n      .pViewports    = &dummy_viewport,\n      .scissorCount  = 1u,\n      .pScissors     = &dummy_scissor};\n\n  // Prepare rasterization state.\n  VkPipelineRasterizationStateCreateInfo rasterization = {\n      .sType                   = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n      .pNext                   = NULL,\n      .flags                   = 0u,\n      .depthClampEnable        = VK_FALSE,\n      .rasterizerDiscardEnable = info.rasterization->discard,\n      .polygonMode             = get_vk_polygon_mode(info.rasterization->polygon_mode),\n      .cullMode                = get_vk_cull_mode(info.rasterization->cull_mode),\n      .frontFace               = get_vk_front_face(info.rasterization->front_face),\n      .depthBiasEnable         = info.rasterization->enable_depth_bias ? VK_TRUE : VK_FALSE,\n      .depthBiasConstantFactor = 0.0f,\n      .depthBiasClamp          = 0.0f,\n      .depthBiasSlopeFactor    = 0.0f,\n      .lineWidth               = 1.0f};\n\n  // Prepare multisampling.\n  VkPipelineMultisampleStateCreateInfo multisampling = {\n      .sType                 = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n      .pNext                 = NULL,\n      .flags                 = 0u,\n      .rasterizationSamples  = get_vk_sample_count(info.multisample->sample_count),\n      .sampleShadingEnable   = VK_FALSE,\n      .minSampleShading      = 0.0f,\n      .pSampleMask           = NULL,\n      .alphaToCoverageEnable = info.multisample->alpha_to_coverage ? 
VK_TRUE : VK_FALSE,\n      .alphaToOneEnable      = VK_FALSE};\n\n  // Prepare depth/stencil.\n  VkPipelineDepthStencilStateCreateInfo depth_stencil = {\n      .sType                 = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n      .pNext                 = NULL,\n      .flags                 = 0u,\n      .depthTestEnable       = info.depth_stencil->depth_test,\n      .depthWriteEnable      = info.depth_stencil->depth_write,\n      .depthCompareOp        = get_vk_compare_op(info.depth_stencil->depth_compare),\n      .depthBoundsTestEnable = VK_FALSE,\n      .stencilTestEnable     = info.depth_stencil->stencil_test,\n      .front =\n          {.failOp      = get_vk_stencil_op(info.depth_stencil->front_stencil.fail_op),\n           .passOp      = get_vk_stencil_op(info.depth_stencil->front_stencil.pass_op),\n           .depthFailOp = get_vk_stencil_op(info.depth_stencil->front_stencil.depth_fail_op),\n           .compareOp   = get_vk_compare_op(info.depth_stencil->front_stencil.compare_op),\n           .compareMask = info.depth_stencil->front_stencil.compare_mask,\n           .writeMask   = info.depth_stencil->front_stencil.write_mask,\n           .reference   = info.depth_stencil->front_stencil.reference},\n      .back =\n          {.failOp      = get_vk_stencil_op(info.depth_stencil->back_stencil.fail_op),\n           .passOp      = get_vk_stencil_op(info.depth_stencil->back_stencil.pass_op),\n           .depthFailOp = get_vk_stencil_op(info.depth_stencil->back_stencil.depth_fail_op),\n           .compareOp   = get_vk_compare_op(info.depth_stencil->back_stencil.compare_op),\n           .compareMask = info.depth_stencil->back_stencil.compare_mask,\n           .writeMask   = info.depth_stencil->back_stencil.write_mask,\n           .reference   = info.depth_stencil->back_stencil.reference},\n      .minDepthBounds = 0.0f,\n      .maxDepthBounds = 1.0f};\n\n  uint32_t ncolor_attachments = 0u;\n  for (uint32_t i = 0; i < 
info.compatible_rt_attachment_descs->ndescs; ++i) {\n    if (info.compatible_rt_attachment_descs->descs[i].type == NGF_ATTACHMENT_COLOR &&\n        !info.compatible_rt_attachment_descs->descs[i].is_resolve)\n      ++ncolor_attachments;\n  }\n\n  // Prepare blend state.\n  VkPipelineColorBlendAttachmentState blend_states[16];\n  memset(blend_states, 0, sizeof(blend_states));\n  // Validate the color attachment count *before* the fill loop below. Previously this\n  // check ran after the loop, by which point blend_states[i] could already have been\n  // written out of bounds for ncolor_attachments > NGFI_ARRAYSIZE(blend_states).\n  if (ncolor_attachments >= NGFI_ARRAYSIZE(blend_states)) {\n    NGFI_DIAG_ERROR(\"too many attachments specified\");\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n  for (size_t i = 0u; i < ncolor_attachments; ++i) {\n    if (info.color_attachment_blend_states) {\n      const ngf_blend_info* blend = &info.color_attachment_blend_states[i];\n\n      const VkPipelineColorBlendAttachmentState attachment_blend_state = {\n          .blendEnable         = blend->enable,\n          .srcColorBlendFactor = blend->enable ? get_vk_blend_factor(blend->src_color_blend_factor)\n                                               : VK_BLEND_FACTOR_ONE,\n          .dstColorBlendFactor = blend->enable ? get_vk_blend_factor(blend->dst_color_blend_factor)\n                                               : VK_BLEND_FACTOR_ZERO,\n          .colorBlendOp = blend->enable ? get_vk_blend_op(blend->blend_op_color) : VK_BLEND_OP_ADD,\n          .srcAlphaBlendFactor = blend->enable ? get_vk_blend_factor(blend->src_alpha_blend_factor)\n                                               : VK_BLEND_FACTOR_ONE,\n          .dstAlphaBlendFactor = blend->enable ? get_vk_blend_factor(blend->dst_alpha_blend_factor)\n                                               : VK_BLEND_FACTOR_ZERO,\n          .alphaBlendOp = blend->enable ? get_vk_blend_op(blend->blend_op_alpha) : VK_BLEND_OP_ADD,\n          .colorWriteMask =\n              (VkColorComponentFlags)(((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_R)\n                                           ? VK_COLOR_COMPONENT_R_BIT\n                                           : 0) |\n                                      ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_G)\n                                           ? 
VK_COLOR_COMPONENT_G_BIT\n                                           : 0) |\n                                      ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_B)\n                                           ? VK_COLOR_COMPONENT_B_BIT\n                                           : 0) |\n                                      ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_A)\n                                           ? VK_COLOR_COMPONENT_A_BIT\n                                           : 0))};\n      blend_states[i] = attachment_blend_state;\n    } else {\n      blend_states[i].blendEnable    = VK_FALSE;\n      blend_states[i].colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |\n                                       VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;\n    }\n  }\n\n  VkPipelineColorBlendStateCreateInfo color_blend = {\n      .sType           = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n      .pNext           = NULL,\n      .flags           = 0u,\n      .logicOpEnable   = VK_FALSE,\n      .logicOp         = VK_LOGIC_OP_SET,\n      .attachmentCount = ncolor_attachments,\n      .pAttachments    = blend_states,\n      .blendConstants =\n          {info.blend_consts[0], info.blend_consts[1], info.blend_consts[2], info.blend_consts[3]}};\n\n  // Dynamic state.\n  const VkDynamicState dynamic_states[] = {\n      VK_DYNAMIC_STATE_VIEWPORT,\n      VK_DYNAMIC_STATE_SCISSOR,\n      VK_DYNAMIC_STATE_DEPTH_BOUNDS,\n      VK_DYNAMIC_STATE_DEPTH_BIAS};\n  const uint32_t                   ndynamic_states = NGFI_ARRAYSIZE(dynamic_states);\n  VkPipelineDynamicStateCreateInfo dynamic_state   = {\n        .sType             = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n        .pNext             = NULL,\n        .flags             = 0u,\n   
     .dynamicStateCount = ndynamic_states,\n        .pDynamicStates    = dynamic_states};\n\n  // Create a compatible render pass object.\n  auto attachment_compat_pass_descs =\n      ngfi::tmp_alloc<ngfvk_attachment_pass_desc>(info.compatible_rt_attachment_descs->ndescs);\n  for (uint32_t i = 0u; i < info.compatible_rt_attachment_descs->ndescs; ++i) {\n    attachment_compat_pass_descs[i].load_op  = VK_ATTACHMENT_LOAD_OP_DONT_CARE;\n    attachment_compat_pass_descs[i].store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE;\n    attachment_compat_pass_descs[i].is_resolve =\n        info.compatible_rt_attachment_descs->descs[i].is_resolve;\n    attachment_compat_pass_descs[i].layout = VK_IMAGE_LAYOUT_GENERAL;\n  }\n\n  VkResult vk_err = ngfvk_renderpass_from_attachment_descs(\n      info.compatible_rt_attachment_descs->ndescs,\n      info.compatible_rt_attachment_descs->descs,\n      attachment_compat_pass_descs,\n      &pipeline->compat_render_pass);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  // Create required pipeline.\n  const VkGraphicsPipelineCreateInfo vk_pipeline_info = {\n      .sType               = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n      .pNext               = NULL,\n      .flags               = 0u,\n      .stageCount          = info.nshader_stages,\n      .pStages             = vk_shader_stages,\n      .pVertexInputState   = &vertex_input,\n      .pInputAssemblyState = &input_assembly,\n      .pTessellationState  = &tess,\n      .pViewportState      = &viewport_state,\n      .pRasterizationState = &rasterization,\n      .pMultisampleState   = &multisampling,\n      .pDepthStencilState  = &depth_stencil,\n      .pColorBlendState    = &color_blend,\n      .pDynamicState       = &dynamic_state,\n      .layout              = pipeline->vk_pipeline_layout,\n      .renderPass          = pipeline->compat_render_pass,\n      .subpass             = 0u,\n      .basePipelineHandle  = VK_NULL_HANDLE,\n      .basePipelineIndex   
= -1};\n  vk_err = vkCreateGraphicsPipelines(\n      _vk.device,\n      VK_NULL_HANDLE,\n      1u,\n      &vk_pipeline_info,\n      NULL,\n      &pipeline->vk_pipeline);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  return pipeline;\n}\n\nngfi::maybe_ngfptr<ngfvk_generic_pipeline>\nngfvk_generic_pipeline::make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT {\n  ngfi::tmp_arena().reset();\n  auto pipeline = ngfi::unique_ptr<ngfvk_generic_pipeline>::make();\n  if (!pipeline) return NGF_ERROR_OUT_OF_MEM;\n  VkPipelineShaderStageCreateInfo vk_shader_stage {};\n  ngf_error err = pipeline->common_init(info.spec_info, &vk_shader_stage, &info.shader_stage, 1u);\n  if (err != NGF_ERROR_OK) return err;\n  const VkComputePipelineCreateInfo vk_pipeline_ci = {\n      .sType              = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n      .pNext              = NULL,\n      .flags              = 0,\n      .stage              = vk_shader_stage,\n      .layout             = pipeline->vk_pipeline_layout,\n      .basePipelineHandle = VK_NULL_HANDLE,\n      .basePipelineIndex  = -1};\n  VkResult vk_err = vkCreateComputePipelines(\n      _vk.device,\n      VK_NULL_HANDLE,\n      1,\n      &vk_pipeline_ci,\n      NULL,\n      &pipeline->vk_pipeline);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  return pipeline;\n}\n\nstatic int ngfvk_binding_comparator(const void* a, const void* b) {\n  auto a_binding = (const ngfvk_reflect_binding_and_stage_mask*)a;\n  auto b_binding = (const ngfvk_reflect_binding_and_stage_mask*)b;\n  if (a_binding->binding_data.set < b_binding->binding_data.set)\n    return -1;\n  else if (a_binding->binding_data.set == b_binding->binding_data.set) {\n    if (a_binding->binding_data.binding < b_binding->binding_data.binding)\n      return -1;\n    else if (a_binding->binding_data.binding == b_binding->binding_data.binding)\n      return 0;\n  }\n  return 1;\n}\n\nstatic 
ngf_descriptor_type\nngfvk_get_ngf_descriptor_type(SpvReflectDescriptorType spv_reflect_type) {\n  switch (spv_reflect_type) {\n  case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER:\n    return NGF_DESCRIPTOR_UNIFORM_BUFFER;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE:\n    return NGF_DESCRIPTOR_IMAGE;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER:\n    return NGF_DESCRIPTOR_SAMPLER;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:\n    return NGF_DESCRIPTOR_IMAGE_AND_SAMPLER;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:\n    return NGF_DESCRIPTOR_TEXEL_BUFFER;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER:\n    return NGF_DESCRIPTOR_STORAGE_BUFFER;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE:\n    return NGF_DESCRIPTOR_STORAGE_IMAGE;\n  case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:\n    return NGF_DESCRIPTOR_ACCELERATION_STRUCTURE;\n  default:\n    return NGF_DESCRIPTOR_TYPE_COUNT;\n  }\n}\nngf_error ngfvk_generic_pipeline::common_init(\n    const ngf_specialization_info*   spec_info,\n    VkPipelineShaderStageCreateInfo* vk_shader_stages,\n    const ngf_shader_stage*          shader_stages,\n    uint32_t                         nshader_stages) NGF_NOEXCEPT {\n  if (spec_info) {\n    auto spec_map_entries = ngfi::tmp_alloc<VkSpecializationMapEntry>(spec_info->nspecializations);\n    // tmp arena allocation can fail; bail out before the entries are written below\n    // (mirrors the null-checks done for the other tmp_allocs in this file).\n    if (spec_map_entries == nullptr && spec_info->nspecializations > 0u) {\n      return NGF_ERROR_OUT_OF_MEM;\n    }\n\n    vk_spec_info.pData         = spec_info->value_buffer;\n    vk_spec_info.mapEntryCount = spec_info->nspecializations;\n    vk_spec_info.pMapEntries   = spec_map_entries;\n\n    size_t total_data_size = 0u;\n    for (size_t i = 0; i < spec_info->nspecializations; ++i) {\n      VkSpecializationMapEntry*          vk_specialization = &spec_map_entries[i];\n      const ngf_constant_specialization* specialization    = &spec_info->specializations[i];\n      vk_specialization->constantID                        = specialization->constant_id;\n      vk_specialization->offset                            = specialization->offset;\n      size_t 
specialization_size                           = 0u;\n      switch (specialization->type) {\n      case NGF_TYPE_INT8:\n      case NGF_TYPE_UINT8:\n        specialization_size = 1u;\n        break;\n      case NGF_TYPE_INT16:\n      case NGF_TYPE_UINT16:\n      case NGF_TYPE_HALF_FLOAT:\n        specialization_size = 2u;\n        break;\n      case NGF_TYPE_INT32:\n      case NGF_TYPE_UINT32:\n      case NGF_TYPE_FLOAT:\n        specialization_size = 4u;\n        break;\n      case NGF_TYPE_DOUBLE:\n        specialization_size = 8u;\n        break;\n      default:\n        assert(false);\n      }\n      vk_specialization->size = specialization_size;\n      total_data_size += specialization_size;\n    }\n    vk_spec_info.dataSize = total_data_size;\n  }\n\n  for (uint32_t s = 0u; s < nshader_stages; ++s) {\n    const ngf_shader_stage stage            = shader_stages[s];\n    vk_shader_stages[s].sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;\n    vk_shader_stages[s].pNext               = NULL;\n    vk_shader_stages[s].flags               = 0u;\n    vk_shader_stages[s].stage               = stage->vk_stage_bits;\n    vk_shader_stages[s].module              = stage->vk_module;\n    vk_shader_stages[s].pName               = stage->entry_point_name.data();  // was a stray comma operator\n    vk_shader_stages[s].pSpecializationInfo = &vk_spec_info;\n  }\n\n  descriptor_set_layouts.reserve(4);\n\n  // Extract and dedupe all descriptor bindings.\n  uint32_t ntotal_bindings = 0u;\n  for (uint32_t i = 0u; i < nshader_stages; ++i) {\n    ntotal_bindings += shader_stages[i]->spv_reflect_module.descriptor_binding_count;\n  }\n  auto bindings = ngfi::tmp_alloc<ngfvk_reflect_binding_and_stage_mask>(ntotal_bindings);\n  // tmp arena allocation can fail; check before the fill loops below use the array.\n  if (bindings == nullptr && ntotal_bindings > 0u) { return NGF_ERROR_OUT_OF_MEM; }\n\n  uint32_t bindings_offset = 0u;\n  for (uint32_t i = 0u; i < nshader_stages; ++i) {\n    const SpvReflectShaderModule* spv_module    = &shader_stages[i]->spv_reflect_module;\n    const uint32_t                binding_count = spv_module->descriptor_binding_count;\n    
for (size_t j = bindings_offset; j < bindings_offset + binding_count; ++j) {\n      bindings[j].binding_data = spv_module->descriptor_bindings[j - bindings_offset];\n      switch (spv_module->entry_points[0].shader_stage) {\n      case SPV_REFLECT_SHADER_STAGE_VERTEX_BIT:\n        bindings[j].mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;\n        break;\n      case SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT:\n        bindings[j].mask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;\n        break;\n      case SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT:\n        bindings[j].mask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;\n        break;\n      default:\n        assert(false);\n        break;\n      }\n    }\n    bindings_offset += binding_count;\n  }\n  qsort(\n      bindings,\n      ntotal_bindings,\n      sizeof(ngfvk_reflect_binding_and_stage_mask),\n      ngfvk_binding_comparator);\n  const uint32_t last_binding_idx = ntotal_bindings > 0 ? ntotal_bindings - 1u : 0u;\n  const uint32_t max_set_id =\n      ntotal_bindings > 0 ? bindings[last_binding_idx].binding_data.set : 0u;\n  const uint32_t nall_sets             = ntotal_bindings > 0 ? max_set_id + 1u : 0u;\n  auto           nall_bindings_per_set = ngfi::tmp_alloc<uint32_t>(nall_sets);\n  memset(nall_bindings_per_set, 0, nall_sets * sizeof(nall_bindings_per_set[0]));\n  uint32_t nunique_bindings = 0u;\n  for (uint32_t cur = 0u; cur < ntotal_bindings; ++cur) {\n    const ngfvk_reflect_binding_and_stage_mask* cur_binding = &bindings[cur];\n    ngfvk_reflect_binding_and_stage_mask*       last_unique_binding =\n        nunique_bindings == 0 ? NULL : &bindings[nunique_bindings - 1];\n    const SpvReflectDescriptorBinding* last_unique_binding_data =\n        !last_unique_binding ? 
NULL : &last_unique_binding->binding_data;\n    const SpvReflectDescriptorBinding* cur_binding_data = &cur_binding->binding_data;\n    if (!last_unique_binding_data ||\n        (last_unique_binding_data->set != cur_binding_data->set ||\n         last_unique_binding_data->binding != cur_binding_data->binding)) {\n      bindings[nunique_bindings++] = *cur_binding;\n      nall_bindings_per_set[cur_binding_data->set] =\n          NGFI_MAX(nall_bindings_per_set[cur_binding_data->set], cur_binding_data->binding + 1u);\n    } else {\n      last_unique_binding->mask |= cur_binding->mask;\n    }\n  }\n\n  // Create descriptor set layouts.\n  auto     vk_set_layouts = ngfi::tmp_alloc<VkDescriptorSetLayout>(max_set_id + 1);\n  uint32_t last_set_id    = ~0u;\n  for (uint32_t cur = 0u; cur < nunique_bindings;) {\n    ngfvk_desc_set_layout set_layout;\n    memset((void*)&set_layout, 0, sizeof(set_layout));\n    const uint32_t current_set_id = bindings[cur].binding_data.set;\n    if (last_set_id == ~0u || current_set_id - last_set_id > 1u) {\n      // there is a gap in descriptor sets, fill it in with empty layouts;\n      for (uint32_t i = last_set_id == ~0u ? 
0u : last_set_id + 1; i < current_set_id; ++i) {\n        const VkDescriptorSetLayoutCreateInfo vk_ds_info = {\n            .sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n            .pNext        = NULL,\n            .flags        = 0u,\n            .bindingCount = 0u,\n            .pBindings    = NULL};\n        vkCreateDescriptorSetLayout(_vk.device, &vk_ds_info, NULL, &set_layout.vk_handle);\n        vk_set_layouts[i] = set_layout.vk_handle;\n        descriptor_set_layouts.emplace_back(ngfi::move(set_layout));\n      }\n    }\n    const uint32_t nall_bindings = nall_bindings_per_set[bindings[cur].binding_data.set];\n    if (nall_bindings > 0u) {\n      set_layout.binding_properties = ngfi::fixed_array<ngfvk_desc_binding> {nall_bindings};\n      // Zero the whole array first, then mark every slot with the MAX_ENUM sentinel.\n      // Previously the memset ran *after* the sentinel loop, wiping the sentinels and\n      // leaving unused slots indistinguishable from descriptor type 0 (VK_DESCRIPTOR_TYPE_SAMPLER).\n      memset(\n          set_layout.binding_properties.data(),\n          0,\n          sizeof(ngfvk_desc_binding) * set_layout.binding_properties.size());\n      for (size_t i = 0u; i < nall_bindings; ++i) {\n        set_layout.binding_properties[i].type = VK_DESCRIPTOR_TYPE_MAX_ENUM;\n      }\n    }\n    const uint32_t first_binding_in_set = cur;\n    while (cur < nunique_bindings && current_set_id == bindings[cur].binding_data.set) cur++;\n    const uint32_t nbindings_in_set = cur - first_binding_in_set;\n    auto vk_descriptor_bindings = ngfi::tmp_alloc<VkDescriptorSetLayoutBinding>(nbindings_in_set);\n    for (uint32_t i = first_binding_in_set; i < cur; ++i) {\n      VkDescriptorSetLayoutBinding*      vk_d = &vk_descriptor_bindings[i - first_binding_in_set];\n      const SpvReflectDescriptorBinding* d    = &bindings[i].binding_data;\n      const ngf_descriptor_type ngf_desc_type = ngfvk_get_ngf_descriptor_type(d->descriptor_type);\n      if (ngf_desc_type == NGF_DESCRIPTOR_TYPE_COUNT) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n      vk_d->binding                               = d->binding;\n      vk_d->descriptorCount                       = d->count;\n      vk_d->descriptorType                   
     = get_vk_descriptor_type(ngf_desc_type);\n      vk_d->stageFlags                            = VK_SHADER_STAGE_ALL;\n      vk_d->pImmutableSamplers                    = NULL;\n      const ngfvk_desc_binding binding_properties = {\n          .type            = vk_d->descriptorType,\n          .stage_accessors = bindings[i].mask,\n          .readonly = ((d->block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) != 0),\n          .is_multilayered_image = (d->image.arrayed != 0),\n          .is_cubemap            = (d->image.dim == SpvDimCube),\n          .ndescs_in_binding     = vk_d->descriptorCount};\n      set_layout.binding_properties[d->binding] = binding_properties;\n      set_layout.counts[ngf_desc_type]++;\n      set_layout.nall_descs += vk_d->descriptorCount;\n    }\n    const VkDescriptorSetLayoutCreateInfo vk_ds_info = {\n        .sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n        .pNext        = NULL,\n        .flags        = 0u,\n        .bindingCount = nbindings_in_set,\n        .pBindings    = vk_descriptor_bindings};\n    const VkResult vk_err =\n        vkCreateDescriptorSetLayout(_vk.device, &vk_ds_info, NULL, &set_layout.vk_handle);\n    vk_set_layouts[current_set_id] = set_layout.vk_handle;\n    descriptor_set_layouts.emplace_back(ngfi::move(set_layout));\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n    last_set_id = current_set_id;\n  }\n\n  // Pipeline layout.\n  const uint32_t ndescriptor_sets = static_cast<uint32_t>(descriptor_set_layouts.size());\n\n  const VkPipelineLayoutCreateInfo vk_pipeline_layout_info = {\n      .sType                  = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n      .pNext                  = NULL,\n      .flags                  = 0u,\n      .setLayoutCount         = ndescriptor_sets,\n      .pSetLayouts            = vk_set_layouts,\n      .pushConstantRangeCount = 1u,\n      .pPushConstantRanges    = 
&ngfvk::global::default_push_constant_range};\n  const VkResult vk_err =\n      vkCreatePipelineLayout(_vk.device, &vk_pipeline_layout_info, NULL, &vk_pipeline_layout);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  return NGF_ERROR_OK;\n}\nngfvk_generic_pipeline::~ngfvk_generic_pipeline() NGF_NOEXCEPT {\n  auto res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];\n  if (vk_pipeline != VK_NULL_HANDLE) { res->retire.append(vk_pipeline); }\n  if (vk_pipeline_layout != VK_NULL_HANDLE) { res->retire.append(vk_pipeline_layout); }\n  for (size_t l = 0; l < descriptor_set_layouts.size(); ++l) {\n    ngfvk_desc_set_layout* layout    = &descriptor_set_layouts[l];\n    VkDescriptorSetLayout  vk_layout = layout->vk_handle;\n    res->retire.append(vk_layout);\n  }\n  if (compat_render_pass != VK_NULL_HANDLE) res->retire.append(compat_render_pass);\n}\nngfi::maybe_ngfptr<ngf_shader_stage_t>\nngf_shader_stage_t::make(const ngf_shader_stage_info& info) NGF_NOEXCEPT {\n  auto stage = ngfi::unique_ptr<ngf_shader_stage_t>::make();\n  if (!stage) return NGF_ERROR_OUT_OF_MEM;\n  VkShaderModuleCreateInfo vk_sm_info = {\n      .sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n      .pNext    = NULL,\n      .flags    = 0u,\n      .codeSize = (info.content_length),\n      .pCode    = (uint32_t*)info.content};\n  VkResult vkerr = vkCreateShaderModule(_vk.device, &vk_sm_info, NULL, &stage->vk_module);\n  if (vkerr != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;\n  const SpvReflectResult spverr =\n      spvReflectCreateShaderModule(info.content_length, info.content, &stage->spv_reflect_module);\n  if (spverr != SPV_REFLECT_RESULT_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;\n  stage->vk_stage_bits           = get_vk_shader_stage(info.type);\n  size_t entry_point_name_length = strlen(info.entry_point_name) + 1u;\n  stage->entry_point_name        = ngfi::fixed_array<char> {entry_point_name_length};\n  
strncpy(stage->entry_point_name.data(), info.entry_point_name, entry_point_name_length);\n  return stage;\n}\n\nngf_shader_stage_t::~ngf_shader_stage_t() NGF_NOEXCEPT {\n  if (vk_module != VK_NULL_HANDLE) {\n    vkDestroyShaderModule(_vk.device, vk_module, NULL);\n    spvReflectDestroyShaderModule(&spv_reflect_module);\n  }\n}\nngfi::maybe_ngfptr<ngf_buffer_t> ngf_buffer_t::make(const ngf_buffer_info& info) NGF_NOEXCEPT {\n  auto a = ngfvk_alloc::make(info);\n  if (a.has_error()) { return a.error(); }\n\n  auto buf = ngfi::unique_ptr<ngf_buffer_t>::make();\n  if (!buf) return NGF_ERROR_OUT_OF_MEM;\n  buf->alloc        = ngfi::move(a.value());\n  buf->size         = info.size;\n  buf->storage_type = info.storage_type;\n  buf->usage_flags  = info.buffer_usage;\n  buf->hash         = ngfvk_ptr_hash(buf.get());\n  memset(&buf->sync_state, 0, sizeof(buf->sync_state));\n  buf->sync_state.layout = VK_IMAGE_LAYOUT_UNDEFINED;\n\n  return buf;\n}\n\nngfi::maybe_ngfptr<ngf_image_view_t>\nngf_image_view_t::make(const ngf_image_view_info& info) NGF_NOEXCEPT {\n  auto view = ngfi::unique_ptr<ngf_image_view_t>::make();\n  if (!view) return NGF_ERROR_OUT_OF_MEM;\n  const VkImageViewCreateInfo vk_view_info = {\n      .sType    = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n      .pNext    = NULL,\n      .flags    = 0u,\n      .image    = (VkImage)info.src_image->alloc.obj_handle,\n      .viewType = get_vk_image_view_type(info.view_type, info.nlayers),\n      .format   = get_vk_image_format(info.view_format),\n      .components =\n          {.r = VK_COMPONENT_SWIZZLE_R,\n           .g = VK_COMPONENT_SWIZZLE_G,\n           .b = VK_COMPONENT_SWIZZLE_B,\n           .a = VK_COMPONENT_SWIZZLE_A},\n      .subresourceRange = {\n          .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,\n          .baseMipLevel   = info.base_mip_level,\n          .levelCount     = info.nmips,\n          .baseArrayLayer = info.base_layer,\n          .layerCount     = info.nlayers}};\n  const VkResult vk_err = 
vkCreateImageView(_vk.device, &vk_view_info, NULL, &view->vk_view);\n  if (vk_err != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;\n  view->src = info.src_image;\n  return view;\n}\n\nngf_image_view_t::~ngf_image_view_t() NGF_NOEXCEPT {\n  vkDestroyImageView(_vk.device, vk_view, nullptr);\n}\n\nngfi::maybe_ngfptr<ngf_image_t>\nngf_image_t::make(const ngf_image_info& info, ngfvk_alloc&& alloc) NGF_NOEXCEPT {\n  auto       result     = ngfi::unique_ptr<ngf_image_t>::make();\n  const bool is_cubemap = info.type == NGF_IMAGE_TYPE_CUBE;\n  result->alloc         = ngfi::move(alloc);\n  result->extent.width  = NGFI_MAX(1, info.extent.width);\n  result->extent.height = NGFI_MAX(1, info.extent.height);\n  result->extent.depth  = NGFI_MAX(1, info.extent.depth);\n  result->nlayers       = info.nlayers * (is_cubemap ? 6u : 1u);\n  result->nlevels       = info.nmips;\n  result->type          = info.type;\n  result->usage_flags   = info.usage_hint;\n  result->vk_fmt        = get_vk_image_format(info.format);\n  memset(&result->sync_state, 0, sizeof(result->sync_state));\n  result->sync_state.layout = VK_IMAGE_LAYOUT_UNDEFINED;\n  result->hash              = ngfvk_ptr_hash(result.get());\n\n  ngf_error err = NGF_ERROR_OK;\n  if (result->alloc.vma_alloc) {\n    err = ngfvk_create_vk_image_view(\n        (VkImage)result->alloc.obj_handle,\n        get_vk_image_view_type(info.type, info.nlayers),\n        result->vk_fmt,\n        result->nlevels,\n        result->nlayers,\n        &result->vkview);\n    if (err != NGF_ERROR_OK) return err;\n    err = ngfvk_create_vk_image_view(\n        (VkImage)result->alloc.obj_handle,\n        get_vk_image_view_type(info.type, 2u),  // force _ARRAY type view\n        result->vk_fmt,\n        result->nlevels,\n        result->nlayers,\n        &result->vkview_arrayed);\n    if (err != NGF_ERROR_OK) return err;\n  } else {\n    result->vkview = result->vkview_arrayed = VK_NULL_HANDLE;\n  }\n  return 
result;\n}\n\nngfi::maybe_ngfptr<ngf_image_t> ngf_image_t::make(const ngf_image_info& info) NGF_NOEXCEPT {\n  auto maybe_alloc = ngfvk_alloc::make(info);\n  if (maybe_alloc.has_error()) return maybe_alloc.error();\n  return ngf_image_t::make(info, ngfi::move(maybe_alloc.value()));\n}\n\nngf_image_t::~ngf_image_t() noexcept {\n  if (vkview) { vkDestroyImageView(_vk.device, vkview, NULL); }\n  if (vkview_arrayed) { vkDestroyImageView(_vk.device, vkview_arrayed, NULL); }\n}\n\nngfi::value_or_ngferr<ngfvk_alloc> ngfvk_alloc::make(const ngf_image_info& info) NGF_NOEXCEPT {\n  const bool is_sampled_from  = info.usage_hint & NGF_IMAGE_USAGE_SAMPLE_FROM;\n  const bool is_storage       = info.usage_hint & NGF_IMAGE_USAGE_STORAGE;\n  const bool is_xfer_dst      = info.usage_hint & NGF_IMAGE_USAGE_XFER_DST;\n  const bool is_xfer_src      = info.usage_hint & NGF_IMAGE_USAGE_XFER_SRC;\n  const bool is_attachment    = info.usage_hint & NGF_IMAGE_USAGE_ATTACHMENT;\n  const bool enable_auto_mips = info.usage_hint & NGF_IMAGE_USAGE_MIPMAP_GENERATION;\n  const bool is_transient     = info.usage_hint & ngfvk::global::img_usage_transient_attachment;\n  const bool is_depth_stencil = info.format == NGF_IMAGE_FORMAT_DEPTH16 ||\n                                info.format == NGF_IMAGE_FORMAT_DEPTH32 ||\n                                info.format == NGF_IMAGE_FORMAT_DEPTH24_STENCIL8;\n\n  const VkImageUsageFlagBits attachment_usage_bits =\n      is_depth_stencil ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT\n                       : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;\n  const auto usage_flags =\n      (VkImageUsageFlags)((is_sampled_from ? VK_IMAGE_USAGE_SAMPLED_BIT : 0u) |\n                          (is_storage ? VK_IMAGE_USAGE_STORAGE_BIT : 0u) |\n                          (is_attachment ? attachment_usage_bits : 0u) |\n                          (is_transient ? VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT : 0) |\n                          (is_xfer_dst ? 
VK_IMAGE_USAGE_TRANSFER_DST_BIT : 0u) |\n                          (is_xfer_src ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT : 0u) |\n                          (enable_auto_mips\n                               ? (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)\n                               : 0u));\n\n  const bool               is_cubemap      = info.type == NGF_IMAGE_TYPE_CUBE;\n  const VkFormat           vk_image_format = get_vk_image_format(info.format);\n  const VkImageType        vk_image_type   = get_vk_image_type(info.type);\n  const VkImageCreateFlags create_flags    = is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u;\n  VkImageFormatProperties  dummy_props;\n  const bool               optimal_tiling_supported = vkGetPhysicalDeviceImageFormatProperties(\n                                            _vk.phys_dev,\n                                            vk_image_format,\n                                            vk_image_type,\n                                            VK_IMAGE_TILING_OPTIMAL,\n                                            usage_flags,\n                                            create_flags,\n                                            &dummy_props) == VK_SUCCESS;\n  const VkImageCreateInfo vk_image_info = {\n      .sType     = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n      .pNext     = NULL,\n      .flags     = create_flags,\n      .imageType = vk_image_type,\n      .format    = vk_image_format,\n      .extent =\n          {.width = info.extent.width, .height = info.extent.height, .depth = info.extent.depth},\n      .mipLevels   = info.nmips,\n      .arrayLayers = info.nlayers * (!is_cubemap ? 1u : 6u),\n      .samples     = get_vk_sample_count(info.sample_count),\n      .tiling      = optimal_tiling_supported ? 
VK_IMAGE_TILING_OPTIMAL : VK_IMAGE_TILING_LINEAR,\n      .usage       = usage_flags,\n      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,\n      .queueFamilyIndexCount = 0,\n      .pQueueFamilyIndices   = NULL,\n      .initialLayout         = VK_IMAGE_LAYOUT_UNDEFINED};\n  VmaAllocationCreateInfo vma_alloc_info = {\n      .flags          = 0u,\n      .usage          = VMA_MEMORY_USAGE_GPU_ONLY,\n      .requiredFlags  = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,\n      .preferredFlags = 0u,\n      .memoryTypeBits = 0u,\n      .pool           = VK_NULL_HANDLE,\n      .pUserData      = (void*)0x1};\n  VkImage        img;\n  VmaAllocation  alloc;\n  const VkResult vk_err = vmaCreateImage(\n      _vk.allocator,\n      &vk_image_info,\n      &vma_alloc_info,\n      (VkImage*)&img,\n      &alloc,\n      nullptr);\n  if (vk_err == VK_SUCCESS) {\n    ngfvk_alloc result;\n    result.obj_handle = (uintptr_t)img;\n    result.vma_alloc  = alloc;\n    return result;\n  } else {\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n}\n\nngfi::value_or_ngferr<ngfvk_alloc> ngfvk_alloc::make(const ngf_buffer_info& info) NGF_NOEXCEPT {\n  if (info.buffer_usage == 0u) {\n    NGFI_DIAG_ERROR(\"Buffer usage not specified.\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  if (info.storage_type > NGF_BUFFER_STORAGE_DEVICE_LOCAL &&\n      !ngfvk::global::phys_device_caps.device_local_memory_is_host_visible) {\n    NGFI_DIAG_ERROR(\"Host-visible device-local storage requested, but not supported.\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  const VkBufferUsageFlags    vk_usage_flags  = get_vk_buffer_usage(info.buffer_usage);\n  const VkMemoryPropertyFlags vk_mem_flags    = get_vk_memory_flags(info.storage_type);\n  const bool           vk_mem_is_host_visible = vk_mem_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n  const VmaMemoryUsage vma_usage_flags        = info.storage_type >= NGF_BUFFER_STORAGE_DEVICE_LOCAL\n                                                    ? 
VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE\n                                                    : VMA_MEMORY_USAGE_AUTO_PREFER_HOST;\n  const VkBufferCreateInfo buf_vk_info        = {\n             .sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n             .pNext                 = NULL,\n             .flags                 = 0u,\n             .size                  = info.size,\n             .usage                 = vk_usage_flags,\n             .sharingMode           = VK_SHARING_MODE_EXCLUSIVE,\n             .queueFamilyIndexCount = 0,\n             .pQueueFamilyIndices   = NULL};\n\n  const VmaAllocationCreateInfo buf_alloc_info = {\n      .flags          = ngfvk_get_vma_alloc_flags(info.storage_type),\n      .usage          = vma_usage_flags,\n      .requiredFlags  = vk_mem_flags,\n      .preferredFlags = 0u,\n      .memoryTypeBits = 0u,\n      .pool           = VK_NULL_HANDLE,\n      .pUserData      = NULL};\n\n  VkBuffer          buf;\n  VmaAllocation     alloc;\n  VmaAllocationInfo alloc_info {};\n  const VkResult    vkresult =\n      vmaCreateBuffer(_vk.allocator, &buf_vk_info, &buf_alloc_info, &buf, &alloc, &alloc_info);\n  if (vkresult == VK_SUCCESS) {\n    ngfvk_alloc result {};\n    result.obj_handle  = (uintptr_t)buf;\n    result.vma_alloc   = alloc;\n    result.mapped_data = vk_mem_is_host_visible ? 
alloc_info.pMappedData : nullptr;\n    return result;\n  } else {\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n}\n\nngfvk_alloc& ngfvk_alloc::operator=(ngfvk_alloc&& other) NGF_NOEXCEPT {\n  destroy();\n  obj_handle        = other.obj_handle;\n  other.obj_handle  = 0;\n  vma_alloc         = other.vma_alloc;\n  other.vma_alloc   = VK_NULL_HANDLE;\n  mapped_data       = other.mapped_data;\n  other.mapped_data = nullptr;\n  return *this;\n}\n\nvoid ngfvk_alloc::destroy() NGF_NOEXCEPT {\n  if (vma_alloc) {\n    VmaAllocationInfo alloc_info {};\n    vmaGetAllocationInfo(_vk.allocator, vma_alloc, &alloc_info);\n    if (alloc_info.pUserData) {\n      vmaDestroyImage(_vk.allocator, (VkImage)obj_handle, vma_alloc);\n    } else {\n      vmaDestroyBuffer(_vk.allocator, (VkBuffer)obj_handle, vma_alloc);\n    }\n  }\n}\n\nstatic ngf_error ngfvk_maybe_acquire_swapchain_image() {\n  if (CURRENT_CONTEXT->swapchain &&\n      CURRENT_CONTEXT->swapchain->vk_swapchain != VK_NULL_HANDLE) {\n    if (CURRENT_CONTEXT->swapchain->image_idx == ngfvk::global::invalid_idx) {\n      const VkResult acquire_result = vkAcquireNextImageKHR(\n          _vk.device,\n          CURRENT_CONTEXT->swapchain->vk_swapchain,\n          UINT64_MAX,\n          CURRENT_CONTEXT->swapchain->acquire_sems[CURRENT_CONTEXT->frame_id],\n          VK_NULL_HANDLE,\n          &CURRENT_CONTEXT->swapchain->image_idx);\n      if (acquire_result == VK_SUBOPTIMAL_KHR) {\n        NGFI_DIAG_WARNING(\"suboptimal swapchain configuration reported by vulkan\");\n      } else if (acquire_result != VK_SUCCESS) {\n        NGFI_DIAG_ERROR(\"failed to acquire swapchain image\");\n        return NGF_ERROR_INVALID_OPERATION;\n      }\n    }\n    return NGF_ERROR_OK;\n  } else {\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n}\n\nngfvk_swapchain::~ngfvk_swapchain() noexcept {\n  vkDeviceWaitIdle(_vk.device);\n  for (VkSemaphore sem : acquire_sems) {\n    if (sem != VK_NULL_HANDLE) { vkDestroySemaphore(_vk.device, sem, nullptr); }\n 
 }\n  for (VkSemaphore sem : submit_sems) {\n    if (sem != VK_NULL_HANDLE) { vkDestroySemaphore(_vk.device, sem, nullptr); }\n  }\n  for (VkFramebuffer fb : framebufs) {\n    if (fb != VK_NULL_HANDLE) vkDestroyFramebuffer(_vk.device, fb, nullptr);\n  }\n  for (VkImageView view : multisample_img_views) {\n    if (view != VK_NULL_HANDLE) vkDestroyImageView(_vk.device, view, nullptr);\n  }\n  if (vk_swapchain != VK_NULL_HANDLE) { vkDestroySwapchainKHR(_vk.device, vk_swapchain, nullptr); }\n  if (depth_img) { ngf_destroy_image(depth_img); }\n}\n\nngfi::maybe_ngfptr<ngfvk_swapchain> ngfvk_swapchain::make(\n    const ngf_swapchain_info& swapchain_info,\n    ngf_render_target         rt,\n    VkSurfaceKHR              surface) noexcept {\n  ngf_error        err          = NGF_ERROR_OK;\n  VkResult         vk_err       = VK_SUCCESS;\n  VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR;\n\n  auto swapchain = ngfi::unique_ptr<ngfvk_swapchain>::make();\n\n  // Check available present modes and fall back on FIFO if the requested\n  // present mode is not supported.\n  uint32_t npresent_modes = 0u;\n  vkGetPhysicalDeviceSurfacePresentModesKHR(_vk.phys_dev, surface, &npresent_modes, nullptr);\n  ngfi::fixed_array<VkPresentModeKHR> present_modes {npresent_modes};\n  vkGetPhysicalDeviceSurfacePresentModesKHR(\n      _vk.phys_dev,\n      surface,\n      &npresent_modes,\n      present_modes.data());\n  static const VkPresentModeKHR modes[] = {VK_PRESENT_MODE_FIFO_KHR, VK_PRESENT_MODE_IMMEDIATE_KHR};\n  const VkPresentModeKHR        requested_present_mode = modes[swapchain_info.present_mode];\n  for (uint32_t p = 0u; p < npresent_modes; ++p) {\n    if (present_modes[p] == requested_present_mode) {\n      present_mode = present_modes[p];\n      break;\n    }\n  }\n\n  // Check if the requested surface format is valid.\n  uint32_t nformats = 0u;\n  vkGetPhysicalDeviceSurfaceFormatsKHR(_vk.phys_dev, surface, &nformats, nullptr);\n  ngfi::fixed_array<VkSurfaceFormatKHR> formats 
{nformats};\n  assert(formats.data());\n  vkGetPhysicalDeviceSurfaceFormatsKHR(_vk.phys_dev, surface, &nformats, formats.data());\n  const VkFormat requested_format = get_vk_image_format(swapchain_info.color_format);\n  if (!(nformats == 1 && formats[0].format == VK_FORMAT_UNDEFINED)) {\n    bool found = false;\n    for (size_t f = 0; !found && f < nformats; ++f) {\n      found = formats[f].format == requested_format;\n    }\n    if (!found) {\n      NGFI_DIAG_ERROR(\"Invalid swapchain image format requested.\");\n      return NGF_ERROR_INVALID_FORMAT;\n    }\n  }\n\n  // Determine min/max extents.\n  VkSurfaceCapabilitiesKHR surface_caps;\n  vkGetPhysicalDeviceSurfaceCapabilitiesKHR(_vk.phys_dev, surface, &surface_caps);\n  const VkExtent2D min_surface_extent = surface_caps.minImageExtent;\n  const VkExtent2D max_surface_extent = surface_caps.maxImageExtent;\n\n  // Determine if we should use exclusive or concurrent sharing mode for\n  // swapchain images.\n  const bool          exclusive_sharing = _vk.gfx_family_idx == _vk.present_family_idx;\n  const VkSharingMode sharing_mode =\n      exclusive_sharing ? VK_SHARING_MODE_EXCLUSIVE : VK_SHARING_MODE_CONCURRENT;\n  const uint32_t num_sharing_queue_families = exclusive_sharing ? 0 : 2;\n  const uint32_t sharing_queue_families[]   = {_vk.gfx_family_idx, _vk.present_family_idx};\n\n  // Determine usage flags.\n  const auto storage_bit =\n      (VkImageUsageFlagBits)(swapchain_info.enable_compute_access ? 
VK_IMAGE_USAGE_STORAGE_BIT : 0);\n  const auto usage_mask = (VkImageUsageFlags)(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | storage_bit);\n\n  // Create swapchain.\n  const VkSwapchainCreateInfoKHR vk_sc_info = {\n      .sType           = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,\n      .pNext           = NULL,\n      .flags           = 0,\n      .surface         = surface,\n      .minImageCount   = swapchain_info.capacity_hint,\n      .imageFormat     = requested_format,\n      .imageColorSpace = get_vk_color_space(swapchain_info.colorspace),\n      .imageExtent =\n          {.width = NGFI_MIN(\n               max_surface_extent.width,\n               NGFI_MAX(min_surface_extent.width, swapchain_info.width)),\n           .height = NGFI_MIN(\n               max_surface_extent.height,\n               NGFI_MAX(min_surface_extent.height, swapchain_info.height))},\n      .imageArrayLayers      = 1,\n      .imageUsage            = usage_mask,\n      .imageSharingMode      = sharing_mode,\n      .queueFamilyIndexCount = num_sharing_queue_families,\n      .pQueueFamilyIndices   = sharing_queue_families,\n      .preTransform          = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,\n      .compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,\n      .presentMode           = present_mode};\n  vk_err = vkCreateSwapchainKHR(_vk.device, &vk_sc_info, NULL, &swapchain->vk_swapchain);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  // Obtain swapchain images.\n  vk_err = vkGetSwapchainImagesKHR(_vk.device, swapchain->vk_swapchain, &swapchain->nimgs, nullptr);\n  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  swapchain->imgs = ngfi::fixed_array<VkImage> {swapchain->nimgs};\n  if (swapchain->imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n  vk_err = vkGetSwapchainImagesKHR(\n      _vk.device,\n      swapchain->vk_swapchain,\n      &swapchain->nimgs,\n      swapchain->imgs.data());\n  if (vk_err != VK_SUCCESS) { return 
NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  // Create \"wrapper\" ngf_image objects for swapchain images.\n  swapchain->wrapper_imgs = ngfi::fixed_array<ngfi::unique_ptr<ngf_image_t>> {swapchain->nimgs};\n  if (swapchain->wrapper_imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n  const ngf_image_info wrapper_image_info = {\n      .type         = NGF_IMAGE_TYPE_IMAGE_2D,\n      .extent       = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1},\n      .nmips        = 1u,\n      .nlayers      = 1u,\n      .format       = swapchain_info.color_format,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT};\n  for (size_t i = 0u; i < swapchain->nimgs; ++i) {\n    auto wrap_img = ngf_image_t::make(\n        wrapper_image_info,\n        ngfi::move(ngfvk_alloc::wrap(swapchain->imgs[i]).value()));\n    if (wrap_img.has_error()) return wrap_img.error();\n    swapchain->wrapper_imgs[i] = ngfi::move(wrap_img.value());\n  }\n\n  // Create multisampled images, if necessary.\n  const bool is_multisampled = (unsigned int)swapchain_info.sample_count > 1u;\n  if (is_multisampled) {\n    const ngf_image_info ms_image_info = {\n        .type    = NGF_IMAGE_TYPE_IMAGE_2D,\n        .extent  = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},\n        .nmips   = 1u,\n        .nlayers = 1u,\n        .format  = swapchain_info.color_format,\n        .sample_count = swapchain_info.sample_count,\n        .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT | ngfvk::global::img_usage_transient_attachment,\n    };\n    swapchain->multisample_imgs =\n        ngfi::fixed_array<ngfi::unique_ptr<ngf_image_t>> {swapchain->nimgs};\n    if (swapchain->multisample_imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n    for (size_t i = 0u; i < swapchain->nimgs; ++i) {\n      auto maybe_ms_alloc = ngfvk_alloc::make(ms_image_info);\n      if (maybe_ms_alloc.has_error()) { return maybe_ms_alloc.error(); }\n      
auto maybe_ms_img = ngf_image_t::make(ms_image_info, ngfi::move(maybe_ms_alloc.value()));\n      if (maybe_ms_img.has_error()) { return maybe_ms_img.error(); }\n      swapchain->multisample_imgs[i] = ngfi::move(maybe_ms_img.value());\n    }\n    // Create image views for multisample images.\n    swapchain->multisample_img_views = ngfi::fixed_array<VkImageView> {swapchain->nimgs};\n    if (swapchain->multisample_img_views.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n    for (uint32_t i = 0u; i < swapchain->nimgs; ++i) {\n      err = ngfvk_create_vk_image_view(\n          (VkImage)swapchain->multisample_imgs[i]->alloc.obj_handle,\n          VK_IMAGE_VIEW_TYPE_2D,\n          requested_format,\n          1u,\n          1u,\n          &swapchain->multisample_img_views[i]);\n      if (err != NGF_ERROR_OK) { return err; }\n    }\n  }\n\n  // Create image views for swapchain images.\n  for (uint32_t i = 0u; i < swapchain->nimgs; ++i) {\n    err = ngfvk_create_vk_image_view(\n        swapchain->imgs[i],\n        VK_IMAGE_VIEW_TYPE_2D,\n        requested_format,\n        1u,\n        1u,\n        &swapchain->wrapper_imgs[i]->vkview);\n    if (err != NGF_ERROR_OK) { return err; }\n  }\n\n  // Create an image for the depth attachment if necessary.\n  const bool have_depth_attachment = swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED;\n  if (have_depth_attachment) {\n    const ngf_image_info depth_image_info = {\n        .type    = NGF_IMAGE_TYPE_IMAGE_2D,\n        .extent  = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},\n        .nmips   = 1u,\n        .nlayers = 1u,\n        .format  = swapchain_info.depth_format,\n        .sample_count = swapchain_info.sample_count,\n        .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT |\n                      (is_multisampled ? 
ngfvk::global::img_usage_transient_attachment : 0u)};\n    err = ngf_create_image(&depth_image_info, &swapchain->depth_img);\n    if (err != NGF_ERROR_OK) { return err; }\n  } else {\n    swapchain->depth_img = nullptr;\n  }\n\n  // Create framebuffers for swapchain images.\n  swapchain->framebufs = ngfi::fixed_array<VkFramebuffer> {swapchain->nimgs};\n  if (swapchain->framebufs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n\n  const bool     have_resolve_attachment      = (unsigned int)swapchain_info.sample_count > 1u;\n  const uint32_t depth_stencil_attachment_idx = swapchain->depth_img ? 1u : VK_ATTACHMENT_UNUSED;\n  const uint32_t resolve_attachment_idx =\n      have_resolve_attachment ? (swapchain->depth_img ? 2u : 1u) : VK_ATTACHMENT_UNUSED;\n  const uint32_t nattachments = rt->nattachments;\n  for (uint32_t f = 0u; f < swapchain->nimgs; ++f) {\n    VkImageView attachment_views[3] {};\n    attachment_views[0] =\n        is_multisampled ? swapchain->multisample_img_views[f] : swapchain->wrapper_imgs[f]->vkview;\n    if (depth_stencil_attachment_idx != VK_ATTACHMENT_UNUSED) {\n      attachment_views[depth_stencil_attachment_idx] = swapchain->depth_img->vkview;\n    }\n    if (resolve_attachment_idx != VK_ATTACHMENT_UNUSED) {\n      attachment_views[resolve_attachment_idx] = swapchain->wrapper_imgs[f]->vkview;\n    }\n    const VkFramebufferCreateInfo fb_info = {\n        .sType           = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,\n        .pNext           = NULL,\n        .flags           = 0u,\n        .renderPass      = rt->compat_render_pass,\n        .attachmentCount = nattachments,\n        .pAttachments    = attachment_views,\n        .width           = swapchain_info.width,\n        .height          = swapchain_info.height,\n        .layers          = 1u};\n    vk_err = vkCreateFramebuffer(_vk.device, &fb_info, NULL, &swapchain->framebufs[f]);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n  }\n\n  // Create 
semaphores to be signaled when a swapchain image is acquired,\n  // and when a swapchain image is ready to be presented.\n  swapchain->acquire_sems = ngfi::fixed_array<VkSemaphore> {swapchain->nimgs};\n  swapchain->submit_sems = ngfi::fixed_array<VkSemaphore> { swapchain->nimgs};\n  if (swapchain->acquire_sems.data() == nullptr ||\n      swapchain->submit_sems.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }\n  memset(&swapchain->acquire_sems[0], 0, sizeof(VkSemaphore) * swapchain->nimgs);\n  memset(&swapchain->submit_sems[0], 0, sizeof(VkSemaphore) * swapchain->nimgs);\n  for (uint32_t s = 0u; s < swapchain->nimgs; ++s) {\n    const VkSemaphoreCreateInfo sem_info = {\n        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n        .pNext = NULL,\n        .flags = 0};\n    vk_err = vkCreateSemaphore(_vk.device, &sem_info, NULL, &swapchain->acquire_sems[s]);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n    vk_err = vkCreateSemaphore(_vk.device, &sem_info, NULL, &swapchain->submit_sems[s]);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n  }\n  swapchain->image_idx = 0U;\n  swapchain->width     = swapchain_info.width;\n  swapchain->height    = swapchain_info.height;\n  return ngfi::move(swapchain);\n}\n\nstatic void ngfvk_cleanup_pending_binds(ngf_cmd_buffer cmd_buf) {\n  cmd_buf->pending_bind_ops.clear();\n  cmd_buf->npending_bind_ops = 0u;\n}\n\nstatic ngf_error ngfvk_encoder_start(ngf_cmd_buffer cmd_buf) {\n  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_RECORDING);\n  return NGF_ERROR_OK;\n}\n\nstatic ngf_error\nngfvk_initialize_generic_encoder(ngf_cmd_buffer cmd_buf, struct ngfi_private_encoder_data* enc) {\n  enc->d0 = (uintptr_t)cmd_buf;\n  return NGF_ERROR_OK;\n}\n\nstatic ngf_error\nngfvk_encoder_end(ngf_cmd_buffer cmd_buf, struct ngfi_private_encoder_data* generic_enc) {\n  (void)generic_enc;\n  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  return 
NGF_ERROR_OK;\n}\n\nngfvk_command_superpool::ngfvk_command_superpool(\n    uint32_t queue_family_idx,\n    uint32_t capacity,\n    uint16_t ctx_id)\n    : cmd_pools {capacity},\n      ctx_id {ctx_id} {\n  memset(cmd_pools.data(), 0, sizeof(cmd_pools[0]) * capacity);\n  for (VkCommandPool& pool : cmd_pools) {\n    const VkCommandPoolCreateInfo pool_ci = {\n        .sType            = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n        .pNext            = nullptr,\n        .flags            = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,\n        .queueFamilyIndex = queue_family_idx};\n    if (vkCreateCommandPool(_vk.device, &pool_ci, NULL, &pool) != VK_SUCCESS) { break; }\n  }\n}\n\nngfvk_command_superpool::~ngfvk_command_superpool() {\n  for (VkCommandPool pool : cmd_pools) {\n    if (pool) vkDestroyCommandPool(_vk.device, pool, nullptr);\n  }\n}\n\nstatic ngfvk_command_superpool* ngfvk_find_command_superpool(uint16_t ctx_id, uint8_t nframes) {\n  ngfvk_command_superpool* result = NULL;\n  for (size_t i = 0; i < CURRENT_CONTEXT->command_superpools.size(); ++i) {\n    if (CURRENT_CONTEXT->command_superpools[i].ctx_id == ctx_id) {\n      result = &CURRENT_CONTEXT->command_superpools[i];\n      break;\n    }\n  }\n\n  if (result == nullptr) {\n    result = CURRENT_CONTEXT->command_superpools.emplace_back(\n        ngfvk_command_superpool {_vk.gfx_family_idx, nframes, ctx_id});\n  }\n\n  return result;\n}\n\nstatic ngf_error ngfvk_cmd_buffer_allocate_for_frame(\n    ngf_frame_token  frame_token,\n    VkCommandPool*   pool,\n    VkCommandBuffer* cmd_buf) {\n  const ngfvk_command_superpool* superpool = ngfvk_find_command_superpool(\n      ngfi_frame_ctx_id(frame_token),\n      ngfi_frame_max_inflight_frames(frame_token));\n  if (superpool == nullptr || superpool->cmd_pools.empty()) {\n    NGFI_DIAG_ERROR(\"failed to allocate command buffer\");\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n  *pool = superpool->cmd_pools[ngfi_frame_id(frame_token)];\n  const 
VkCommandBufferAllocateInfo vk_cmdbuf_info = {\n      .sType              = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n      .pNext              = NULL,\n      .commandPool        = *pool,\n      .level              = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n      .commandBufferCount = 1u};\n  const VkResult vk_err = vkAllocateCommandBuffers(_vk.device, &vk_cmdbuf_info, cmd_buf);\n  if (vk_err != VK_SUCCESS) {\n    NGFI_DIAG_ERROR(\"Failed to allocate cmd buffer, VK error: %d\", vk_err);\n    return NGF_ERROR_OBJECT_CREATION_FAILED;\n  }\n  const VkCommandBufferBeginInfo cmd_buf_begin = {\n      .sType            = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n      .pNext            = NULL,\n      .flags            = 0,\n      .pInheritanceInfo = NULL};\n  vkBeginCommandBuffer(*cmd_buf, &cmd_buf_begin);\n  return NGF_ERROR_OK;\n}\n\nngfi::maybe_ngfptr<ngf_cmd_buffer_t> ngf_cmd_buffer_t::make() NGF_NOEXCEPT {\n  auto cmd_buf = ngfi::unique_ptr<ngf_cmd_buffer_t>::make();\n  if (!cmd_buf) { return NGF_ERROR_OUT_OF_MEM; }\n  cmd_buf->parent_frame                       = ~0u;\n  cmd_buf->state                              = ngfi::CMD_BUFFER_STATE_NEW;\n  cmd_buf->active_gfx_pipe                    = NULL;\n  cmd_buf->active_compute_pipe                = NULL;\n  cmd_buf->active_attr_buf                    = NULL;\n  cmd_buf->active_idx_buf                     = NULL;\n  cmd_buf->renderpass_active                  = false;\n  cmd_buf->compute_pass_active                = false;\n  cmd_buf->destroy_on_submit                  = false;\n  cmd_buf->active_rt                          = NULL;\n  cmd_buf->desc_pools_list                    = NULL;\n  cmd_buf->vk_cmd_buffer                      = VK_NULL_HANDLE;\n  cmd_buf->vk_cmd_pool                        = VK_NULL_HANDLE;\n  cmd_buf->pending_barriers.npending_img_bars = 0;\n  cmd_buf->pending_barriers.npending_buf_bars = 0;\n  cmd_buf->local_res_states                   = ngfvk_sync_res_hashtable {100u};\n  return 
ngfi::move(cmd_buf);\n}\n\nngf_cmd_buffer_t::~ngf_cmd_buffer_t() noexcept {\n  if (vk_cmd_buffer != VK_NULL_HANDLE) {\n    vkFreeCommandBuffers(_vk.device, vk_cmd_pool, 1u, &vk_cmd_buffer);\n  }\n  ngfvk_cleanup_pending_binds(this);\n  in_pass_cmd_chnks.clear();\n  virt_bind_ops_ranges.clear();\n}\n\nstatic void ngfvk_execute_pending_binds(ngf_cmd_buffer cmd_buf) {\n  // Binding resources requires an active pipeline.\n  ngfvk_generic_pipeline* pipeline_data = NULL;\n  if (!(cmd_buf->renderpass_active ^ cmd_buf->compute_pass_active)) {\n    NGFI_DIAG_ERROR(\"either a render or compute pass needs to be active to bind resources\");\n    return;\n  }\n  if (cmd_buf->renderpass_active)\n    pipeline_data = (ngfvk_generic_pipeline*)(cmd_buf->active_gfx_pipe);\n  else if (cmd_buf->compute_pass_active)\n    pipeline_data = (ngfvk_generic_pipeline*)(cmd_buf->active_compute_pipe);\n  assert(pipeline_data);\n\n  // Get the number of active descriptor set layouts in the pipeline.\n  const uint32_t ndesc_set_layouts =\n      static_cast<uint32_t>(pipeline_data->descriptor_set_layouts.size());\n\n  // Reset temp. storage to make sure we have all of it available.\n  ngfi::tmp_arena().reset();\n\n  // Allocate an array of descriptor set handles from temporary storage and\n  // set them all to null. 
As we process bind operations, we'll allocate\n  // descriptor sets and put them into the array as necessary.\n  auto vk_desc_sets = ngfi::tmp_alloc<VkDescriptorSet>(ndesc_set_layouts);\n  memset(vk_desc_sets, (uintptr_t)VK_NULL_HANDLE, ndesc_set_layouts * sizeof(vk_desc_sets[0]));\n\n  // Allocate an array of vulkan descriptor set writes from temp storage, one write per\n  // pending bind op.\n  auto vk_writes = ngfi::tmp_alloc<VkWriteDescriptorSet>(cmd_buf->npending_bind_ops);\n\n  // Find a descriptor pools list to allocate from.\n  ngfvk_desc_pools_list* pools = ngfvk_find_desc_pools_list(cmd_buf->parent_frame);\n  cmd_buf->desc_pools_list     = pools;\n\n  // Process each bind operation, constructing a corresponding\n  // vulkan descriptor set write operation.\n  uint32_t descriptor_write_idx = 0u;\n  for (const ngf_resource_bind_op& bind_op_ref : cmd_buf->pending_bind_ops) {\n    const ngf_resource_bind_op* bind_op = &bind_op_ref;\n    // Ensure that a valid descriptor set is referenced by this\n    // bind operation.\n    if (bind_op->target_set >= ndesc_set_layouts) {\n      NGFI_DIAG_WARNING(\n          \"invalid descriptor set %d referenced by bind operation (pipeline has \"\n          \"%d sets) - ignoring\",\n          bind_op->target_set,\n          ndesc_set_layouts);\n      continue;\n    }\n    // Find the corresponding descriptor set layout.\n    const ngfvk_desc_set_layout* set_layout =\n        &pipeline_data->descriptor_set_layouts[bind_op->target_set];\n    // Ensure that a valid binding is referenced by this bind operation.\n    if (bind_op->target_binding >= set_layout->binding_properties.size()) {\n      NGFI_DIAG_WARNING(\n          \"invalid binding %d referenced by bind operation (descriptor set has %d bindings) - \"\n          \"ignoring\",\n          bind_op->target_binding,\n          set_layout->binding_properties.size());\n      continue;\n    }\n\n    if (set_layout->binding_properties[bind_op->target_binding].type !=\n        
get_vk_descriptor_type(bind_op->type)) {\n      NGFI_DIAG_WARNING(\n          \"attempting to bind descriptor with unmatching type (set %d binding %d) - ignoring\",\n          bind_op->target_set,\n          bind_op->target_binding);\n      continue;\n    }\n\n    // Allocate a new descriptor set if necessary.\n    const bool need_new_desc_set = vk_desc_sets[bind_op->target_set] == VK_NULL_HANDLE;\n    if (need_new_desc_set) {\n      VkDescriptorSet set = ngfvk_desc_pools_list_allocate_set(pools, set_layout);\n      if (set == VK_NULL_HANDLE) {\n        NGFI_DIAG_ERROR(\"Failed to bind graphics resources - could not allocate descriptor set\");\n        return;\n      }\n      vk_desc_sets[bind_op->target_set] = set;\n    }\n\n    // At this point, we have a valid descriptor set in the `vk_sets` array.\n    // We'll use it in the write operation corresponding to the current bind_op.\n    VkDescriptorSet set = vk_desc_sets[bind_op->target_set];\n\n    // Construct a vulkan descriptor set write corresponding to this bind\n    // operation.\n    VkWriteDescriptorSet* vk_write = &vk_writes[descriptor_write_idx];\n\n    vk_write->sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;\n    vk_write->pNext           = NULL;\n    vk_write->dstSet          = set;\n    vk_write->dstBinding      = bind_op->target_binding;\n    vk_write->descriptorCount = 1u;\n    vk_write->dstArrayElement = bind_op->array_index;\n    vk_write->descriptorType  = get_vk_descriptor_type(bind_op->type);\n\n    switch (bind_op->type) {\n    case NGF_DESCRIPTOR_STORAGE_BUFFER:\n    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {\n      const ngf_buffer_bind_info* bind_info    = &bind_op->info.buffer;\n      auto                        vk_bind_info = ngfi::tmp_alloc<VkDescriptorBufferInfo>();\n\n      vk_bind_info->buffer = (VkBuffer)bind_info->buffer->alloc.obj_handle;\n      vk_bind_info->offset = bind_info->offset;\n      vk_bind_info->range  = bind_info->range;\n\n      vk_write->pBufferInfo = 
vk_bind_info;\n      break;\n    }\n    case NGF_DESCRIPTOR_TEXEL_BUFFER: {\n      vk_write->pTexelBufferView = &(bind_op->info.texel_buffer_view->vk_buf_view);\n      break;\n    }\n    case NGF_DESCRIPTOR_STORAGE_IMAGE:\n      if (cmd_buf->renderpass_active) {\n        NGFI_DIAG_WARNING(\"Binding storage images to non-compute shader is currently unsupported.\");\n        continue;\n      }\n    /* break omitted intentionally */\n    case NGF_DESCRIPTOR_IMAGE:\n    case NGF_DESCRIPTOR_SAMPLER:\n    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {\n      const ngf_image_sampler_bind_info* bind_info = &bind_op->info.image_sampler;\n      const bool                         is_multilayered_image =\n          set_layout->binding_properties[bind_op->target_binding].is_multilayered_image;\n      VkImageView image_view = VK_NULL_HANDLE;\n      if (bind_op->type == NGF_DESCRIPTOR_IMAGE || bind_op->type == NGF_DESCRIPTOR_STORAGE_IMAGE ||\n          bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) {\n        image_view = bind_info->is_image_view\n                         ? bind_info->resource.view->vk_view\n                         : (is_multilayered_image ? 
bind_info->resource.image->vkview_arrayed\n                                                  : bind_info->resource.image->vkview);\n      }\n      auto vk_bind_info         = ngfi::tmp_alloc<VkDescriptorImageInfo>();\n      vk_bind_info->imageView   = VK_NULL_HANDLE;\n      vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;\n      vk_bind_info->sampler     = VK_NULL_HANDLE;\n      if (bind_op->type == NGF_DESCRIPTOR_IMAGE ||\n          bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) {\n        vk_bind_info->imageView   = image_view;\n        vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;\n      } else if (bind_op->type == NGF_DESCRIPTOR_STORAGE_IMAGE) {\n        vk_bind_info->imageView   = image_view;\n        vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_GENERAL;\n      } else if (\n          bind_op->type == NGF_DESCRIPTOR_SAMPLER ||\n          bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) {\n        vk_bind_info->sampler = bind_info->sampler->vksampler;\n      }\n      vk_write->pImageInfo = vk_bind_info;\n      break;\n    }\n    case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE: {\n      auto accel_struct_info   = ngfi::tmp_alloc<VkWriteDescriptorSetAccelerationStructureKHR>();\n      accel_struct_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR;\n      accel_struct_info->pNext = NULL;\n      accel_struct_info->accelerationStructureCount = 1u;\n      accel_struct_info->pAccelerationStructures =\n          (const VkAccelerationStructureKHR*)&bind_op->info.acceleration_structure;\n      vk_write->pNext = accel_struct_info;\n      break;\n    }\n\n    default:\n      assert(false);\n    }\n    ++descriptor_write_idx;\n  }\n  // perform all the vulkan descriptor set write operations to populate the\n  // newly allocated descriptor sets.\n  vkUpdateDescriptorSets(_vk.device, descriptor_write_idx, vk_writes, 0, NULL);\n\n  // bind each of the descriptor sets individually (this ensures that desc.\n  // sets 
bound for a compatible pipeline earlier in this command buffer\n  // don't get clobbered).\n  for (uint32_t s = 0; s < ndesc_set_layouts; ++s) {\n    if (vk_desc_sets[s] != VK_NULL_HANDLE) {\n      vkCmdBindDescriptorSets(\n          cmd_buf->vk_cmd_buffer,\n          cmd_buf->renderpass_active ? VK_PIPELINE_BIND_POINT_GRAPHICS\n                                     : VK_PIPELINE_BIND_POINT_COMPUTE,\n          pipeline_data->vk_pipeline_layout,\n          s,\n          1,\n          &vk_desc_sets[s],\n          0,\n          NULL);\n    }\n  }\n  ngfvk_cleanup_pending_binds(cmd_buf);\n}\n\n// Returns a bitstring uniquely identifying the series of load/store op combos\n// for each attachment.\nstatic uint64_t ngfvk_renderpass_ops_key(\n    const ngf_render_target        rt,\n    const ngf_attachment_load_op*  load_ops,\n    const ngf_attachment_store_op* store_ops) {\n  const uint32_t num_rt_attachments = rt->nattachments;\n  const uint32_t nattachments =\n      rt->is_default ? (NGFI_MIN(2, num_rt_attachments)) : num_rt_attachments;\n  assert(nattachments < (8u * sizeof(uint64_t) / 4u));\n  uint64_t result = 0u;\n  for (uint32_t i = 0u; i < nattachments; ++i) {\n    const uint64_t load_op_bits  = (uint64_t)load_ops[i];\n    const uint64_t store_op_bits = (uint64_t)store_ops[i];\n    assert(load_op_bits <= 3);\n    assert(store_op_bits <= 2);\n    const uint64_t attachment_ops_combo = (load_op_bits << 2u) | store_op_bits;\n    result |= attachment_ops_combo << (i * 4u);\n  }\n  // For default RT, the load/store ops of the resolve attachments are not\n  // specified by the client code explicitly. 
We always treat them as\n  // DONT_CARE / STORE.\n  if (rt->is_default && nattachments < num_rt_attachments &&\n      rt->attachment_compat_pass_descs[nattachments].is_resolve) {\n    result = result | ((uint64_t)0x1u << (4u * nattachments));\n  }\n  return result;\n}\n\n// Macros for accessing load/store ops encoded in a renderpass ops key.\n#define NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) ((ops_key >> (4u * idx)) & 15u)\n#define NGFVK_ATTACHMENT_LOAD_OP_FROM_KEY(idx, ops_key) \\\n  (get_vk_load_op((ngf_attachment_load_op)(NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) >> 2u)))\n#define NGFVK_ATTACHMENT_STORE_OP_FROM_KEY(idx, ops_key) \\\n  (get_vk_store_op((ngf_attachment_store_op)(NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) & 3u)))\n\n// Looks up a renderpass object from the current context's renderpass cache, and creates\n// one if it doesn't exist.\nstatic VkRenderPass ngfvk_lookup_renderpass(ngf_render_target rt, uint64_t ops_key) {\n  VkRenderPass result = VK_NULL_HANDLE;\n  for (size_t r = 0; r < CURRENT_CONTEXT->renderpass_cache.size(); ++r) {\n    const ngfvk_renderpass_cache_entry* cache_entry = &CURRENT_CONTEXT->renderpass_cache[r];\n    if (cache_entry->rt == rt && cache_entry->ops_key == ops_key) {\n      result = cache_entry->renderpass;\n      break;\n    }\n  }\n\n  if (result == VK_NULL_HANDLE) {\n    const uint32_t nattachments       = rt->nattachments;\n    auto attachment_compat_pass_descs = ngfi::tmp_alloc<ngfvk_attachment_pass_desc>(nattachments);\n    const size_t rt_attachment_pass_descs_size =\n        rt->nattachments * sizeof(ngfvk_attachment_pass_desc);\n    memcpy(\n        attachment_compat_pass_descs,\n        rt->attachment_compat_pass_descs.data(),\n        rt_attachment_pass_descs_size);\n\n    for (uint32_t i = 0; i < rt->nattachments; ++i) {\n      attachment_compat_pass_descs[i].load_op  = NGFVK_ATTACHMENT_LOAD_OP_FROM_KEY(i, ops_key);\n      attachment_compat_pass_descs[i].store_op = NGFVK_ATTACHMENT_STORE_OP_FROM_KEY(i, ops_key);\n    
}\n\n    ngfvk_renderpass_from_attachment_descs(\n        nattachments,\n        rt->attachment_descs.data(),\n        attachment_compat_pass_descs,\n        &result);\n    const ngfvk_renderpass_cache_entry cache_entry = {\n        .rt         = rt,\n        .ops_key    = ops_key,\n        .renderpass = result};\n    CURRENT_CONTEXT->renderpass_cache.push_back(cache_entry);\n  }\n\n  return result;\n}\n\nstatic bool ngfvk_init_loader_if_necessary() {\n  return !vkGetInstanceProcAddr ? vkl_init_loader() : true;\n}\n\nstatic VkResult ngfvk_create_instance(\n    bool        request_validation,\n    bool        request_debug_groups,\n    VkInstance* instance_ptr,\n    bool*       validation_enabled) {\n  // Scan through the list of instance-level extensions, determine which are supported.\n  bool     swapchain_colorspace_supported = false;\n  uint32_t ninst_exts                     = 0u;\n  vkEnumerateInstanceExtensionProperties(NULL, &ninst_exts, NULL);\n  auto ext_props = (VkExtensionProperties*)malloc(sizeof(VkExtensionProperties) * ninst_exts);\n  if (ext_props == NULL) { return VK_ERROR_OUT_OF_HOST_MEMORY; }\n  vkEnumerateInstanceExtensionProperties(NULL, &ninst_exts, ext_props);\n  for (size_t i = 0; i < ninst_exts && !swapchain_colorspace_supported; ++i) {\n    swapchain_colorspace_supported =\n        (strcmp(VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME, ext_props[i].extensionName) == 0u);\n  }\n  free(ext_props);\n\n  // Query the supported instance version.\n  uint32_t instance_version = VK_API_VERSION_1_0;\n  if (vkEnumerateInstanceVersion) { vkEnumerateInstanceVersion(&instance_version); }\n\n  // nicegraf requires Vulkan 1.1+\n  if (instance_version < VK_API_VERSION_1_1) { return VK_ERROR_INCOMPATIBLE_DRIVER; }\n\n  // Use the highest supported version up to 1.2.\n  const uint32_t api_version = NGFI_MIN(instance_version, VK_API_VERSION_1_2);\n\n  // Names of instance-level extensions.\n  const char* ext_names[] = {\n      \"VK_KHR_surface\",\n      
VK_SURFACE_EXT,\n      VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,\n      NULL,\n      NULL};\n  const uint32_t max_optional_exts  = 2u;\n  uint32_t       optional_ext_count = 0u;\n  const uint32_t nmandatory_exts    = NGFI_ARRAYSIZE(ext_names) - max_optional_exts;\n  if (swapchain_colorspace_supported) {\n    ext_names[nmandatory_exts + optional_ext_count++] = VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME;\n  }\n  if (request_validation || request_debug_groups) {\n    ext_names[nmandatory_exts + optional_ext_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;\n  }\n  assert(max_optional_exts >= optional_ext_count);\n\n  const VkApplicationInfo app_info = {// Application information.\n                                      .sType            = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n                                      .pNext            = NULL,\n                                      .pApplicationName = NULL,  // TODO: allow specifying app name.\n                                      .pEngineName      = \"nicegraf\",\n                                      .engineVersion = VK_MAKE_VERSION(NGF_VER_MAJ, NGF_VER_MIN, 0),\n                                      .apiVersion    = api_version};\n\n  // Names of instance layers to enable.\n  const char* validation_layer_name = \"VK_LAYER_KHRONOS_validation\";\n  const char* enabled_layers[]      = {validation_layer_name};\n\n  // Check if validation layers are supported.\n  uint32_t nlayers = 0u;\n  vkEnumerateInstanceLayerProperties(&nlayers, NULL);\n  auto layer_props = ngfi::tmp_alloc<VkLayerProperties>(nlayers);\n  vkEnumerateInstanceLayerProperties(&nlayers, layer_props);\n  bool validation_supported = false;\n  for (size_t l = 0u; !validation_supported && l < nlayers; ++l) {\n    validation_supported = (strcmp(validation_layer_name, layer_props[l].layerName) == 0u);\n  }\n\n  // Enable validation only if detailed verbosity is requested.\n  const bool enable_validation = validation_supported && request_validation;\n  if 
(validation_enabled) { *validation_enabled = enable_validation; }\n\n  // Create a Vulkan instance.\n  const uint32_t             nunused_exts = (max_optional_exts - optional_ext_count);\n  const VkInstanceCreateInfo inst_info    = {\n         .sType                   = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n         .pNext                   = NULL,\n         .flags                   = 0u,\n         .pApplicationInfo        = &app_info,\n         .enabledLayerCount       = enable_validation ? 1u : 0u,\n         .ppEnabledLayerNames     = enabled_layers,\n         .enabledExtensionCount   = (uint32_t)NGFI_ARRAYSIZE(ext_names) - nunused_exts,\n         .ppEnabledExtensionNames = ext_names};\n  VkResult vk_err = vkCreateInstance(&inst_info, NULL, instance_ptr);\n  if (vk_err != VK_SUCCESS) {\n    NGFI_DIAG_ERROR(\"Failed to create a Vulkan instance, VK error %d.\", vk_err);\n    return vk_err;\n  }\n\n  return VK_SUCCESS;\n}\n\nstatic void ngfvk_cmd_bind_resources(\n    ngf_cmd_buffer              buf,\n    const ngf_resource_bind_op* bind_operations,\n    uint32_t                    nbind_operations) {\n  for (uint32_t i = 0; i < nbind_operations; ++i) {\n    buf->pending_bind_ops.append(\n        bind_operations[i],\n        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);\n    ++buf->npending_bind_ops;\n  }\n}\n\nstatic void ngfvk_cmd_buf_reset_render_cmds(ngf_cmd_buffer cmd_buf) {\n  cmd_buf->in_pass_cmd_chnks.clear();\n}\n\nstatic void ngfvk_cmd_buf_add_render_cmd(\n    ngf_cmd_buffer          cmd_buf,\n    const ngfvk_render_cmd* cmd,\n    bool                    in_renderpass) {\n  if (in_renderpass) {\n    cmd_buf->in_pass_cmd_chnks.append(\n        *cmd,\n        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);\n  } else {\n    assert(false);\n  }\n}\n\nstatic void ngfvk_cmd_buf_reset_res_states(ngf_cmd_buffer cmd_buf) {\n  cmd_buf->local_res_states.clear();\n}\n\nstatic inline ngfvk_sync_res 
ngfvk_sync_res_from_buf(ngf_buffer buf) {\n  ngfvk_sync_res sync_res = {\n      .data = {.buf = buf},\n      .type = NGFVK_SYNC_RES_BUFFER,\n      .hash = buf->hash};\n  return sync_res;\n}\n\nstatic inline ngfvk_sync_res ngfvk_sync_res_from_img(ngf_image img) {\n  ngfvk_sync_res sync_res = {.data = {.img = img}, .type = NGFVK_SYNC_RES_IMAGE, .hash = img->hash};\n  return sync_res;\n}\n\nstatic uintptr_t ngfvk_handle_from_sync_res(const ngfvk_sync_res* res) {\n  return res->type == NGFVK_SYNC_RES_BUFFER ? (uintptr_t)res->data.img : (uintptr_t)res->data.buf;\n}\n\n// Look up resource state in a given cmd buffer.\n// If an entry corresponding to the resource doesn't already exist, it gets created.\nstatic bool ngfvk_cmd_buf_lookup_sync_res(\n    ngf_cmd_buffer        cmd_buf,\n    const ngfvk_sync_res* sync_res,\n    ngfvk_sync_res_data** sync_res_data_out) {\n  ngfvk_sync_res_data                     new_res_state {};\n  bool                                    new_res = false;\n  const ngfvk_sync_res_hashtable::keyhash keyhash = {\n      ngfvk_handle_from_sync_res(sync_res),\n      sync_res->hash};\n\n  *sync_res_data_out =\n      cmd_buf->local_res_states.get_or_insert_prehashed(keyhash, new_res_state, new_res);\n\n  if (new_res) {\n    ngfvk_sync_res_data* sync_res_data = *sync_res_data_out;\n    memset(sync_res_data, 0, sizeof(new_res_state));\n    sync_res_data->expected_sync_req.layout = VK_IMAGE_LAYOUT_UNDEFINED;\n    sync_res_data->res_handle               = ngfvk_handle_from_sync_res(sync_res);\n    sync_res_data->res_type                 = sync_res->type;\n    sync_res_data->pending_sync_req_idx     = ~0u;\n  }\n\n  return new_res;\n}\n\nstatic inline uint32_t ngfvk_next_nonzero_bit(uint32_t* mask) {\n  const uint32_t old_mask = *mask;\n  return (*mask = old_mask & (old_mask - 1), *mask ^ old_mask);\n}\n\nstatic inline uint32_t ngfvk_stage_idx(VkPipelineStageFlagBits bit) {\n  switch (bit) {\n  case VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:\n    return 0;\n  case 
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:\n    return 1;\n  case VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:\n    return 2;\n  case VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:\n    return 3;\n  case VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:\n    return 4;\n  case VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:\n    return 5;\n  case VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:\n    return 6;\n  case VK_PIPELINE_STAGE_TRANSFER_BIT:\n    return 7;\n  default:\n    assert(false);\n  }\n  return ~0u;\n}\n\nstatic inline uint32_t ngfvk_access_idx(VkAccessFlagBits bit) {\n  switch (bit) {\n  case VK_ACCESS_SHADER_READ_BIT:\n    return 0u;\n  case VK_ACCESS_SHADER_WRITE_BIT:\n    return 1u;\n  case VK_ACCESS_UNIFORM_READ_BIT:\n    return 2u;\n  case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:\n    return 0u;\n  case VK_ACCESS_INDEX_READ_BIT:\n    return 1u;\n  case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:\n    return 0u;\n  case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:\n    return 1u;\n  case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:\n    return 0u;\n  case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:\n    return 1u;\n  case VK_ACCESS_TRANSFER_READ_BIT:\n    return 0u;\n  case VK_ACCESS_TRANSFER_WRITE_BIT:\n    return 1u;\n  default:\n    assert(false);\n  }\n  return ~0u;\n}\n\nstatic uint32_t ngfvk_per_stage_access_mask(const ngfvk_sync_barrier_masks* barrier_masks) {\n  static const VkAccessFlags valid_access_flags[] = {\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT,  // VERTEX_INPUT\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT,          // VERTEX_SHADER\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT,          // FRAGMENT_SHADER\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |\n          VK_ACCESS_SHADER_WRITE_BIT,  // COMPUTE_SHADER\n      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |\n          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,  // EARLY_FRAGMENT_TESTS\n      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |\n      
    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,  // LATE_FRAGMENT_TESTS\n      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |\n          VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,                   // COLOR_ATTACHMENT_OUTPUT\n      VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT  // TRANSFER\n  };\n  static const uint32_t bits_per_stage = 3u;\n\n  uint32_t stage_mask = (uint32_t)barrier_masks->stage_mask;\n  uint32_t result     = 0u;\n\n  while (stage_mask) {\n    const VkPipelineStageFlagBits stage_bit =\n        (VkPipelineStageFlagBits)ngfvk_next_nonzero_bit(&stage_mask);\n    const uint32_t stg_idx     = ngfvk_stage_idx(stage_bit);\n    uint32_t       access_mask = (uint32_t)barrier_masks->access_mask;\n    while (access_mask) {\n      const VkAccessFlagBits access_bit = (VkAccessFlagBits)ngfvk_next_nonzero_bit(&access_mask);\n      if (valid_access_flags[stg_idx] & access_bit) {\n        const uint32_t acc_idx = ngfvk_access_idx(access_bit);\n        result |= (1 << (bits_per_stage * stg_idx + acc_idx));\n      }\n    }\n  }\n  return result;\n}\n\n// Checks whether a barrier is needed before performing an operation on a resource, given its\n// sync state.\n// If a barrier is not needed, returns false. 
Otherwise, populates the barrier data appropriately\n// and returns true.\nstatic bool ngfvk_sync_barrier(\n    ngfvk_sync_state*     sync_state,\n    const ngfvk_sync_req* sync_req,\n    ngfvk_barrier_data*   barrier) {\n  const VkPipelineStageFlags dst_stage_mask  = sync_req->barrier_masks.stage_mask;\n  const VkAccessFlags        dst_access_mask = sync_req->barrier_masks.access_mask;\n  const VkImageLayout        dst_layout      = sync_req->layout;\n\n  // Mask of all accesses we care about, that perform writes.\n  static const VkAccessFlags all_write_accesses_mask =\n      VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |\n      VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;\n\n  // Reset all barrier data.\n  memset(barrier, 0, sizeof(*barrier));\n\n  // Decide if the requested operation necessitates a write to the resource.\n  // Layout transitions are read-modify-write operations, thus if a layout transition is required\n  // for the operation, we _always_ need a write, even if the actual requested access type\n  // specified in `dst_access_mask` is read-only.\n  const bool need_layout_transition = dst_layout != sync_state->layout;\n  const bool dst_stages_want_write  = (all_write_accesses_mask & dst_access_mask);\n  const bool need_write             = dst_stages_want_write || need_layout_transition;\n\n  if (!need_write) {\n    // Case for read-only operations.\n    // Those can run concurrently with other read-only operations, and only need to wait for\n    // any outstanding writes to complete.\n\n    const uint32_t per_stg_acc_mask = ngfvk_per_stage_access_mask(&sync_req->barrier_masks);\n    const bool     accesses_seen_write =\n        ((sync_state->per_stage_readers_mask & per_stg_acc_mask) == per_stg_acc_mask);\n\n    if (sync_state->last_writer_masks.stage_mask != VK_PIPELINE_STAGE_NONE &&\n        !accesses_seen_write) {\n      // If there was a preceding write, and the stage requesting the read-only 
operation\n      // hasn't consumed it yet, a barrier is necessary.\n      barrier->src_stage_mask |= sync_state->last_writer_masks.stage_mask;\n      barrier->src_access_mask |=\n          sync_state->last_writer_masks.access_mask & all_write_accesses_mask;\n    }\n    // Add the requested operation to the mask of ongoing reads.\n    sync_state->active_readers_masks.stage_mask |= dst_stage_mask;\n    sync_state->active_readers_masks.access_mask |= dst_access_mask;\n    sync_state->per_stage_readers_mask |= per_stg_acc_mask;\n  } else {\n    // Case for modifying operations.\n    // No more than a single modifying operation may be in progress at a given time.\n    // Modifying operations have to wait for all outstanding reads and writes to complete.\n\n    // Add any outstanding readers to the barrier's source mask.\n    barrier->src_stage_mask |= sync_state->active_readers_masks.stage_mask;\n    barrier->src_access_mask |= sync_state->active_readers_masks.access_mask;\n\n    // No active readers remain after a modifying op, so zero out their corresponding masks.\n    sync_state->active_readers_masks.stage_mask  = 0u;\n    sync_state->active_readers_masks.access_mask = 0u;\n    sync_state->per_stage_readers_mask           = 0u;\n\n    // If there is an outstanding write, emit a barrier for it.\n    // Note that we skip this if there were any outstanding reads, those already depend on the\n    // write to finish, so it's sufficient to just depend on them.\n    if (barrier->src_stage_mask == 0 &&\n        sync_state->last_writer_masks.stage_mask != VK_PIPELINE_STAGE_NONE) {\n      barrier->src_stage_mask |= sync_state->last_writer_masks.stage_mask;\n      barrier->src_access_mask |= sync_state->last_writer_masks.access_mask;\n    }\n\n    // Update last writer stage and access mask.\n    sync_state->last_writer_masks.stage_mask  = dst_stage_mask;\n    sync_state->last_writer_masks.access_mask = dst_access_mask;\n\n    // If the requested access was actually 
readonly, mark it as synced with the last write\n    // since in that context the last write is made by the layout transition, the results of which\n    // are made available and visible to the destination stage automatically.\n    if ((dst_access_mask & all_write_accesses_mask) == 0u) {\n      sync_state->active_readers_masks.stage_mask |= dst_stage_mask;\n      sync_state->active_readers_masks.access_mask |= dst_access_mask;\n      sync_state->per_stage_readers_mask |= ngfvk_per_stage_access_mask(&sync_req->barrier_masks);\n    }\n  }\n\n  // We need a barrier if we found any source stages to wait on, or if a layout transition was\n  // necessary.\n  const bool need_barrier = barrier->src_stage_mask != 0u || need_layout_transition;\n\n  if (need_barrier) {\n    barrier->dst_access_mask = dst_access_mask;\n    barrier->dst_stage_mask  = dst_stage_mask;\n    barrier->src_stage_mask =\n        barrier->src_stage_mask ? barrier->src_stage_mask : VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;\n    barrier->src_layout = sync_state->layout;\n    barrier->dst_layout = dst_layout;\n  }\n\n  // Update the layout in synchronization state.\n  sync_state->layout = dst_layout;\n\n  return need_barrier;\n}\n\nstatic void ngfvk_sync_req_batch_init(uint32_t nmax_sync_reqs, ngfvk_sync_req_batch* result) {\n  memset(result, 0, sizeof(*result));\n  result->pending_sync_reqs  = ngfi::tmp_alloc<ngfvk_sync_req>(nmax_sync_reqs);\n  result->sync_res_data_keys = ngfi::tmp_alloc<ngfvk_sync_res_hashtable::keyhash>(nmax_sync_reqs);\n  result->freshness          = ngfi::tmp_alloc<bool>(nmax_sync_reqs);\n  memset(result->freshness, 0, sizeof(bool) * nmax_sync_reqs);\n}\n\n// Merges a given sync request with the resource's already pending sync request. 
Returns `false` and\n// does nothing if the operation requested by the given sync request is incompatible with the\n// pending sync request.\nstatic bool ngfvk_sync_req_merge(ngfvk_sync_req* dst_sync_req, const ngfvk_sync_req* sync_req) {\n  static const VkAccessFlags NGFVK_RENDER_ACCESSES_MASK =\n      VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;\n  static const VkAccessFlags NGFVK_WRITE_ACCESSES_MASK =\n      VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;\n  const bool render_requested =\n      ((sync_req->barrier_masks.access_mask & NGFVK_RENDER_ACCESSES_MASK) != 0);\n  const bool write_requested =\n      ((sync_req->barrier_masks.access_mask & NGFVK_WRITE_ACCESSES_MASK) != 0);\n  const bool render_pending =\n      ((dst_sync_req->barrier_masks.access_mask & NGFVK_RENDER_ACCESSES_MASK) != 0);\n  const bool write_pending =\n      ((dst_sync_req->barrier_masks.access_mask & NGFVK_WRITE_ACCESSES_MASK) != 0);\n  const bool read_requested      = !write_requested && (sync_req->barrier_masks.access_mask != 0);\n  const bool read_pending        = !write_pending && (dst_sync_req->barrier_masks.access_mask != 0);\n  const bool layout_incompatible = dst_sync_req->layout != VK_IMAGE_LAYOUT_UNDEFINED &&\n                                   dst_sync_req->layout != VK_IMAGE_LAYOUT_GENERAL &&\n                                   sync_req->layout != VK_IMAGE_LAYOUT_GENERAL &&\n                                   dst_sync_req->layout != sync_req->layout;\n  // Using a resource as a render target is not compatible with any other type of access.\n  // Using a resource in a manner that requires it to be simultaneously in two incompatible layouts\n  // results in transitioning to the GENERAL layout which is compatible with all kinds of accesses.\n  // Merging modifying and non-modifying sync requests is allowed because the same resource might\n  // be accessed with different descriptors in a GPU program (e.g. 
an image can be accessed both\n  // as a sampled texture and as a storage image).\n  if ((render_requested && (write_pending || read_pending || render_pending)) ||\n      (render_pending && (write_requested || read_requested))) {\n    NGFI_DIAG_ERROR(\"Attempt to use a resource with incompatible accesses within a single \"\n                    \"draw/dispatch. Ignoring.\");\n    return false;\n  }\n\n  dst_sync_req->barrier_masks.access_mask |= sync_req->barrier_masks.access_mask;\n  dst_sync_req->barrier_masks.stage_mask |= sync_req->barrier_masks.stage_mask;\n  const bool preserve_general_layout =\n      (dst_sync_req->layout == VK_IMAGE_LAYOUT_GENERAL ||\n       sync_req->layout == VK_IMAGE_LAYOUT_GENERAL);\n  dst_sync_req->layout =\n      (preserve_general_layout || layout_incompatible) ? VK_IMAGE_LAYOUT_GENERAL : sync_req->layout;\n  return true;\n}\n\nstatic bool ngfvk_sync_req_batch_add(\n    ngfvk_sync_req_batch*              batch,\n    ngfvk_sync_res_hashtable::key_type key,\n    uint64_t                           hash,\n    ngfvk_sync_res_data*               sync_res_data,\n    bool                               fresh,\n    const ngfvk_sync_req*              sync_req) {\n  if (sync_res_data->pending_sync_req_idx == ~0u) {\n    sync_res_data->pending_sync_req_idx = batch->npending_sync_reqs++;\n    if (sync_res_data->res_type == NGFVK_SYNC_RES_BUFFER) {\n      batch->nbuffer_sync_reqs++;\n    } else if (sync_res_data->res_type == NGFVK_SYNC_RES_IMAGE) {\n      batch->nimage_sync_reqs++;\n    }\n    memset(\n        &batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx],\n        0,\n        sizeof(batch->pending_sync_reqs[0]));\n    batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx].layout =\n        VK_IMAGE_LAYOUT_UNDEFINED;\n    batch->sync_res_data_keys[sync_res_data->pending_sync_req_idx].key  = key;\n    batch->sync_res_data_keys[sync_res_data->pending_sync_req_idx].hash = hash;\n  }\n  if (fresh && 
sync_res_data->pending_sync_req_idx < batch->npending_sync_reqs) {\n    batch->freshness[sync_res_data->pending_sync_req_idx] = true;\n  }\n  return ngfvk_sync_req_merge(\n      &batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx],\n      sync_req);\n}\n\nstatic bool ngfvk_sync_req_batch_add_with_lookup(\n    ngfvk_sync_req_batch* batch,\n    ngf_cmd_buffer        cmd_buf,\n    const ngfvk_sync_res* res,\n    const ngfvk_sync_req* sync_req) {\n  switch(res->type) { // Ignore resources marked as read-only.\n  case NGFVK_SYNC_RES_BUFFER:\n      if (res->data.buf->sync_state.skip_hazard_tracking) return false;\n      break;\n  case NGFVK_SYNC_RES_IMAGE:\n      if (res->data.img->sync_state.skip_hazard_tracking) return false;\n      break;\n  default:;\n  }\n  ngfvk_sync_res_data* sync_res_data;\n\n  const bool fresh = ngfvk_cmd_buf_lookup_sync_res(cmd_buf, res, &sync_res_data);\n\n  return ngfvk_sync_req_batch_add(\n      batch,\n      ngfvk_handle_from_sync_res(res),\n      res->hash,\n      sync_res_data,\n      fresh,\n      sync_req);\n}\n\nstatic void ngfvk_sync_commit_pending_barriers_legacy(\n    ngfvk_pending_barrier_list* pending_bars,\n    VkCommandBuffer             cmd_buf) {\n  auto img_bars = ngfi::tmp_alloc<VkImageMemoryBarrier>(pending_bars->npending_img_bars);\n  auto buf_bars = ngfi::tmp_alloc<VkBufferMemoryBarrier>(pending_bars->npending_buf_bars);\n  VkPipelineStageFlags src_stage_mask = 0u;\n  VkPipelineStageFlags dst_stage_mask = 0u;\n  uint32_t             nimg_bars      = 0u;\n  uint32_t             nbuf_bars      = 0u;\n\n  for (const ngfvk_barrier_data& barrier_ref : pending_bars->barriers) {\n    const ngfvk_barrier_data* barrier = &barrier_ref;\n    src_stage_mask |= barrier->src_stage_mask;\n    dst_stage_mask |= barrier->dst_stage_mask;\n    switch (barrier->res.type) {\n    case NGFVK_SYNC_RES_IMAGE: {\n      const ngf_image       img                      = barrier->res.data.img;\n      VkImageMemoryBarrier* image_barrier       
     = &img_bars[nimg_bars++];\n      image_barrier->sType                           = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;\n      image_barrier->pNext                           = NULL;\n      image_barrier->srcQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;\n      image_barrier->dstQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;\n      image_barrier->srcAccessMask                   = barrier->src_access_mask;\n      image_barrier->dstAccessMask                   = barrier->dst_access_mask;\n      image_barrier->oldLayout                       = barrier->src_layout;\n      image_barrier->newLayout                       = barrier->dst_layout;\n      image_barrier->image                           = (VkImage)img->alloc.obj_handle;\n      image_barrier->subresourceRange.baseArrayLayer = 0u;\n      image_barrier->subresourceRange.baseMipLevel   = 0u;\n      image_barrier->subresourceRange.layerCount     = img->nlayers;\n      image_barrier->subresourceRange.levelCount     = img->nlevels;\n      const bool is_depth                            = ngfvk_format_is_depth(img->vk_fmt);\n      const bool is_stencil                          = ngfvk_format_is_stencil(img->vk_fmt);\n      image_barrier->subresourceRange.aspectMask =\n          (is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) |\n          (is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : 0u) |\n          ((!is_depth && !is_stencil) ? 
VK_IMAGE_ASPECT_COLOR_BIT : 0u);
      break;
    }
    case NGFVK_SYNC_RES_BUFFER: {
      // Translate a pending buffer barrier into a legacy VkBufferMemoryBarrier.
      const ngf_buffer       buf            = barrier->res.data.buf;
      VkBufferMemoryBarrier* buffer_barrier = &buf_bars[nbuf_bars++];
      buffer_barrier->sType                 = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
      buffer_barrier->pNext                 = NULL;
      // No queue family ownership transfer is performed here.
      buffer_barrier->srcQueueFamilyIndex   = VK_QUEUE_FAMILY_IGNORED;
      buffer_barrier->dstQueueFamilyIndex   = VK_QUEUE_FAMILY_IGNORED;
      buffer_barrier->srcAccessMask         = barrier->src_access_mask;
      buffer_barrier->dstAccessMask         = barrier->dst_access_mask;
      // The barrier always covers the entire buffer.
      buffer_barrier->offset                = 0u;
      buffer_barrier->buffer                = (VkBuffer)buf->alloc.obj_handle;
      buffer_barrier->size                  = buf->size;
      break;
    }
    default:
      assert(false);
      break;
    }
  }
  // The pending list has been fully translated; reset it for reuse.
  pending_bars->barriers.clear();
  pending_bars->npending_buf_bars = 0u;
  pending_bars->npending_img_bars = 0u;
  // Record a single pipeline barrier covering all accumulated buffer and image barriers.
  if (nbuf_bars > 0 || nimg_bars > 0) {
    vkCmdPipelineBarrier(
        cmd_buf,
        src_stage_mask,
        dst_stage_mask,
        0u,
        0u,
        NULL,
        nbuf_bars,
        buf_bars,
        nimg_bars,
        img_bars);
  }
}

// Commits all barriers accumulated in `pending_bars` into `cmd_buf` using the
// synchronization2 path (vkCmdPipelineBarrier2). Unlike the legacy path, the
// per-resource src/dst stage masks are carried inside each
// VkImageMemoryBarrier2 / VkBufferMemoryBarrier2 instead of on the pipeline
// barrier call itself. Clears the pending list when done.
static void ngfvk_sync_commit_pending_barriers_sync2(
    ngfvk_pending_barrier_list* pending_bars,
    VkCommandBuffer             cmd_buf) {
  // Temp-arena allocations; released wholesale when the arena is reset.
  auto     img_bars  = ngfi::tmp_alloc<VkImageMemoryBarrier2>(pending_bars->npending_img_bars);
  auto     buf_bars  = ngfi::tmp_alloc<VkBufferMemoryBarrier2>(pending_bars->npending_buf_bars);
  uint32_t nimg_bars = 0u;
  uint32_t nbuf_bars = 0u;
  for (const ngfvk_barrier_data& barrier_ref : pending_bars->barriers) {
    const ngfvk_barrier_data* barrier = &barrier_ref;
    switch (barrier->res.type) {
    case NGFVK_SYNC_RES_IMAGE: {
      const ngf_image        img                     = barrier->res.data.img;
      VkImageMemoryBarrier2* image_barrier           = &img_bars[nimg_bars++];
      image_barrier->sType                           = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
      image_barrier->pNext                           = NULL;
      image_barrier->srcQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
      image_barrier->dstQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
      image_barrier->srcStageMask                    = barrier->src_stage_mask;
      image_barrier->dstStageMask                    = barrier->dst_stage_mask;
      image_barrier->srcAccessMask                   = barrier->src_access_mask;
      image_barrier->dstAccessMask                   = barrier->dst_access_mask;
      image_barrier->oldLayout                       = barrier->src_layout;
      image_barrier->newLayout                       = barrier->dst_layout;
      image_barrier->image                           = (VkImage)img->alloc.obj_handle;
      // The barrier always covers every layer and mip level of the image.
      image_barrier->subresourceRange.baseArrayLayer = 0u;
      image_barrier->subresourceRange.baseMipLevel   = 0u;
      image_barrier->subresourceRange.layerCount     = img->nlayers;
      image_barrier->subresourceRange.levelCount     = img->nlevels;
      // Derive the aspect mask from the image format: depth and/or stencil
      // bits when applicable, color otherwise.
      const bool is_depth                            = ngfvk_format_is_depth(img->vk_fmt);
      const bool is_stencil                          = ngfvk_format_is_stencil(img->vk_fmt);
      image_barrier->subresourceRange.aspectMask =
          (is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) |
          (is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : 0u) |
          ((!is_depth && !is_stencil) ? VK_IMAGE_ASPECT_COLOR_BIT : 0u);
      break;
    }
    case NGFVK_SYNC_RES_BUFFER: {
      const ngf_buffer        buf            = barrier->res.data.buf;
      VkBufferMemoryBarrier2* buffer_barrier = &buf_bars[nbuf_bars++];
      buffer_barrier->sType                  = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
      buffer_barrier->pNext                  = NULL;
      buffer_barrier->srcStageMask           = barrier->src_stage_mask;
      buffer_barrier->dstStageMask           = barrier->dst_stage_mask;
      buffer_barrier->srcQueueFamilyIndex    = VK_QUEUE_FAMILY_IGNORED;
      buffer_barrier->dstQueueFamilyIndex    = VK_QUEUE_FAMILY_IGNORED;
      buffer_barrier->srcAccessMask          = barrier->src_access_mask;
      buffer_barrier->dstAccessMask          = barrier->dst_access_mask;
      // The barrier always covers the entire buffer.
      buffer_barrier->offset                 = 0u;
      buffer_barrier->buffer                 = (VkBuffer)buf->alloc.obj_handle;
      buffer_barrier->size                   = buf->size;
      break;
    }
    default:
      assert(false);
      break;
    }
  }
  // The pending list has been fully translated; reset it for reuse.
  pending_bars->barriers.clear();
  pending_bars->npending_buf_bars = 0u;
  pending_bars->npending_img_bars = 0u;
  if (nbuf_bars > 0 || nimg_bars > 0) {
    const VkDependencyInfo dep_info = {
        .sType                    = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .pNext                    = NULL,
        .dependencyFlags          = 0u,
        .memoryBarrierCount       = 0u,
        .pMemoryBarriers          = NULL,
        .bufferMemoryBarrierCount = nbuf_bars,
        .pBufferMemoryBarriers    = buf_bars,
        .imageMemoryBarrierCount  = nimg_bars,
        .pImageMemoryBarriers     = img_bars};
    vkCmdPipelineBarrier2(cmd_buf, &dep_info);
  }
}

// Commits pending barriers via synchronization2 when the loader resolved
// vkCmdPipelineBarrier2, falling back to the legacy path otherwise.
static void ngfvk_sync_commit_pending_barriers(
    ngfvk_pending_barrier_list* pending_bars,
    VkCommandBuffer             cmd_buf) {
  if (vkCmdPipelineBarrier2) {
ngfvk_sync_commit_pending_barriers_sync2(pending_bars, cmd_buf);\n  } else {\n    ngfvk_sync_commit_pending_barriers_legacy(pending_bars, cmd_buf);\n  }\n}\n\nstatic void ngfvk_sync_req_batch_process(ngfvk_sync_req_batch* batch, ngf_cmd_buffer cmd_buf) {\n  for (size_t i = 0u; i < batch->npending_sync_reqs; ++i) {\n    auto sync_res_data = cmd_buf->local_res_states.get_prehashed(batch->sync_res_data_keys[i]);\n    if (!sync_res_data) {\n      NGFI_DIAG_WARNING(\n          \"Internal error - resource missing from cmd buffer's synchronization table?\");\n      assert(false);\n    }\n\n    const ngfvk_sync_req* sync_req = &batch->pending_sync_reqs[i];\n    const bool            fresh    = batch->freshness[i];\n    ngfvk_barrier_data    barrier_data;\n    const bool            barrier_needed =\n        ngfvk_sync_barrier(&sync_res_data->sync_state, sync_req, &barrier_data);\n    if (barrier_needed && !fresh) {\n      barrier_data.res.type = sync_res_data->res_type;\n      if (barrier_data.res.type == NGFVK_SYNC_RES_IMAGE) {\n        barrier_data.res.data.img = (ngf_image)sync_res_data->res_handle;\n        ++cmd_buf->pending_barriers.npending_img_bars;\n      } else {\n        barrier_data.res.data.buf = (ngf_buffer)sync_res_data->res_handle;\n        ++cmd_buf->pending_barriers.npending_buf_bars;\n      }\n      cmd_buf->pending_barriers.barriers.append(\n          barrier_data,\n          CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);\n      sync_res_data->had_barrier = true;\n    }\n    sync_res_data->pending_sync_req_idx = ~0u;\n\n    if (!sync_res_data->had_barrier) {\n      sync_res_data->expected_sync_req.barrier_masks.stage_mask |=\n          sync_req->barrier_masks.stage_mask;\n      sync_res_data->expected_sync_req.barrier_masks.access_mask |=\n          sync_req->barrier_masks.access_mask;\n      // Make note of the initial layout with which the resource is expected to be used.\n      if (sync_res_data->expected_sync_req.layout == 
VK_IMAGE_LAYOUT_UNDEFINED) {\n        sync_res_data->expected_sync_req.layout = sync_req->layout;\n      }\n    }\n  }\n}\n\nstatic void ngfvk_sync_req_batch_commit(ngfvk_sync_req_batch* batch, ngf_cmd_buffer cmd_buf) {\n  ngfvk_sync_req_batch_process(batch, cmd_buf);\n  ngfvk_sync_commit_pending_barriers(&cmd_buf->pending_barriers, cmd_buf->vk_cmd_buffer);\n}\n\nstatic void ngfvk_handle_single_sync_req(\n    ngf_cmd_buffer        cmd_buf,\n    const ngfvk_sync_res* res,\n    const ngfvk_sync_req* sync_req) {\n  bool                              fresh = false;\n  ngfvk_sync_res_hashtable::keyhash sync_res_data_key;\n  ngfvk_sync_req empty_sync_req = {.barrier_masks = {0u, 0u}, .layout = VK_IMAGE_LAYOUT_UNDEFINED};\n\n  ngfvk_sync_req_batch batch = {\n      .sync_res_data_keys = &sync_res_data_key,\n      .pending_sync_reqs  = &empty_sync_req,\n      .freshness          = &fresh,\n      .npending_sync_reqs = 0,\n      .nbuffer_sync_reqs  = 0,\n      .nimage_sync_reqs   = 0};\n\n  ngfvk_sync_req_batch_add_with_lookup(&batch, cmd_buf, res, sync_req);\n  ngfvk_sync_req_batch_commit(&batch, cmd_buf);\n}\n\nstatic ngfvk_sync_res ngfvk_sync_res_from_bind_op(const ngf_resource_bind_op* bind_op) {\n  switch (bind_op->type) {\n  case NGF_DESCRIPTOR_IMAGE:\n  case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER:\n  case NGF_DESCRIPTOR_STORAGE_IMAGE:\n    return ngfvk_sync_res_from_img(\n        bind_op->info.image_sampler.is_image_view ? 
bind_op->info.image_sampler.resource.view->src
                                                  : bind_op->info.image_sampler.resource.image);
    break;
  case NGF_DESCRIPTOR_STORAGE_BUFFER:
  case NGF_DESCRIPTOR_UNIFORM_BUFFER:
    return ngfvk_sync_res_from_buf(bind_op->info.buffer.buffer);
    break;
  case NGF_DESCRIPTOR_TEXEL_BUFFER:
    return ngfvk_sync_res_from_buf(bind_op->info.texel_buffer_view->buffer);
    break;
  default:
    break;
  }
  // Sentinel for descriptor types with no trackable resource (e.g. samplers).
  const ngfvk_sync_res none = {.data = {.buf = NULL}, .type = NGFVK_SYNC_RES_COUNT};
  return none;
}

// Returns a sync request corresponding to the given bind operation.
// The stage mask comes from the pipeline's per-binding accessor stages; the
// access mask and (for images) the required layout are derived from the
// descriptor type and the binding's read-only flag.
static ngfvk_sync_req ngfvk_sync_req_for_bind_op(
    const ngf_resource_bind_op*   bind_op,
    const ngfvk_generic_pipeline* pipeline) {
  ngfvk_sync_req sync_req;
  memset(&sync_req, 0, sizeof(sync_req));
  sync_req.layout = VK_IMAGE_LAYOUT_UNDEFINED;

  // Bind ops that target non-existent sets/bindings should be disregarded.
  if (bind_op->target_set >= pipeline->descriptor_set_layouts.size()) return sync_req;
  const ngfvk_desc_set_layout* layout = &pipeline->descriptor_set_layouts[bind_op->target_set];
  if (bind_op->target_binding >= layout->binding_properties.size()) return sync_req;

  const bool is_read_only = layout->binding_properties[bind_op->target_binding].readonly;

  sync_req.barrier_masks.stage_mask = pipeline->descriptor_set_layouts[bind_op->target_set]
                                          .binding_properties[bind_op->target_binding]
                                          .stage_accessors;

  switch (bind_op->type) {
  case NGF_DESCRIPTOR_UNIFORM_BUFFER: {
    sync_req.barrier_masks.access_mask = VK_ACCESS_UNIFORM_READ_BIT;
    break;
  }
  case NGF_DESCRIPTOR_IMAGE:
  case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {
    // Sampled images are read-only and must be in SHADER_READ_ONLY_OPTIMAL.
    sync_req.barrier_masks.access_mask = VK_ACCESS_SHADER_READ_BIT;
    sync_req.layout                    = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    break;
  }
  case NGF_DESCRIPTOR_STORAGE_BUFFER: {
    // Writable unless the binding is declared read-only in the pipeline layout.
    sync_req.barrier_masks.access_mask =
        VK_ACCESS_SHADER_READ_BIT | (is_read_only ? 0u : VK_ACCESS_SHADER_WRITE_BIT);
    break;
  }
  case NGF_DESCRIPTOR_STORAGE_IMAGE: {
    sync_req.barrier_masks.access_mask =
        VK_ACCESS_SHADER_READ_BIT | (is_read_only ? 0u : VK_ACCESS_SHADER_WRITE_BIT);
    // Storage images are accessed in the GENERAL layout.
    sync_req.layout = VK_IMAGE_LAYOUT_GENERAL;
    break;
  }
  case NGF_DESCRIPTOR_TEXEL_BUFFER: {
    sync_req.barrier_masks.access_mask = VK_ACCESS_SHADER_READ_BIT;
    break;
  }
  case NGF_DESCRIPTOR_SAMPLER:
    // Samplers reference no synchronizable resource.
    sync_req.barrier_masks.stage_mask = 0u;
    break;
  case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:
    sync_req.barrier_masks.stage_mask = 0u;
    break;
  default:
    assert(0);
  }
  return sync_req;
}

// Actually records renderpass commands into a command buffer.
static void ngfvk_cmd_buf_record_render_cmds(
    ngf_cmd_buffer                              buf,
    const ngfi::chunked_list<ngfvk_render_cmd>& cmd_list) {
  ngfi::tmp_arena().reset();

  for (const ngfvk_render_cmd& cmd_ref : cmd_list) {
    const ngfvk_render_cmd* cmd = &cmd_ref;
    switch (cmd->type) {
    case NGFVK_RENDER_CMD_BIND_PIPELINE: {
      buf->active_gfx_pipe = cmd->data.pipeline;
      // If we had a pipeline bound for which there have been resources bound, but no draw call
      // executed, commit those resources to actual descriptor sets and bind them so that the next
      // pipeline is able to "see" those resources, provided that it's compatible.
      if (buf->active_gfx_pipe && buf->npending_bind_ops > 0u) { ngfvk_execute_pending_binds(buf); }
      vkCmdBindPipeline(
          buf->vk_cmd_buffer,
          VK_PIPELINE_BIND_POINT_GRAPHICS,
          ((ngfvk_generic_pipeline*)(cmd->data.pipeline))->vk_pipeline);
      break;
    }
    case NGFVK_RENDER_CMD_SET_VIEWPORT: {
      // Clamp width/height to at least 1 - zero-sized viewports are invalid.
      const VkViewport viewport = {
          .x        = (float)cmd->data.rect.x,
          .y        = (float)cmd->data.rect.y,
          .width    = NGFI_MAX(1, (float)cmd->data.rect.width),
          .height   = NGFI_MAX(1, (float)cmd->data.rect.height),
          .minDepth = 0.0f,
          .maxDepth = 1.0f};
      vkCmdSetViewport(buf->vk_cmd_buffer, 0u, 1u, &viewport);
      break;
    }
    case NGFVK_RENDER_CMD_SET_SCISSOR: {
      const ngf_irect2d* r            = &cmd->data.rect;
      const VkRect2D     scissor_rect = {.offset = {r->x, r->y}, .extent = {r->width, r->height}};
      vkCmdSetScissor(buf->vk_cmd_buffer, 0u, 1u, &scissor_rect);
      break;
    }
    case NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE: {
      // Front and back faces are set independently from the recorded values.
      vkCmdSetStencilReference(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_FRONT_BIT,
          cmd->data.stencil_values.front);
      vkCmdSetStencilReference(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_BACK_BIT,
          cmd->data.stencil_values.back);
      break;
    }
    case NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK: {
      vkCmdSetStencilCompareMask(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_FRONT_BIT,
          cmd->data.stencil_values.front);
      vkCmdSetStencilCompareMask(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_BACK_BIT,
          cmd->data.stencil_values.back);
      break;
    }
    case NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK: {
      vkCmdSetStencilWriteMask(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_FRONT_BIT,
          cmd->data.stencil_values.front);
      vkCmdSetStencilWriteMask(
          buf->vk_cmd_buffer,
          VK_STENCIL_FACE_BACK_BIT,
          cmd->data.stencil_values.back);
      break;
    }
    case NGFVK_RENDER_CMD_SET_DEPTH_BIAS: {
      vkCmdSetDepthBias(
          buf->vk_cmd_buffer,
          cmd->data.depth_bias.const_factor,
          cmd->data.depth_bias.clamp,
          cmd->data.depth_bias.slope_factor);
      break;
    }
    case NGFVK_RENDER_CMD_BIND_RESOURCE: {
      // Resource binds are deferred; they are flushed by ngfvk_execute_pending_binds.
      ngfvk_cmd_bind_resources(buf, &cmd->data.bind_resource, 1u);
      break;
    }
    case NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER: {
      VkDeviceSize vkoffset = cmd->data.bind_attrib_buffer.offset;
      vkCmdBindVertexBuffers(
          buf->vk_cmd_buffer,
          cmd->data.bind_attrib_buffer.binding,
          1,
          (VkBuffer*)&cmd->data.bind_attrib_buffer.buffer->alloc.obj_handle,
          &vkoffset);
      break;
    }
    case NGFVK_RENDER_CMD_BIND_INDEX_BUFFER: {
      const VkIndexType idx_type = get_vk_index_type(cmd->data.bind_index_buffer.type);
      assert(idx_type == VK_INDEX_TYPE_UINT16 || idx_type == VK_INDEX_TYPE_UINT32);
      vkCmdBindIndexBuffer(
          buf->vk_cmd_buffer,
          (VkBuffer)cmd->data.bind_index_buffer.buffer->alloc.obj_handle,
          cmd->data.bind_index_buffer.offset,
          idx_type);
      break;
    }
    case NGFVK_RENDER_CMD_DRAW: {
      // Allocate and write descriptor sets.
      ngfvk_execute_pending_binds(buf);

      // With all resources bound, we may perform the draw operation.
      if (cmd->data.draw.indexed) {
        vkCmdDrawIndexed(
            buf->vk_cmd_buffer,
            cmd->data.draw.nelements,
            cmd->data.draw.ninstances,
            cmd->data.draw.first_element,
            0u,
            0u);
      } else {
        vkCmdDraw(
            buf->vk_cmd_buffer,
            cmd->data.draw.nelements,
            cmd->data.draw.ninstances,
            cmd->data.draw.first_element,
            0u);
      }
      break;
    }
    default:
      assert(false);
    }
  }
  ngfi::tmp_arena().reset();
}

// Opens a debug label region in the command buffer, if the debug utils
// extension entry point was resolved by the loader; no-op otherwise.
static void ngfvk_debug_label_begin(VkCommandBuffer b, const char* name) {
  if (vkCmdBeginDebugUtilsLabelEXT) {
    const VkDebugUtilsLabelEXT label = {

        .sType      = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
        .pNext      = NULL,
        .pLabelName = name,
        .color      = {0.f, 0.f, 0.f, 0.f}};
vkCmdBeginDebugUtilsLabelEXT(b, &label);\n  }\n}\n\nstatic void ngfvk_debug_label_end(VkCommandBuffer b) {\n  if (vkCmdEndDebugUtilsLabelEXT) { vkCmdEndDebugUtilsLabelEXT(b); }\n}\n\n// Submits all pending command buffers for the current frame.\nstatic ngf_error ngfvk_submit_pending_cmd_buffers(\n    ngfvk_frame_resources* frame_res,\n    VkSemaphore            wait_semaphore,\n    VkFence                signal_fence) {\n  ngf_error      err                 = NGF_ERROR_OK;\n  const uint32_t ncmd_bufs           = static_cast<uint32_t>(frame_res->submitted_cmd_bufs.size());\n  auto     submitted_cmd_buf_handles = ngfi::frame_alloc<VkCommandBuffer>(ncmd_bufs * 2u + 2u);\n  uint32_t submitted_cmd_buf_handles_idx = 0u;\n\n  {\n    // Check if dummy image needs to be transitioned from UNDEFINED to GENERAL layout,\n    // submit and aux command buffer with the appropriate barrier if so.\n    pthread_mutex_lock(&_vk.dummy_res.img_mu);\n    if (!_vk.dummy_res.image_transitioned) {\n      _vk.dummy_res.image_transitioned = true;\n      VkCommandBuffer aux_cmd_buf;\n      VkCommandPool   aux_cmd_pool;\n      ngfvk_cmd_buffer_allocate_for_frame(\n          CURRENT_CONTEXT->current_frame_token,\n          &aux_cmd_pool,\n          &aux_cmd_buf);\n      const VkImageMemoryBarrier bar[] = {\n          {\n              .sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n              .pNext               = NULL,\n              .srcAccessMask       = 0,\n              .dstAccessMask       = 0,\n              .oldLayout           = VK_IMAGE_LAYOUT_UNDEFINED,\n              .newLayout           = VK_IMAGE_LAYOUT_GENERAL,\n              .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,\n              .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,\n              .image               = (VkImage)_vk.dummy_res.img->alloc.obj_handle,\n              .subresourceRange =\n                  {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,\n                   .baseMipLevel   = 0u,\n     
              .levelCount     = 1u,\n                   .baseArrayLayer = 0u,\n                   .layerCount     = 1u},\n          },\n          {.sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n           .pNext               = NULL,\n           .srcAccessMask       = 0,\n           .dstAccessMask       = 0,\n           .oldLayout           = VK_IMAGE_LAYOUT_UNDEFINED,\n           .newLayout           = VK_IMAGE_LAYOUT_GENERAL,\n           .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,\n           .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,\n           .image               = (VkImage)_vk.dummy_res.cube->alloc.obj_handle,\n           .subresourceRange    = {\n                  .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,\n                  .baseMipLevel   = 0u,\n                  .levelCount     = 1u,\n                  .baseArrayLayer = 0u,\n                  .layerCount     = 6u}}};\n      vkCmdPipelineBarrier(aux_cmd_buf, 0, 0, 0, 0, NULL, 0, NULL, 2, bar);\n      vkEndCommandBuffer(aux_cmd_buf);\n      submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf;\n      frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool});\n    }\n    pthread_mutex_unlock(&_vk.dummy_res.img_mu);\n  }\n\n  ngfvk_pending_barrier_list pending_patch_barriers;\n  pending_patch_barriers.npending_img_bars = 0;\n  pending_patch_barriers.npending_buf_bars = 0;\n\n  for (size_t c = 0; c < frame_res->submitted_cmd_bufs.size(); ++c) {\n    ngf_cmd_buffer cmd_buf = frame_res->submitted_cmd_bufs[c];\n    ngfi::tmp_arena().reset();\n\n    for (auto& entry : cmd_buf->local_res_states) {\n      ngfvk_sync_res_data* cmd_buf_res_state = &entry.value;\n      ngfvk_sync_state*    global_sync_state =\n          cmd_buf_res_state->res_type == NGFVK_SYNC_RES_IMAGE\n                 ? 
&(((ngf_image)cmd_buf_res_state->res_handle)->sync_state)
                 : &(((ngf_buffer)cmd_buf_res_state->res_handle)->sync_state);
      // If the global state of the resource doesn't match what this command
      // buffer expected at its first use, queue a "patch" barrier to be
      // recorded in an auxiliary command buffer ahead of this one.
      ngfvk_barrier_data patch_barrier_data;
      if (ngfvk_sync_barrier(
              global_sync_state,
              &cmd_buf_res_state->expected_sync_req,
              &patch_barrier_data)) {
        patch_barrier_data.res.type = cmd_buf_res_state->res_type;
        if (patch_barrier_data.res.type == NGFVK_SYNC_RES_IMAGE) {
          patch_barrier_data.res.data.img = (ngf_image)cmd_buf_res_state->res_handle;
          pending_patch_barriers.npending_img_bars++;
        } else {
          patch_barrier_data.res.data.buf = (ngf_buffer)cmd_buf_res_state->res_handle;
          pending_patch_barriers.npending_buf_bars++;
        }
        pending_patch_barriers.barriers.append(
            patch_barrier_data,
            CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
      }
      // Merge the command buffer's final local state into the global state:
      // a write in the command buffer replaces the global state outright
      // (preserving the skip_hazard_tracking flag), while pure reads are
      // OR-ed into the global reader masks.
      if (cmd_buf_res_state->sync_state.last_writer_masks.access_mask != 0) {
        const bool skip_hazard_tracking = global_sync_state->skip_hazard_tracking;
        *global_sync_state = cmd_buf_res_state->sync_state;
        global_sync_state->skip_hazard_tracking = skip_hazard_tracking;
      } else {
        global_sync_state->active_readers_masks.access_mask |=
            cmd_buf_res_state->sync_state.active_readers_masks.access_mask;
        global_sync_state->per_stage_readers_mask |=
            cmd_buf_res_state->sync_state.per_stage_readers_mask;
      }
    }
    // Record any required patch barriers into an aux command buffer submitted
    // immediately before this command buffer.
    if (pending_patch_barriers.npending_buf_bars + pending_patch_barriers.npending_img_bars > 0u) {
      VkCommandBuffer aux_cmd_buf;
      VkCommandPool   aux_cmd_pool;
      ngfvk_cmd_buffer_allocate_for_frame(
          CURRENT_CONTEXT->current_frame_token,
          &aux_cmd_pool,
          &aux_cmd_buf);
      ngfvk_debug_label_begin(aux_cmd_buf, "ngf - patch barrier cmd buffer");
      ngfvk_sync_commit_pending_barriers(&pending_patch_barriers, aux_cmd_buf);
      ngfvk_debug_label_end(aux_cmd_buf);
      vkEndCommandBuffer(aux_cmd_buf);
      submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf;

      frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool});
    }
    pending_patch_barriers.barriers.clear();
    submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = cmd_buf->vk_cmd_buffer;
    NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_SUBMITTED);
    // Reset the command buffer's bookkeeping; the underlying Vulkan command
    // buffer and pool are retired for reuse once the frame completes.
    cmd_buf->active_gfx_pipe     = NULL;
    cmd_buf->active_compute_pipe = NULL;
    cmd_buf->active_rt           = NULL;
    ngfvk_cmd_buf_reset_res_states(cmd_buf);
    frame_res->retire.append(
        ngfvk_cmd_buf_with_pool {cmd_buf->vk_cmd_buffer, cmd_buf->vk_cmd_pool});

    cmd_buf->vk_cmd_buffer = VK_NULL_HANDLE;
    cmd_buf->vk_cmd_pool   = VK_NULL_HANDLE;
    if (cmd_buf->destroy_on_submit) { ngf_destroy_cmd_buffer(cmd_buf); }
  }
  frame_res->submitted_cmd_bufs.clear();

  // Transition the swapchain image to VK_IMAGE_LAYOUT_PRESENT_SRC if necessary.
  const bool needs_present = CURRENT_CONTEXT->swapchain && wait_semaphore != VK_NULL_HANDLE;
  if (needs_present) {
    if (CURRENT_CONTEXT->swapchain->image_idx == ngfvk::global::invalid_idx) ngfvk_maybe_acquire_swapchain_image();
    ngf_image swapchain_image =
        CURRENT_CONTEXT->swapchain->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx].get();
    if (swapchain_image->sync_state.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
      VkCommandBuffer aux_cmd_buf;
      VkCommandPool   aux_cmd_pool;
      ngfvk_cmd_buffer_allocate_for_frame(
          CURRENT_CONTEXT->current_frame_token,
          &aux_cmd_pool,
          &aux_cmd_buf);
      const VkImageMemoryBarrier swapchain_mem_bar = {
          .sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
          .pNext               = NULL,
          .srcAccessMask       = swapchain_image->sync_state.last_writer_masks.access_mask,
          .dstAccessMask       = 0u,
          .oldLayout           = swapchain_image->sync_state.layout,
          .newLayout           = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
          .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
          .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
          .image               = (VkImage)swapchain_image->alloc.obj_handle,
          .subresourceRange    = {
                 .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
                 .baseMipLevel   = 0u,
                 .levelCount     = 1u,
                 .baseArrayLayer = 0u,
                 .layerCount     = 1u}};
      // NOTE(review): if the swapchain image was never written this frame,
      // last_writer_masks.stage_mask may be 0, which plain vkCmdPipelineBarrier
      // does not allow - confirm whether that situation can occur here.
      vkCmdPipelineBarrier(
          aux_cmd_buf,
          swapchain_image->sync_state.last_writer_masks.stage_mask,
          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
          0u,
          0u,
          NULL,
          0u,
          NULL,
          1u,
          &swapchain_mem_bar);
      vkEndCommandBuffer(aux_cmd_buf);
      memset(&swapchain_image->sync_state, 0, sizeof(swapchain_image->sync_state));
      swapchain_image->sync_state.layout                         = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
      submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf;
      frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool});
    }
  }

  // Single queue submission for all collected command buffers; when presenting,
  // wait on the image-acquire semaphore and signal the per-image submit semaphore.
  const VkPipelineStageFlags wait_masks[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
  const VkSubmitInfo         submit_info  = {
               .sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO,
               .pNext                = NULL,
               .waitSemaphoreCount   = needs_present ? 1u : 0u,
               .pWaitSemaphores      = needs_present ? &wait_semaphore : NULL,
               .pWaitDstStageMask    = wait_masks,
               .commandBufferCount   = submitted_cmd_buf_handles_idx,
               .pCommandBuffers      = submitted_cmd_buf_handles,
               .signalSemaphoreCount = needs_present ? 1u : 0u,
               .pSignalSemaphores    = needs_present ? &CURRENT_CONTEXT->swapchain->submit_sems[CURRENT_CONTEXT->swapchain->image_idx] : NULL};

  VkResult submit_result = vkQueueSubmit(_vk.gfx_queue, 1, &submit_info, signal_fence);

  if (submit_result != VK_SUCCESS) err = NGF_ERROR_INVALID_OPERATION;
  return err;
}

// Resets every descriptor pool in the list and clears its utilization
// counters, making the first pool in the list active again.
static void ngfvk_reset_desc_pools_list(ngfvk_desc_pools_list* superpool) {
  for (ngfvk_desc_pool* pool = superpool->list; pool; pool = pool->next) {
    vkResetDescriptorPool(_vk.device, pool->vk_pool, 0u);
    memset(&pool->utilization, 0, sizeof(pool->utilization));
  }
  superpool->active_pool = superpool->list;
}

#if defined(__APPLE__)
void* ngfvk_create_ca_metal_layer(const ngf_swapchain_info*);
#endif

void ngfi_dump_sys_alloc_dbgstats(FILE* out);

// Returns whether hazard tracking should be skipped for the resource
// referenced by the given bind operation.
static bool ngfi_skip_hazard_tracking_for_bind_op(const ngf_resource_bind_op& op) {
  switch (op.type) {
  case NGF_DESCRIPTOR_UNIFORM_BUFFER:
  case NGF_DESCRIPTOR_STORAGE_BUFFER:
    return op.info.buffer.buffer->sync_state.skip_hazard_tracking;
  case NGF_DESCRIPTOR_STORAGE_IMAGE:
  case NGF_DESCRIPTOR_IMAGE:
  case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER:
    return !op.info.image_sampler.is_image_view
            ?
op.info.image_sampler.resource.image->sync_state.skip_hazard_tracking\n            : op.info.image_sampler.resource.view->src->sync_state.skip_hazard_tracking;\n  default: return false;\n  }\n}\n\n#pragma endregion\n\n#pragma region external_funcs\n\nextern \"C\" ngf_error\nngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT {\n  if (!ngfvk_init_loader_if_necessary()) { return NGF_ERROR_OPERATION_FAILED; }\n  if (ngfvk::global::num_phys_devices == 0) {\n    ngf_error  err          = NGF_ERROR_OK;\n    VkInstance tmp_instance = VK_NULL_HANDLE;\n\n    VkResult vk_err = ngfvk_create_instance(false, false, &tmp_instance, NULL);\n    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }\n\n    auto tmp_proc_addr = [tmp_instance]<class PtrT>(PtrT, const char* name) {\n      return (PtrT)vkGetInstanceProcAddr(tmp_instance, name);\n    };\n#define NGFVK_PROCADDR(name) (tmp_proc_addr((PFN_vk##name) nullptr, \"vk\" #name))\n    auto enumerate_vk_phys_devs     = NGFVK_PROCADDR(EnumeratePhysicalDevices);\n    auto get_vk_phys_dev_properties = NGFVK_PROCADDR(GetPhysicalDeviceProperties);\n    auto get_vk_phys_dev_features   = NGFVK_PROCADDR(GetPhysicalDeviceFeatures);\n    auto get_vk_phys_dev_mem_props  = NGFVK_PROCADDR(GetPhysicalDeviceMemoryProperties);\n    auto enumerate_extension_props  = NGFVK_PROCADDR(EnumerateDeviceExtensionProperties);\n    auto destroy_vk_instance        = NGFVK_PROCADDR(DestroyInstance);\n#undef NGFVK_PROCADDR\n    ngfvk::global::num_phys_devices = ngfvk::global::max_phys_dev;\n    VkPhysicalDevice phys_devs[ngfvk::global::max_phys_dev];\n    vk_err = enumerate_vk_phys_devs(tmp_instance, &ngfvk::global::num_phys_devices, phys_devs);\n    if (vk_err == VK_SUCCESS) {\n      for (size_t i = 0; i < ngfvk::global::num_phys_devices; ++i) {\n        VkPhysicalDeviceProperties       dev_props;\n        VkPhysicalDeviceFeatures         dev_features;\n        VkPhysicalDeviceMemoryProperties mem_props;\n        
get_vk_phys_dev_properties(phys_devs[i], &dev_props);\n        get_vk_phys_dev_features(phys_devs[i], &dev_features);\n        get_vk_phys_dev_mem_props(phys_devs[i], &mem_props);\n        ngfvk_device_info* ngfdevinfo = &ngfvk::global::phys_device_infos[i];\n        ngfdevinfo->device_id         = dev_props.deviceID;\n        ngfdevinfo->vendor_id         = dev_props.vendorID;\n        ngf_device* ngfdev            = &ngfvk::global::phys_devices[i];\n        ngfdev->handle                = (ngf_device_handle)i;\n        switch (dev_props.deviceType) {\n        case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:\n          ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_HIGH;\n          break;\n        case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:\n        case VK_PHYSICAL_DEVICE_TYPE_CPU:\n          ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_LOW;\n          break;\n        default:\n          ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN;\n        }\n        strncpy(\n            ngfdev->name,\n            dev_props.deviceName,\n            NGFI_MIN(NGF_DEVICE_NAME_MAX_LENGTH, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE));\n        ngf_device_capabilities*      devcaps     = &ngfdev->capabilities;\n        const VkPhysicalDeviceLimits* vkdevlimits = &dev_props.limits;\n        // Populate basic device capabilities.\n        devcaps->clipspace_z_zero_to_one = true;\n        devcaps->uniform_buffer_offset_alignment =\n            (size_t)vkdevlimits->minUniformBufferOffsetAlignment;\n        devcaps->storage_buffer_offset_alignment =\n            (size_t)vkdevlimits->minStorageBufferOffsetAlignment;\n        devcaps->texel_buffer_offset_alignment = (size_t)vkdevlimits->minTexelBufferOffsetAlignment;\n        devcaps->max_vertex_input_attributes_per_pipeline = vkdevlimits->maxVertexInputAttributes;\n        devcaps->max_sampled_images_per_stage  = vkdevlimits->maxPerStageDescriptorSampledImages;\n        devcaps->max_samplers_per_stage        = 
vkdevlimits->maxPerStageDescriptorSamplers;\n        devcaps->max_fragment_input_components = vkdevlimits->maxFragmentInputComponents;\n        devcaps->max_fragment_inputs =\n            (devcaps->max_fragment_input_components) / 4; /* as per vk spec. */\n        devcaps->max_1d_image_dimension          = vkdevlimits->maxImageDimension1D;\n        devcaps->max_2d_image_dimension          = vkdevlimits->maxImageDimension2D;\n        devcaps->max_3d_image_dimension          = vkdevlimits->maxImageDimension3D;\n        devcaps->max_cube_image_dimension        = vkdevlimits->maxImageDimensionCube;\n        devcaps->max_image_layers                = vkdevlimits->maxImageArrayLayers;\n        devcaps->max_color_attachments_per_pass  = vkdevlimits->maxColorAttachments;\n        devcaps->max_uniform_buffers_per_stage   = vkdevlimits->maxPerStageDescriptorUniformBuffers;\n        devcaps->max_sampler_anisotropy          = vkdevlimits->maxSamplerAnisotropy;\n        devcaps->max_uniform_buffer_range        = vkdevlimits->maxUniformBufferRange;\n        devcaps->cubemap_arrays_supported        = dev_features.imageCubeArray;\n        devcaps->framebuffer_color_sample_counts = vkdevlimits->framebufferColorSampleCounts;\n        devcaps->framebuffer_depth_sample_counts = vkdevlimits->framebufferDepthSampleCounts;\n        devcaps->texture_color_sample_counts     = vkdevlimits->sampledImageColorSampleCounts;\n        devcaps->texture_depth_sample_counts     = vkdevlimits->sampledImageDepthSampleCounts;\n\n        devcaps->max_supported_framebuffer_color_sample_count =\n            ngfi_get_highest_sample_count(devcaps->framebuffer_color_sample_counts);\n        devcaps->max_supported_framebuffer_depth_sample_count =\n            ngfi_get_highest_sample_count(devcaps->framebuffer_depth_sample_counts);\n        devcaps->max_supported_texture_color_sample_count =\n            ngfi_get_highest_sample_count(devcaps->texture_color_sample_counts);\n        
devcaps->max_supported_texture_depth_sample_count =\n            ngfi_get_highest_sample_count(devcaps->texture_depth_sample_counts);\n\n        // Device capabilities: detect device-local host-visible memory.\n        devcaps->device_local_memory_is_host_visible = false;\n        for (size_t mem_type_idx = 0u; !devcaps->device_local_memory_is_host_visible &&\n                                       mem_type_idx < mem_props.memoryTypeCount;\n             ++mem_type_idx) {\n          const VkMemoryType*         mem_type = &mem_props.memoryTypes[mem_type_idx];\n          const VkMemoryPropertyFlags local_visible =\n              VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;\n          if ((mem_type->propertyFlags & local_visible) == local_visible) {\n            // Some systems only expose <= 256M device-local host-visible memory, we don't want\n            // that. Only set the cap flag if a large region of device-local memory is also\n            // host-visible.\n            devcaps->device_local_memory_is_host_visible =\n                mem_props.memoryHeaps[mem_type->heapIndex].size > (256u * 1024u * 1024u);\n          }\n        }\n\n        // Device capabilities: determine enabled extensions.\n        ngfi::array<VkExtensionProperties, ngfi::system_alloc_callbacks> supported_phys_dev_exts;\n        uint32_t nsupported_phys_dev_exts = 0u;\n        vk_err =\n            enumerate_extension_props(phys_devs[i], nullptr, &nsupported_phys_dev_exts, nullptr);\n        supported_phys_dev_exts.resize(nsupported_phys_dev_exts);\n        vk_err = enumerate_extension_props(\n            phys_devs[i],\n            nullptr,\n            &nsupported_phys_dev_exts,\n            supported_phys_dev_exts.data());\n        auto ext_supported = [&](const char* ext_name) {\n          for (const VkExtensionProperties& supported_ext : supported_phys_dev_exts) {\n            if (strcmp(ext_name, supported_ext.extensionName) == 0) { return true; }\n         
 }\n          return false;\n        };\n        auto& enabled_exts     = ngfdevinfo->enabled_ext_names;\n        auto  add_optional_ext = [&enabled_exts, &ext_supported](const char* ext_name) {\n          const bool r = ext_supported(ext_name);\n          if (r) enabled_exts.push_back(ext_name);\n          return r;\n        };\n        enabled_exts.push_back(\"VK_KHR_maintenance1\");\n        enabled_exts.push_back(\"VK_KHR_swapchain\");\n        const bool shader_float16_int8_supported = add_optional_ext(\"VK_KHR_shader_float16_int8\");\n        const bool sync2_supported               = add_optional_ext(\"VK_KHR_synchronization2\");\n        const bool inline_ray_tracing_supported =\n            add_optional_ext(\"VK_KHR_acceleration_structure\") &&\n            add_optional_ext(\"VK_KHR_buffer_device_address\") &&\n            add_optional_ext(\"VK_KHR_deferred_host_operations\") &&\n            add_optional_ext(\"VK_KHR_spirv_1_4\") &&\n            add_optional_ext(\"VK_KHR_shader_float_controls\") &&\n            add_optional_ext(\"VK_KHR_ray_query\") && add_optional_ext(\"VK_EXT_descriptor_indexing\");\n\n        // Device capabilities: features structs.\n        const VkBool32 enable_cubemap_arrays =\n            devcaps->cubemap_arrays_supported ? 
VK_TRUE : VK_FALSE;\n        ngfdevinfo->required_features = VkPhysicalDeviceFeatures {\n            .imageCubeArray                       = enable_cubemap_arrays,\n            .independentBlend                     = VK_TRUE,\n            .depthBiasClamp                       = VK_TRUE,\n            .samplerAnisotropy                    = VK_TRUE,\n            .shaderStorageImageReadWithoutFormat  = VK_TRUE,\n            .shaderStorageImageWriteWithoutFormat = VK_TRUE};\n        ngfdevinfo->sf16i8_features = VkPhysicalDeviceShaderFloat16Int8Features {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES};\n        ngfdevinfo->sync2_features = VkPhysicalDeviceSynchronization2Features {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES};\n        ngfdevinfo->bda_features = VkPhysicalDeviceBufferDeviceAddressFeatures {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES};\n        ngfdevinfo->accls_features = VkPhysicalDeviceAccelerationStructureFeaturesKHR {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR};\n        ngfdevinfo->ray_query_features = VkPhysicalDeviceRayQueryFeaturesKHR {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR};\n        void* features_structs      = nullptr;\n        auto  append_feature_struct = [&features_structs](auto& s) {\n          s.pNext          = features_structs;\n          features_structs = &s;\n        };\n        if (shader_float16_int8_supported) append_feature_struct(ngfdevinfo->sf16i8_features);\n        if (sync2_supported) append_feature_struct(ngfdevinfo->sync2_features);\n        if (inline_ray_tracing_supported) {\n          append_feature_struct(ngfdevinfo->bda_features);\n          append_feature_struct(ngfdevinfo->accls_features);\n          append_feature_struct(ngfdevinfo->ray_query_features);\n        }\n        
devcaps->supports_inline_raytracing = inline_ray_tracing_supported;\n        ngfdevinfo->phys_dev_features2 = VkPhysicalDeviceFeatures2 {\n            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,\n            .pNext = features_structs};\n      }\n    } else {\n      err = NGF_ERROR_OPERATION_FAILED;\n    }\n    if (tmp_instance != VK_NULL_HANDLE) { destroy_vk_instance(tmp_instance, NULL); }\n    if (err != NGF_ERROR_OK) return err;\n  }\n  if (devices) { *devices = ngfvk::global::phys_devices; }\n  if (ndevices) { *ndevices = (uint32_t)ngfvk::global::num_phys_devices; }\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT {\n  assert(init_info);\n\n  // Sanity checks.\n  if (_vk.instance != VK_NULL_HANDLE) {\n    // Disallow double initialization.\n    NGFI_DIAG_ERROR(\"double-initialization detected. `ngf_initialize` may only be called once.\")\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n\n  // Install user-provided diagnostic callbacks and set preferred log verbosity.\n  if (init_info->diag_info != nullptr) {\n    ngfi_diag_info = *init_info->diag_info;\n  } else {\n    ngfi_diag_info.callback  = nullptr;\n    ngfi_diag_info.userdata  = nullptr;\n    ngfi_diag_info.verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT;\n  }\n  NGFI_DIAG_INFO(\"Initializing nicegraf.\");\n\n  // Install user-provided allocation callbacks.\n  ngfi_set_allocation_callbacks(init_info->allocation_callbacks);\n\n  // Engage RenderDoc if requested.\n  if (init_info->renderdoc_info) {\n    ngfi_module_handle ngf_renderdoc_mod =\n        LoadLibraryA(init_info->renderdoc_info->renderdoc_lib_path);\n    if (ngf_renderdoc_mod != NULL) {\n      pRENDERDOC_GetAPI RENDERDOC_GetAPI =\n          (pRENDERDOC_GetAPI)GetProcAddress(ngf_renderdoc_mod, \"RENDERDOC_GetAPI\");\n      if (!RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_6_0, (void**)&_renderdoc.api)) {\n        return NGF_ERROR_OBJECT_CREATION_FAILED;\n      }\n      if 
(init_info->renderdoc_info->renderdoc_destination_template) {\n        _renderdoc.api->SetCaptureFilePathTemplate(\n            init_info->renderdoc_info->renderdoc_destination_template);\n      }\n      _renderdoc.is_capturing = false;\n      _renderdoc.capture_next = false;\n    }\n  }\n\n  // Load basic vk entrypoints.\n  if (!ngfvk_init_loader_if_necessary()) {\n    NGFI_DIAG_ERROR(\"Failed to initialize vulkan loader!\");\n    return NGF_ERROR_OPERATION_FAILED;\n  }\n\n  // Create vk instance, attempting to enable api validation according to user preference.\n  bool           validation_enabled     = false;\n  const VkResult instance_create_result = ngfvk_create_instance(\n      ngfi_diag_info.verbosity == NGF_DIAGNOSTICS_VERBOSITY_DETAILED,\n      ngfi_diag_info.enable_debug_groups,\n      &_vk.instance,\n      &validation_enabled);\n  if (instance_create_result != VK_SUCCESS) {\n    NGFI_DIAG_INFO(\"Failed to set up a new vulkan instance.\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  vkl_init_instance(\n      _vk.instance);  // load instance-level Vulkan functions into the global namespace.\n\n  // If validation was enabled, install a debug callback to forward\n  // vulkan debug messages to the user.\n  if (validation_enabled) {\n    NGFI_DIAG_INFO(\"vulkan validation layers enabled\");\n    const VkDebugUtilsMessengerCreateInfoEXT debug_callback_info = {\n        .sType           = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,\n        .pNext           = NULL,\n        .flags           = 0u,\n        .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |\n                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |\n                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |\n                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,\n\n        .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |\n                       VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |\n     
                  VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,\n        .pfnUserCallback = ngfvk_debug_message_callback,\n        .pUserData       = NULL};\n    vkCreateDebugUtilsMessengerEXT(_vk.instance, &debug_callback_info, NULL, &_vk.debug_messenger);\n  } else {\n    NGFI_DIAG_INFO(\"vulkan validation is disabled\");\n  }\n\n  // Obtain a list of available physical devices.\n  uint32_t         nphysdev = ngfvk::global::max_phys_dev;\n  VkPhysicalDevice physdevs[ngfvk::global::max_phys_dev];\n  VkResult         vk_err = vkEnumeratePhysicalDevices(_vk.instance, &nphysdev, physdevs);\n  if (vk_err != VK_SUCCESS) {\n    NGFI_DIAG_ERROR(\"Failed to enumerate Vulkan physical devices, VK error %d.\", vk_err);\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n\n  // Sanity-check the requested device handle.\n  const uint32_t device_idx = (uint32_t)init_info->device;\n  if (device_idx >= ngfvk::global::num_phys_devices) { return NGF_ERROR_INVALID_OPERATION; }\n\n  // Pick a suitable physical device based on user's preference.\n  uint32_t                   vk_device_index = ngfvk::global::invalid_idx;\n  ngfvk_device_info*         ngfdevinfo      = &ngfvk::global::phys_device_infos[device_idx];\n  VkPhysicalDeviceProperties phys_dev_properties;\n  for (uint32_t i = 0; i < nphysdev && vk_device_index == ngfvk::global::invalid_idx; ++i) {\n    vkGetPhysicalDeviceProperties(physdevs[i], &phys_dev_properties);\n    if (phys_dev_properties.deviceID == ngfdevinfo->device_id &&\n        phys_dev_properties.vendorID == ngfdevinfo->vendor_id) {\n      vk_device_index = i;\n    }\n  }\n  if (vk_device_index == ngfvk::global::invalid_idx) {\n    NGFI_DIAG_ERROR(\"Failed to find a suitable physical device.\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  _vk.phys_dev = physdevs[vk_device_index];\n\n  // Obtain a list of queue family properties from the device.\n  uint32_t num_queue_families = 0U;\n  vkGetPhysicalDeviceQueueFamilyProperties(_vk.phys_dev, &num_queue_families, 
NULL);\n  VkQueueFamilyProperties* queue_families =\n      ngfi::tmp_arena().alloc<VkQueueFamilyProperties>(num_queue_families);\n  assert(queue_families);\n  vkGetPhysicalDeviceQueueFamilyProperties(_vk.phys_dev, &num_queue_families, queue_families);\n\n  // Pick suitable queue families for graphics and present, ensuring graphics also supports compute.\n  uint32_t gfx_family_idx     = ngfvk::global::invalid_idx;\n  uint32_t present_family_idx = ngfvk::global::invalid_idx;\n  for (uint32_t q = 0; queue_families && q < num_queue_families; ++q) {\n    const VkQueueFlags flags      = queue_families[q].queueFlags;\n    const bool         is_gfx     = (flags & VK_QUEUE_GRAPHICS_BIT) != 0;\n    const bool         is_present = ngfvk_query_presentation_support(_vk.phys_dev, q);\n    const bool         is_compute = (flags & VK_QUEUE_COMPUTE_BIT) != 0;\n    if (gfx_family_idx == ngfvk::global::invalid_idx && is_gfx && is_compute) {\n      gfx_family_idx = q;\n    }\n    if (present_family_idx == ngfvk::global::invalid_idx && is_present) { present_family_idx = q; }\n  }\n  queue_families = NULL;\n  if (gfx_family_idx == ngfvk::global::invalid_idx ||\n      present_family_idx == ngfvk::global::invalid_idx) {\n    NGFI_DIAG_ERROR(\"Could not find a suitable queue family for graphics and/or presentation.\");\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  _vk.gfx_family_idx     = gfx_family_idx;\n  _vk.present_family_idx = present_family_idx;\n\n  // Create logical device.\n  const float             queue_prio           = 1.0f;\n  const bool              same_gfx_and_present = _vk.gfx_family_idx == _vk.present_family_idx;\n  const uint32_t          num_queue_infos      = (same_gfx_and_present ? 
1u : 2u);\n  VkDeviceQueueCreateInfo queue_infos[]        = {\n      {.sType            = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n              .pNext            = NULL,\n              .flags            = 0,\n              .queueFamilyIndex = _vk.present_family_idx,\n              .queueCount       = 1,\n              .pQueuePriorities = &queue_prio},\n      {.sType            = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n              .pNext            = NULL,\n              .flags            = 0,\n              .queueFamilyIndex = _vk.gfx_family_idx,\n              .queueCount       = 1,\n              .pQueuePriorities = &queue_prio}};\n  if (vkGetPhysicalDeviceFeatures2KHR) {\n    vkGetPhysicalDeviceFeatures2KHR(_vk.phys_dev, &ngfdevinfo->phys_dev_features2);\n  }\n\n  const VkDeviceCreateInfo dev_info = {\n      .sType                   = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n      .pNext                   = ngfdevinfo->phys_dev_features2.pNext,\n      .flags                   = 0,\n      .queueCreateInfoCount    = num_queue_infos,\n      .pQueueCreateInfos       = &queue_infos[same_gfx_and_present ? 
1u : 0u],\n      .enabledLayerCount       = 0,\n      .ppEnabledLayerNames     = NULL,\n      .enabledExtensionCount   = static_cast<uint32_t>(ngfdevinfo->enabled_ext_names.size()),\n      .ppEnabledExtensionNames = ngfdevinfo->enabled_ext_names.data(),\n      .pEnabledFeatures        = &ngfdevinfo->required_features};\n  vk_err = vkCreateDevice(_vk.phys_dev, &dev_info, NULL, &_vk.device);\n\n  if (vk_err != VK_SUCCESS) {\n    NGFI_DIAG_ERROR(\"Failed to create a Vulkan device, VK error %d.\", vk_err);\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n\n  // Load device-level entry points.\n  vkl_init_device(_vk.device, ngfdevinfo->sync2_features.synchronization2);\n\n  // Set up VMA.\n  VmaVulkanFunctions vma_vk_fns = {\n      .vkGetInstanceProcAddr = vkGetInstanceProcAddr,\n      .vkGetDeviceProcAddr   = vkGetDeviceProcAddr,\n  };\n  VmaAllocatorCreateInfo vma_info = {\n      .flags = ngfvk::global::phys_devices[device_idx].capabilities.supports_inline_raytracing\n                   ? VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT\n                   : 0u,\n      .physicalDevice              = _vk.phys_dev,\n      .device                      = _vk.device,\n      .preferredLargeHeapBlockSize = 0u,\n      .pAllocationCallbacks        = NULL,\n      .pDeviceMemoryCallbacks      = NULL,\n      .pHeapSizeLimit              = NULL,\n      .pVulkanFunctions            = &vma_vk_fns,\n      .instance                    = _vk.instance,\n      .vulkanApiVersion            = 0};\n  vk_err = vmaCreateAllocator(&vma_info, &_vk.allocator);\n\n  // Obtain queue handles.\n  vkGetDeviceQueue(_vk.device, _vk.gfx_family_idx, 0, &_vk.gfx_queue);\n  vkGetDeviceQueue(_vk.device, _vk.present_family_idx, 0, &_vk.present_queue);\n\n  // Populate device capabilities.\n  ngfvk::global::phys_device_caps = ngfvk::global::phys_devices[init_info->device].capabilities;\n\n  // Create dummy objects to pre-bind in fresh descriptor sets.\n  const ngf_image_info dummy_img_info = {\n      .type        
 = NGF_IMAGE_TYPE_IMAGE_2D,\n      .extent       = {1u, 1u, 1u},\n      .nmips        = 1u,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_R8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_STORAGE};\n  const ngf_image_info dummy_cube_info = {\n      .type         = NGF_IMAGE_TYPE_CUBE,\n      .extent       = {1u, 1u, 1u},\n      .nmips        = 1u,\n      .nlayers      = 1u,\n      .format       = NGF_IMAGE_FORMAT_R8,\n      .sample_count = NGF_SAMPLE_COUNT_1,\n      .usage_hint   = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_STORAGE};\n  const ngf_buffer_info dummy_buf_info = {\n      .size         = 1u,\n      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,\n      .buffer_usage = NGF_BUFFER_USAGE_STORAGE_BUFFER | NGF_BUFFER_USAGE_UNIFORM_BUFFER |\n                      NGF_BUFFER_USAGE_TEXEL_BUFFER};\n  ngf_sampler_info dummy_samp_info;\n  memset(&dummy_samp_info, 0, sizeof(dummy_samp_info));\n  ngf_create_image(&dummy_img_info, &_vk.dummy_res.img);\n  ngf_create_image(&dummy_cube_info, &_vk.dummy_res.cube);\n  ngf_create_buffer(&dummy_buf_info, &_vk.dummy_res.buf);\n  ngf_create_sampler(&dummy_samp_info, &_vk.dummy_res.samp);\n  const ngf_texel_buffer_view_info tbuf_info =\n      {.buffer = _vk.dummy_res.buf, .offset = 0u, .size = 1u, .texel_format = NGF_IMAGE_FORMAT_R8};\n  ngf_create_texel_buffer_view(&tbuf_info, &_vk.dummy_res.tbuf);\n  _vk.dummy_res.buf_info.buffer            = (VkBuffer)_vk.dummy_res.buf->alloc.obj_handle;\n  _vk.dummy_res.buf_info.offset            = 0u;\n  _vk.dummy_res.buf_info.range             = 1u;\n  _vk.dummy_res.img_info.imageLayout       = VK_IMAGE_LAYOUT_GENERAL;\n  _vk.dummy_res.img_info.imageView         = _vk.dummy_res.img->vkview;\n  _vk.dummy_res.img_info.sampler           = VK_NULL_HANDLE;\n  _vk.dummy_res.cube_info.imageLayout      = VK_IMAGE_LAYOUT_GENERAL;\n  _vk.dummy_res.cube_info.imageView        = _vk.dummy_res.cube->vkview;\n  
_vk.dummy_res.cube_info.sampler          = VK_NULL_HANDLE;\n  _vk.dummy_res.img_arr_info               = _vk.dummy_res.img_info;\n  _vk.dummy_res.img_arr_info.imageView     = _vk.dummy_res.img->vkview_arrayed;\n  _vk.dummy_res.cube_arr_info              = _vk.dummy_res.cube_info;\n  _vk.dummy_res.cube_arr_info.imageView    = _vk.dummy_res.cube->vkview_arrayed;\n  _vk.dummy_res.samp_info.imageLayout      = VK_IMAGE_LAYOUT_GENERAL;\n  _vk.dummy_res.samp_info.imageView        = VK_NULL_HANDLE;\n  _vk.dummy_res.samp_info.sampler          = _vk.dummy_res.samp->vksampler;\n  _vk.dummy_res.imgsamp_info.imageLayout   = VK_IMAGE_LAYOUT_GENERAL;\n  _vk.dummy_res.imgsamp_info.imageView     = _vk.dummy_res.img->vkview;\n  _vk.dummy_res.imgsamp_info.sampler       = _vk.dummy_res.samp->vksampler;\n  _vk.dummy_res.imgsamp_arr_info           = _vk.dummy_res.imgsamp_info;\n  _vk.dummy_res.imgsamp_arr_info.imageView = _vk.dummy_res.img->vkview_arrayed;\n  _vk.dummy_res.dummy_accel_struct         = VK_NULL_HANDLE;\n  _vk.dummy_res.image_transitioned         = false;\n  pthread_mutex_init(&_vk.dummy_res.img_mu, NULL);\n\n  // Done!\n\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_shutdown(void) NGF_NOEXCEPT {\n  NGFI_DIAG_INFO(\"Shutting down nicegraf.\");\n\n  if (CURRENT_CONTEXT != NULL) { NGFI_DIAG_ERROR(\"Context not destroyed before shutdown.\") }\n  NGFI_FREE(_vk.dummy_res.tbuf);\n  NGFI_FREE(_vk.dummy_res.img);\n  NGFI_FREE(_vk.dummy_res.cube);\n  NGFI_FREE(_vk.dummy_res.buf);\n  NGFI_FREE(_vk.dummy_res.samp);\n\n  if (_vk.allocator != VK_NULL_HANDLE) { vmaDestroyAllocator(_vk.allocator); }\n\n  if (_vk.device != VK_NULL_HANDLE) { vkDestroyDevice(_vk.device, NULL); }\n  if (_vk.debug_messenger) {\n    vkDestroyDebugUtilsMessengerEXT(_vk.instance, _vk.debug_messenger, NULL);\n  }\n  if (_vk.instance != VK_NULL_HANDLE) { vkDestroyInstance(_vk.instance, NULL); }\n  _vk.instance = VK_NULL_HANDLE;\n#if defined(__linux__)\n  if (_vk.xcb_connection) {\n    
xcb_disconnect(_vk.xcb_connection);\n    _vk.xcb_visualid   = 0;\n    _vk.xcb_connection = NULL;\n  }\n#endif\n}\n\nextern \"C\" ngf_error\nngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_ctx = ngf_context_t::make(*info);\n\n  if (!maybe_ctx.has_error()) result[0] = maybe_ctx.value().release();\n  return maybe_ctx.has_error() ? maybe_ctx.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" const ngf_device_capabilities* ngf_get_device_capabilities(void) NGF_NOEXCEPT {\n  return &ngfvk::global::phys_device_caps;\n}\n\nextern \"C\" ngf_error\nngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT {\n  assert(ctx);\n  if (!ctx || !ctx->default_render_target || !ctx->swapchain) {\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n  ctx->swapchain_info.width          = NGFI_MAX(1, new_width);\n  ctx->swapchain_info.height         = NGFI_MAX(1, new_height);\n  ctx->default_render_target->width  = ctx->swapchain_info.width;\n  ctx->default_render_target->height = ctx->swapchain_info.height;\n\n  // swapchain needs to be explicitly destroyed before\n  // creating a new one with the same surface.\n  ctx->swapchain = ngfi::unique_ptr<ngfvk_swapchain> {};\n  auto maybe_swapchain =\n      ngfvk_swapchain::make(ctx->swapchain_info, ctx->default_render_target.get(), ctx->surface);\n  if (!maybe_swapchain.has_error()) {\n    ctx->swapchain = ngfi::move(maybe_swapchain.value());\n    return NGF_ERROR_OK;\n  } else {\n    return maybe_swapchain.error();\n  }\n}\n\nextern \"C\" void ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT {\n  if (ctx != nullptr) { ngfi::free<ngf_context_t>(ctx); }\n}\nextern \"C\" ngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT {\n  CURRENT_CONTEXT = ctx;\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_context ngf_get_context() NGF_NOEXCEPT {\n  return CURRENT_CONTEXT;\n}\n\nextern \"C\" ngf_error\nngf_create_cmd_buffer(const 
ngf_cmd_buffer_info*, ngf_cmd_buffer* result) NGF_NOEXCEPT {\n  assert(result);\n  auto cmd_buf = ngf_cmd_buffer_t::make();\n  if (!cmd_buf.has_error()) { result[0] = cmd_buf.value().release(); }\n  return cmd_buf.has_error() ? cmd_buf.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_cmd_begin_render_pass_simple(\n    ngf_cmd_buffer      cmd_buf,\n    ngf_render_target   rt,\n    float               clear_color_r,\n    float               clear_color_g,\n    float               clear_color_b,\n    float               clear_color_a,\n    float               clear_depth,\n    uint32_t            clear_stencil,\n    ngf_render_encoder* enc) NGF_NOEXCEPT {\n  ngfi::tmp_arena().reset();\n  auto load_ops  = ngfi::tmp_alloc<ngf_attachment_load_op>(rt->nattachments);\n  auto store_ops = ngfi::tmp_alloc<ngf_attachment_store_op>(rt->nattachments);\n  auto clears    = ngfi::tmp_alloc<ngf_clear>(rt->nattachments);\n\n  for (size_t i = 0u; i < rt->nattachments; ++i) {\n    load_ops[i] = NGF_LOAD_OP_CLEAR;\n    if (rt->attachment_descs[i].type == NGF_ATTACHMENT_COLOR) {\n      clears[i].clear_color[0] = clear_color_r;\n      clears[i].clear_color[1] = clear_color_g;\n      clears[i].clear_color[2] = clear_color_b;\n      clears[i].clear_color[3] = clear_color_a;\n    } else if (\n        rt->attachment_descs[i].type == NGF_ATTACHMENT_DEPTH ||\n        rt->attachment_descs[i].type == NGF_ATTACHMENT_DEPTH_STENCIL) {\n      clears[i].clear_depth_stencil.clear_depth   = clear_depth;\n      clears[i].clear_depth_stencil.clear_stencil = clear_stencil;\n    } else {\n      assert(false);\n    }\n\n    const bool needs_resolve = rt->attachment_descs[i].type == NGF_ATTACHMENT_COLOR &&\n                               rt->have_resolve_attachments &&\n                               rt->attachment_descs[i].sample_count > NGF_SAMPLE_COUNT_1;\n    store_ops[i] = needs_resolve ? 
NGF_STORE_OP_RESOLVE : NGF_STORE_OP_STORE;\n  }\n  const ngf_render_pass_info pass_info = {\n      .render_target = rt,\n      .load_ops      = load_ops,\n      .store_ops     = store_ops,\n      .clears        = clears,\n  };\n  return ngf_cmd_begin_render_pass(cmd_buf, &pass_info, enc);\n}\n\nextern \"C\" ngf_error ngf_cmd_begin_render_pass(\n    ngf_cmd_buffer              cmd_buf,\n    const ngf_render_pass_info* pass_info,\n    ngf_render_encoder*         enc) NGF_NOEXCEPT {\n  if (pass_info->render_target->is_default &&\n      ngfvk_maybe_acquire_swapchain_image() != NGF_ERROR_OK) {\n    return NGF_ERROR_INVALID_OPERATION;\n  }\n\n  ngf_error err = NGF_ERROR_OK;\n\n  ngfvk_encoder_start(cmd_buf);\n  if (err != NGF_ERROR_OK) return err;\n\n  err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse);\n  if (err != NGF_ERROR_OK) { return err; }\n\n  ngfi::tmp_arena().reset();\n\n  cmd_buf->active_rt         = pass_info->render_target;\n  cmd_buf->renderpass_active = true;\n\n  cmd_buf->pending_render_pass_info.render_target = pass_info->render_target;\n\n  auto cloned_load_ops =\n      ngfi::frame_alloc<ngf_attachment_load_op>(pass_info->render_target->nattachments);\n  cmd_buf->pending_render_pass_info.load_ops = cloned_load_ops;\n  if (cmd_buf->pending_render_pass_info.load_ops == NULL) { return NGF_ERROR_OUT_OF_MEM; }\n  memcpy(\n      cloned_load_ops,\n      pass_info->load_ops,\n      sizeof(ngf_attachment_load_op) * pass_info->render_target->nattachments);\n\n  auto cloned_store_ops =\n      ngfi::frame_alloc<ngf_attachment_store_op>(pass_info->render_target->nattachments);\n  cmd_buf->pending_render_pass_info.store_ops = cloned_store_ops;\n  if (cmd_buf->pending_render_pass_info.store_ops == NULL) { return NGF_ERROR_OUT_OF_MEM; }\n  memcpy(\n      cloned_store_ops,\n      pass_info->store_ops,\n      sizeof(ngf_attachment_store_op) * pass_info->render_target->nattachments);\n\n  uint32_t nclears       = 0u;\n  auto     cloned_clears = 
ngfi::frame_alloc<ngf_clear>(pass_info->render_target->nattachments);\n  if (cloned_clears == NULL) { return NGF_ERROR_OUT_OF_MEM; }\n  for (uint32_t i = 0u; i < pass_info->render_target->nattachments; ++i) {\n    if (cmd_buf->pending_render_pass_info.load_ops[i] == NGF_LOAD_OP_CLEAR) {\n      nclears          = NGFI_MAX(nclears, i + 1);\n      cloned_clears[i] = pass_info->clears[i];\n    }\n  }\n  if (nclears > 0u) {\n    cmd_buf->pending_render_pass_info.clears = cloned_clears;\n  } else {\n    cmd_buf->pending_render_pass_info.clears = NULL;\n  }\n  cmd_buf->pending_clear_value_count = (uint16_t)nclears;\n\n  ngfvk_sync_req_batch sync_req_batch;\n\n  ngfvk_sync_req_batch_init(pass_info->render_target->nattachments, &sync_req_batch);\n\n  for (size_t i = 0u; i < pass_info->render_target->nattachments; ++i) {\n    const ngf_attachment_type attachment_type = pass_info->render_target->attachment_descs[i].type;\n    const ngf_sample_count    attachment_sample_count =\n        pass_info->render_target->attachment_descs[i].sample_count;\n    switch (attachment_type) {\n    case NGF_ATTACHMENT_COLOR: {\n      ngfvk_sync_req sync_req;\n      sync_req.barrier_masks.access_mask =\n          VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;\n      sync_req.barrier_masks.stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;\n      sync_req.layout                   = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;\n      ngf_image color_image =\n          cmd_buf->active_rt->is_default\n              ? (attachment_sample_count == NGF_SAMPLE_COUNT_1\n                     ? 
CURRENT_CONTEXT->swapchain\n                           ->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx]\n                           .get()\n                     : CURRENT_CONTEXT->swapchain\n                           ->multisample_imgs[CURRENT_CONTEXT->swapchain->image_idx]\n                           .get())\n              : pass_info->render_target->attachment_images[i];\n      ngfvk_sync_res res = ngfvk_sync_res_from_img(color_image);\n      ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req);\n      break;\n    }\n    case NGF_ATTACHMENT_DEPTH:\n    case NGF_ATTACHMENT_DEPTH_STENCIL: {\n      ngfvk_sync_req sync_req;\n      sync_req.barrier_masks.access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |\n                                           VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;\n      sync_req.barrier_masks.stage_mask =\n          VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;\n      sync_req.layout                    = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;\n      ngf_image      depth_stencil_image = cmd_buf->active_rt->is_default\n                                               ? 
CURRENT_CONTEXT->swapchain->depth_img\n                                               : pass_info->render_target->attachment_images[i];\n      ngfvk_sync_res res                 = ngfvk_sync_res_from_img(depth_stencil_image);\n      ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req);\n      break;\n    }\n\n    default:\n      assert(0);\n    }\n  }\n  ngfvk_sync_req_batch_process(&sync_req_batch, cmd_buf);\n\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_cmd_begin_xfer_pass(\n    ngf_cmd_buffer            cmd_buf,\n    const ngf_xfer_pass_info* pass_info,\n    ngf_xfer_encoder*         enc) NGF_NOEXCEPT {\n  (void)pass_info;\n  ngf_error err = ngfvk_encoder_start(cmd_buf);\n  if (err != NGF_ERROR_OK) return err;\n\n  err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse);\n  if (err != NGF_ERROR_OK) { return err; }\n  cmd_buf->xfer_pass_active = true;\n\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_cmd_begin_compute_pass(\n    ngf_cmd_buffer               cmd_buf,\n    const ngf_compute_pass_info* pass_info,\n    ngf_compute_encoder*         enc) NGF_NOEXCEPT {\n  (void)pass_info;\n  ngf_error err = ngfvk_encoder_start(cmd_buf);\n  if (err != NGF_ERROR_OK) return err;\n\n  err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse);\n  if (err != NGF_ERROR_OK) { return err; }\n\n  cmd_buf->compute_pass_active = true;\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT {\n  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);\n\n  // Commit all the pending barriers.\n  ngfvk_sync_commit_pending_barriers(&buf->pending_barriers, buf->vk_cmd_buffer);\n\n  // Begin the real render pass.\n  const ngf_render_pass_info* pass_info   = &buf->pending_render_pass_info;\n  const VkRenderPass          render_pass = ngfvk_lookup_renderpass(\n      pass_info->render_target,\n      ngfvk_renderpass_ops_key(\n          pass_info->render_target,\n     
     pass_info->load_ops,\n          pass_info->store_ops));\n\n  const ngfvk_swapchain*  swapchain = CURRENT_CONTEXT->swapchain.get();\n  const ngf_render_target target    = pass_info->render_target;\n\n  const VkFramebuffer fb =\n      target->is_default ? swapchain->framebufs[swapchain->image_idx] : target->frame_buffer;\n  const VkExtent2D render_extent = {\n      target->is_default ? CURRENT_CONTEXT->swapchain_info.width : target->width,\n      target->is_default ? CURRENT_CONTEXT->swapchain_info.height : target->height};\n\n  const uint32_t clear_value_count = buf->pending_clear_value_count;\n  auto           vk_clears =\n      clear_value_count > 0 ? ngfi::tmp_alloc<VkClearValue>(clear_value_count) : nullptr;\n  if (clear_value_count > 0) {\n    for (size_t i = 0; i < clear_value_count; ++i) {\n      VkClearValue*    vk_clear_val = &vk_clears[i];\n      const ngf_clear* clear        = &pass_info->clears[i];\n      if (target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH16 &&\n          target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH32 &&\n          target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH24_STENCIL8) {\n        VkClearColorValue* clear_color_var = &vk_clear_val->color;\n        clear_color_var->float32[0]        = clear->clear_color[0];\n        clear_color_var->float32[1]        = clear->clear_color[1];\n        clear_color_var->float32[2]        = clear->clear_color[2];\n        clear_color_var->float32[3]        = clear->clear_color[3];\n      } else {\n        VkClearDepthStencilValue* clear_depth_stencil_val = &vk_clear_val->depthStencil;\n        clear_depth_stencil_val->depth                    = clear->clear_depth_stencil.clear_depth;\n        clear_depth_stencil_val->stencil = clear->clear_depth_stencil.clear_stencil;\n      }\n    }\n  }\n\n  const VkRenderPassBeginInfo begin_info = {\n      .sType           = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,\n      .pNext           = NULL,\n      .renderPass      = 
render_pass,
      .framebuffer     = fb,
      .renderArea      = {.offset = {0u, 0u}, .extent = render_extent},
      .clearValueCount = clear_value_count,
      .pClearValues    = vk_clears};
  vkCmdBeginRenderPass(buf->vk_cmd_buffer, &begin_info, VK_SUBPASS_CONTENTS_INLINE);

  // Clean up after the begin operation.
  ngfi::tmp_arena().reset();

  // Encode each pending render command.
  ngfvk_cmd_buf_record_render_cmds(buf, buf->in_pass_cmd_chnks);

  // Reset pending render command storage.
  ngfvk_cmd_buf_reset_render_cmds(buf);

  // Finish renderpass.
  vkCmdEndRenderPass(buf->vk_cmd_buffer);
  buf->renderpass_active = false;
  buf->active_rt         = NULL;

  return ngfvk_encoder_end(buf, &enc.pvt_data_donotuse);
}

// Ends a transfer pass: clears the command buffer's xfer-pass-active flag and
// finalizes the encoder.
extern "C" ngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT {
  ngf_cmd_buffer buf    = NGFVK_ENC2CMDBUF(enc);
  buf->xfer_pass_active = false;
  return ngfvk_encoder_end(buf, &enc.pvt_data_donotuse);
}

// Ends a compute pass: clears the command buffer's compute-pass-active flag and
// finalizes the encoder.
extern "C" ngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT {
  ngf_cmd_buffer cmd_buf       = NGFVK_ENC2CMDBUF(enc);
  cmd_buf->compute_pass_active = false;
  return ngfvk_encoder_end(cmd_buf, &enc.pvt_data_donotuse);
}

// Begins recording into `cmd_buf` for the frame identified by `token`.
// Transitions the buffer to the READY state, resets all per-recording state
// (active pipelines/render target, pending bind ops, pending barriers, and
// locally tracked resource states), then acquires a VkCommandBuffer for this
// frame.
extern "C" ngf_error
ngf_start_cmd_buffer(ngf_cmd_buffer cmd_buf, ngf_frame_token token) NGF_NOEXCEPT {
  assert(cmd_buf);

  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY);

  // Reset per-recording bookkeeping.
  cmd_buf->parent_frame        = token;
  cmd_buf->desc_pools_list     = nullptr;
  cmd_buf->active_rt           = nullptr;
  cmd_buf->active_gfx_pipe     = nullptr;
  cmd_buf->active_compute_pipe = nullptr;
  cmd_buf->compute_pass_active = false;
  cmd_buf->renderpass_active   = false;
  cmd_buf->npending_bind_ops   = 0u;

  cmd_buf->virt_bind_ops_ranges.clear();
  cmd_buf->in_pass_cmd_chnks.clear();
  cmd_buf->pending_barriers.barriers.clear();
  cmd_buf->local_res_states.clear();

  ngfvk_cleanup_pending_binds(cmd_buf);

  return 
ngfvk_cmd_buffer_allocate_for_frame(token, &cmd_buf->vk_cmd_pool, &cmd_buf->vk_cmd_buffer);
}

// Destroys a command buffer. A buffer whose state is PENDING (submitted, still
// executing) cannot be freed immediately; instead it is flagged for deferred
// destruction via destroy_on_submit. Any other buffer is freed right away.
extern "C" void ngf_destroy_cmd_buffer(ngf_cmd_buffer buffer) NGF_NOEXCEPT {
  if (buffer && buffer->state != ngfi::CMD_BUFFER_STATE_PENDING) {
    NGFI_FREE(buffer);
  } else if (buffer) {
    buffer->destroy_on_submit = true;
  }
}

// Queues `nbuffers` recorded command buffers for submission within the current
// frame. A buffer recorded against a different frame token is rejected with
// NGF_ERROR_INVALID_OPERATION. Each accepted buffer transitions to PENDING,
// has its descriptor pool list scheduled for retirement with this frame's
// resources, is finalized with vkEndCommandBuffer, and is appended to the
// frame's submitted list (actual queue submission happens at end-of-frame).
// NOTE(review): the VkResult of vkEndCommandBuffer is ignored here — a failed
// end would only surface later at queue submission; consider checking it.
extern "C" ngf_error
ngf_submit_cmd_buffers(uint32_t nbuffers, ngf_cmd_buffer* cmd_bufs) NGF_NOEXCEPT {
  assert(cmd_bufs);
  uint32_t               frame_id       = CURRENT_CONTEXT->frame_id;
  ngfvk_frame_resources* frame_res_data = &CURRENT_CONTEXT->frame_res[frame_id];
  for (uint32_t i = 0u; i < nbuffers; ++i) {
    ngf_cmd_buffer cmd_buf = cmd_bufs[i];
    if (cmd_buf->parent_frame != CURRENT_CONTEXT->current_frame_token) {
      NGFI_DIAG_ERROR("submitting a command buffer for the wrong frame");
      return NGF_ERROR_INVALID_OPERATION;
    }
    NGFI_TRANSITION_CMD_BUF(cmd_bufs[i], ngfi::CMD_BUFFER_STATE_PENDING);
    if (cmd_buf->desc_pools_list) { frame_res_data->retire.append(cmd_buf->desc_pools_list); }
    vkEndCommandBuffer(cmd_buf->vk_cmd_buffer);

    frame_res_data->submitted_cmd_bufs.push_back(cmd_buf);
  }
  return NGF_ERROR_OK;
}

// Begins a new frame: advances the in-flight frame index, optionally starts a
// RenderDoc capture, resets the temporary and per-frame arenas, retires the
// resources left over from the previous use of this frame slot, and encodes a
// fresh frame token which is returned through `token`.
extern "C" ngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT {
  ngf_error err = NGF_ERROR_OK;

  // increment frame id.
  const uint32_t fi = (CURRENT_CONTEXT->frame_id + 1u) % CURRENT_CONTEXT->max_inflight_frames;
  CURRENT_CONTEXT->frame_id = fi;

  // setup frame capture
  if (_renderdoc.api && _renderdoc.capture_next) {
    _renderdoc.capture_next = false;
    _renderdoc.is_capturing = true;
    _renderdoc.api->StartFrameCapture(
        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),
        (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);
  }

  // reset stack allocators.
  ngfi::tmp_arena().reset();
  ngfi::frame_arena().reset();

  // Retire resources.
  ngfvk_frame_resources* 
next_frame_res = &CURRENT_CONTEXT->frame_res[fi];
  ngfvk_retire_resources(next_frame_res);
  next_frame_res->res_frame_arena.reset();

  // Invalidate the cached swapchain image index; the image for this frame is
  // acquired lazily (see ngfvk_maybe_acquire_swapchain_image).
  if (CURRENT_CONTEXT->swapchain) {
    CURRENT_CONTEXT->swapchain->image_idx = ngfvk::global::invalid_idx;
  }
  // Encode (context, max-in-flight, frame-id) into an opaque token used to
  // validate that command buffers are recorded/submitted in the right frame.
  CURRENT_CONTEXT->current_frame_token  = ngfi_encode_frame_token(
      (uint16_t)((uintptr_t)CURRENT_CONTEXT & 0xffff),
      (uint8_t)CURRENT_CONTEXT->max_inflight_frames,
      (uint8_t)CURRENT_CONTEXT->frame_id);

  *token = CURRENT_CONTEXT->current_frame_token;
  return err;
}

// Returns (through `result`) the wrapper image for the swapchain image backing
// the current frame. Fails if the token does not match the current frame, or
// if the context has no swapchain. Acquires the swapchain image on demand.
extern "C" ngf_error
ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT {
  assert(CURRENT_CONTEXT);
  assert(result);

  if (token != CURRENT_CONTEXT->current_frame_token) {
    NGFI_DIAG_ERROR("unexpected frame token");
    return NGF_ERROR_INVALID_OPERATION;
  }
  if (!CURRENT_CONTEXT->swapchain || CURRENT_CONTEXT->swapchain->vk_swapchain == VK_NULL_HANDLE) {
    NGFI_DIAG_ERROR(
        "requesting a swapchain image handle from a context that does not have a swapchain");
    return NGF_ERROR_INVALID_OPERATION;
  }
  ngfvk_maybe_acquire_swapchain_image();
  *result = CURRENT_CONTEXT->swapchain->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx].get();
  return NGF_ERROR_OK;
}

// Ends the frame identified by `token`: submits all pending command buffers
// (waiting on the image-acquire semaphore when presenting), presents the
// swapchain image if there is one, and closes any active RenderDoc capture.
extern "C" ngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT {
  if (token != CURRENT_CONTEXT->current_frame_token) {
    NGFI_DIAG_ERROR("ending a frame with an unexpected frame token");
    return NGF_ERROR_INVALID_OPERATION;
  }

  ngf_error err = NGF_ERROR_OK;

  // Obtain the current frame resource structure.
  const uint32_t         fi        = CURRENT_CONTEXT->frame_id;
  ngfvk_frame_resources* frame_res = &CURRENT_CONTEXT->frame_res[fi];

  frame_res->nwait_fences = 0u;

  // Submit pending commands & present.
  VkSemaphore image_semaphore = VK_NULL_HANDLE;
  const bool  needs_present   = CURRENT_CONTEXT->swapchain && 
CURRENT_CONTEXT->swapchain->vk_swapchain != VK_NULL_HANDLE;
  if (needs_present) { image_semaphore = CURRENT_CONTEXT->swapchain->acquire_sems[fi]; }

  // Submit everything queued this frame; signal this frame's fence so the
  // slot can be safely reused once the GPU is done.
  ngf_error submit_result = ngfvk_submit_pending_cmd_buffers(
      frame_res,
      image_semaphore,
      frame_res->fences[frame_res->nwait_fences++]);

  // Present if necessary.
  if (submit_result == NGF_ERROR_OK && needs_present) {
    // Present waits on the semaphore signaled by the last submission that
    // rendered to this swapchain image.
    const VkPresentInfoKHR present_info = {
        .sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .pNext              = NULL,
        .waitSemaphoreCount = 1u,
        .pWaitSemaphores    = &CURRENT_CONTEXT->swapchain->submit_sems[CURRENT_CONTEXT->swapchain->image_idx],
        .swapchainCount     = 1,
        .pSwapchains        = &CURRENT_CONTEXT->swapchain->vk_swapchain,
        .pImageIndices      = &CURRENT_CONTEXT->swapchain->image_idx,
        .pResults           = NULL};
    const VkResult present_result = vkQueuePresentKHR(_vk.present_queue, &present_info);
    // NOTE(review): suboptimal/out-of-date results are folded into a generic
    // error here; swapchain recreation is presumably handled elsewhere.
    if (present_result != VK_SUCCESS) err = NGF_ERROR_INVALID_OPERATION;
  }

  // end frame capture
  if (_renderdoc.api && _renderdoc.is_capturing) {
    _renderdoc.api->EndFrameCapture(
        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),
        (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);
    _renderdoc.is_capturing = false;
    _renderdoc.capture_next = false;
  }
  return err;
}

// Creates a shader stage from the given description; on success, stores the
// new handle in result[0]. Returns the construction error otherwise.
extern "C" ngf_error
ngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_stage = ngf_shader_stage_t::make(*info);
  if (!maybe_stage.has_error()) result[0] = maybe_stage.value().release();
  return maybe_stage.has_error() ? 
maybe_stage.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_shader_stage(ngf_shader_stage stage) NGF_NOEXCEPT {\n  if (stage) { NGFI_FREE(stage); }\n}\n\nextern \"C\" ngf_error ngf_create_graphics_pipeline(\n    const ngf_graphics_pipeline_info* info,\n    ngf_graphics_pipeline*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_pipeline = ngfvk_generic_pipeline::make(*info);\n  if (!maybe_pipeline.has_error())\n    result[0] = (ngf_graphics_pipeline)maybe_pipeline.value().release();\n  return maybe_pipeline.has_error() ? maybe_pipeline.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_graphics_pipeline(ngf_graphics_pipeline p) NGF_NOEXCEPT {\n  if (p) {\n    auto gp = (ngfvk_generic_pipeline*)p;\n    NGFI_FREE(gp);\n  }\n}\n\nextern \"C\" ngf_error ngf_create_compute_pipeline(\n    const ngf_compute_pipeline_info* info,\n    ngf_compute_pipeline*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_pipeline = ngfvk_generic_pipeline::make(*info);\n  if (!maybe_pipeline.has_error())\n    result[0] = (ngf_compute_pipeline)maybe_pipeline.value().release();\n  return maybe_pipeline.has_error() ? maybe_pipeline.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_compute_pipeline(ngf_compute_pipeline p) NGF_NOEXCEPT {\n  if (p) {\n    auto gp = (ngfvk_generic_pipeline*)p;\n    NGFI_FREE(gp);\n  }\n}\n\nextern \"C\" ngf_render_target ngf_default_render_target() NGF_NOEXCEPT {\n  if (CURRENT_CONTEXT) {\n    return CURRENT_CONTEXT->default_render_target.get();\n  } else {\n    return NULL;\n  }\n}\n\nextern \"C\" const ngf_attachment_descriptions*\nngf_default_render_target_attachment_descs() NGF_NOEXCEPT {\n  if (CURRENT_CONTEXT->default_render_target) {\n    CURRENT_CONTEXT->default_attachment_descriptions_list.ndescs =\n        CURRENT_CONTEXT->swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED ? 
2u : 1u;\n    CURRENT_CONTEXT->default_attachment_descriptions_list.descs =\n        CURRENT_CONTEXT->default_render_target->attachment_descs.data();\n    return &CURRENT_CONTEXT->default_attachment_descriptions_list;\n  } else {\n    return NULL;\n  }\n}\n\nextern \"C\" ngf_error ngf_create_render_target(\n    const ngf_render_target_info* info,\n    ngf_render_target*            result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_rt = ngf_render_target_t::make(*info);\n  if (!maybe_rt.has_error()) result[0] = maybe_rt.value().release();\n  return maybe_rt.has_error() ? maybe_rt.error() : NGF_ERROR_OK;\n}\nextern \"C\" void ngf_destroy_render_target(ngf_render_target target) NGF_NOEXCEPT {\n  if (target) {\n    if (target->is_default) {\n      NGFI_DIAG_ERROR(\"default RT can only be destroyed by owning context\\n\");\n      return;\n    }\n    NGFI_FREE(target);\n  }\n}\nextern \"C\" void ngf_cmd_dispatch(\n    ngf_compute_encoder enc,\n    uint32_t            x_threadgroups,\n    uint32_t            y_threadgroups,\n    uint32_t            z_threadgroups) NGF_NOEXCEPT {\n  ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc);\n\n  ngfi::tmp_arena().reset();\n\n  // Prepare a batch of sync requests by scanning all pending bind operations.\n  ngfvk_sync_req_batch sync_req_batch;\n  ngfvk_sync_req_batch_init(cmd_buf->npending_bind_ops, &sync_req_batch);\n\n  for (const ngf_resource_bind_op& bind_op_ref : cmd_buf->pending_bind_ops) {\n    const ngf_resource_bind_op* bind_op  = &bind_op_ref;\n    ngfvk_sync_req              sync_req = ngfvk_sync_req_for_bind_op(\n        bind_op,\n        (ngfvk_generic_pipeline*)(cmd_buf->active_compute_pipe));\n    if (sync_req.barrier_masks.stage_mask == 0u) { continue; }\n    const ngfvk_sync_res res = ngfvk_sync_res_from_bind_op(bind_op);\n    if (res.type == NGFVK_SYNC_RES_COUNT) { continue; }\n    ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req);\n  }\n\n  // Emit the necessary 
barriers prior to dispatch.\n  ngfvk_sync_req_batch_commit(&sync_req_batch, cmd_buf);\n\n  // Allocate and write descriptor sets.\n  ngfvk_execute_pending_binds(cmd_buf);\n\n  vkCmdDispatch(cmd_buf->vk_cmd_buffer, x_threadgroups, y_threadgroups, z_threadgroups);\n}\n\nextern \"C\" void ngf_cmd_draw(\n    ngf_render_encoder enc,\n    bool               indexed,\n    uint32_t           first_element,\n    uint32_t           nelements,\n    uint32_t           ninstances) NGF_NOEXCEPT {\n  ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc);\n\n  uint32_t nmax_pending_sync_reqs = 2u;\n  for (const ngfvk_virt_bind_range& r : cmd_buf->virt_bind_ops_ranges) {\n    nmax_pending_sync_reqs += r.count;\n  }\n\n  ngfvk_sync_req_batch sync_req_batch;\n  ngfvk_sync_req_batch_init(nmax_pending_sync_reqs, &sync_req_batch);\n\n  const ngfvk_sync_req attr_buf_sync_req = {\n      .barrier_masks =\n          {.access_mask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,\n           .stage_mask  = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},\n      .layout = VK_IMAGE_LAYOUT_UNDEFINED};\n  if (cmd_buf->active_attr_buf) {\n    const ngfvk_sync_res attr_buf_res = ngfvk_sync_res_from_buf(cmd_buf->active_attr_buf);\n    ngfvk_sync_req_batch_add_with_lookup(\n        &sync_req_batch,\n        cmd_buf,\n        &attr_buf_res,\n        &attr_buf_sync_req);\n  }\n  if (indexed && cmd_buf->active_idx_buf) {\n    const ngfvk_sync_req idx_buf_sync_req = {\n        .barrier_masks =\n            {.access_mask = VK_ACCESS_INDEX_READ_BIT,\n             .stage_mask  = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},\n        .layout = VK_IMAGE_LAYOUT_UNDEFINED};\n    const ngfvk_sync_res idx_buf_res = ngfvk_sync_res_from_buf(cmd_buf->active_idx_buf);\n    ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &idx_buf_res, &idx_buf_sync_req);\n  }\n  cmd_buf->active_attr_buf = NULL;\n  cmd_buf->active_idx_buf  = NULL;\n\n  for (const ngfvk_virt_bind_range& r : cmd_buf->virt_bind_ops_ranges) {\n    for (uint32_t j = 0u; j < 
r.count; ++j) {
      const ngfvk_render_cmd* render_cmd = &r.start[j];
      assert(render_cmd->type == NGFVK_RENDER_CMD_BIND_RESOURCE);
      const ngfvk_sync_req sync_req = ngfvk_sync_req_for_bind_op(
          &render_cmd->data.bind_resource,
          (ngfvk_generic_pipeline*)(cmd_buf->active_gfx_pipe));
      // A zero stage mask means no synchronization is needed for this bind op.
      if (sync_req.barrier_masks.stage_mask == 0u) { continue; }
      const ngfvk_sync_res sync_res = ngfvk_sync_res_from_bind_op(&render_cmd->data.bind_resource);
      ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &sync_res, &sync_req);
    }
  }
  // The tracked bind ranges have been consumed by this draw.
  cmd_buf->virt_bind_ops_ranges.clear();
  ngfvk_sync_req_batch_process(&sync_req_batch, cmd_buf);

  // Append a virtual draw command; it is translated into actual Vulkan
  // commands when the render pass is recorded.
  const ngfvk_render_cmd cmd = {
      .data =
          {.draw =
               {.first_element = first_element,
                .nelements     = nelements,
                .ninstances    = ninstances,
                .indexed       = indexed}},
      .type = NGFVK_RENDER_CMD_DRAW};
  ngfvk_cmd_buf_add_render_cmd(cmd_buf, &cmd, true);
}

// Records a graphics pipeline bind as a virtual command and remembers it as
// the active graphics pipeline (used for subsequent sync-request derivation).
extern "C" void
ngf_cmd_bind_gfx_pipeline(ngf_render_encoder enc, ngf_graphics_pipeline pipeline) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.pipeline = pipeline},
      .type = NGFVK_RENDER_CMD_BIND_PIPELINE};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
  buf->active_gfx_pipe = pipeline;
}

// Records `nbind_operations` resource bind operations as virtual commands.
// Consecutive commands that land contiguously in the same storage chunk are
// coalesced into ranges (virt_bind_ops_ranges) for later hazard tracking;
// bind ops marked read-only are exempt from hazard tracking entirely.
extern "C" void ngf_cmd_bind_resources(
    ngf_render_encoder          enc,
    const ngf_resource_bind_op* bind_operations,
    uint32_t                    nbind_operations) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  if (nbind_operations <= 0u) { return; }

  ngfvk_virt_bind_range   curr_range = {.start = nullptr, .count = 0u};
  const ngfvk_render_cmd* prev_cmd   = nullptr;

  for (uint32_t i = 0u; i < nbind_operations; ++i) {
    const ngfvk_render_cmd cmd = {
        .data = {.bind_resource = bind_operations[i]},
        
.type = NGFVK_RENDER_CMD_BIND_RESOURCE};
    const ngfvk_render_cmd* cmd_ptr = buf->in_pass_cmd_chnks.append(
        cmd,
        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);

    // Check if the bound resource is marked as read-only.
    // Do not add such resources to the cmd buffer's virt_bind_ops_ranges.
    // This will preclude hazard tracking from occurring for said resources.
    if (ngfi_skip_hazard_tracking_for_bind_op(bind_operations[i])) { continue; }

    // Check if this command is contiguous with the previous one (same chunk)
    if (prev_cmd != nullptr && cmd_ptr != prev_cmd + 1) {
      // New chunk started, flush current range
      if (curr_range.start != nullptr) {
        buf->virt_bind_ops_ranges.append(
            curr_range,
            CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
      }
      curr_range.start = cmd_ptr;
      curr_range.count = 0u; // 0 is intentional, we increment count at the end of loop.
    } else if (curr_range.start == nullptr) {
      // First command
      curr_range.start = cmd_ptr;
    }
    ++curr_range.count;
    prev_cmd = cmd_ptr;
  }

  // Flush the final (possibly only) range.
  if (curr_range.start != nullptr) {
    buf->virt_bind_ops_ranges.append(
        curr_range,
        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
  }
}

// Queues resource bind operations for the compute encoder; the shared
// ngfvk_cmd_bind_resources helper performs the actual bookkeeping.
extern "C" void ngf_cmd_bind_compute_resources(
    ngf_compute_encoder         enc,
    const ngf_resource_bind_op* bind_operations,
    uint32_t                    nbind_operations) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  ngfvk_cmd_bind_resources(buf, bind_operations, nbind_operations);
}

// Binds a compute pipeline. Any binds pending against the previously active
// compute pipeline are flushed first, since descriptor set layouts may differ.
extern "C" void
ngf_cmd_bind_compute_pipeline(ngf_compute_encoder enc, ngf_compute_pipeline pipeline) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  if (buf->active_compute_pipe && buf->npending_bind_ops > 0u) { ngfvk_execute_pending_binds(buf); }

  
buf->active_compute_pipe = pipeline;
  vkCmdBindPipeline(
      buf->vk_cmd_buffer,
      VK_PIPELINE_BIND_POINT_COMPUTE,
      ((ngfvk_generic_pipeline*)pipeline)->vk_pipeline);
}

// Records a viewport change as a virtual render command.
extern "C" void ngf_cmd_viewport(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {.data = {.rect = *r}, .type = NGFVK_RENDER_CMD_SET_VIEWPORT};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records a scissor-rect change as a virtual render command.
extern "C" void ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {.data = {.rect = *r}, .type = NGFVK_RENDER_CMD_SET_SCISSOR};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records front/back stencil reference values as a virtual render command.
extern "C" void
ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records front/back stencil compare masks as a virtual render command.
extern "C" void
ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records front/back stencil write masks as a virtual render command.
extern "C" void
ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" 
void
// Records depth-bias parameters (constant factor, slope factor, clamp) as a
// virtual render command.
ngf_cmd_set_depth_bias(ngf_render_encoder enc, float const_scale, float slope_scale, float clamp)
    NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data =
          {.depth_bias =
               {.const_factor = const_scale, .slope_factor = slope_scale, .clamp = clamp}},
      .type = NGFVK_RENDER_CMD_SET_DEPTH_BIAS};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records a vertex attribute buffer bind as a virtual render command, and
// remembers it so the next draw can emit a synchronization request for it.
extern "C" void
ngf_cmd_bind_attrib_buffer(ngf_render_encoder enc, ngf_buffer abuf, uint32_t binding, size_t offset)
    NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.bind_attrib_buffer = {.buffer = abuf, .binding = binding, .offset = offset}},
      .type = NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER};
  buf->active_attr_buf = abuf;
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records an index buffer bind as a virtual render command, and remembers it
// so the next indexed draw can emit a synchronization request for it.
extern "C" void ngf_cmd_bind_index_buffer(
    ngf_render_encoder enc,
    ngf_buffer         ibuf,
    size_t             offset,
    ngf_type           index_type) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.bind_index_buffer = {.buffer = ibuf, .offset = offset, .type = index_type}},
      .type = NGFVK_RENDER_CMD_BIND_INDEX_BUFFER};
  buf->active_idx_buf = ibuf;
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Records a buffer-to-buffer copy. Emits transfer-read/transfer-write barriers
// for the source and destination buffers before the copy itself.
extern "C" void ngf_cmd_copy_buffer(
    ngf_xfer_encoder enc,
    ngf_buffer       src,
    ngf_buffer       dst,
    size_t           size,
    size_t           src_offset,
    size_t           dst_offset) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  assert(buf);
  ngfvk_sync_req_batch sync_req_batch;
  ngfi::tmp_arena().reset();
  ngfvk_sync_req_batch_init(2, &sync_req_batch);
  const ngfvk_sync_req src_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_READ_BIT,
           .stage_mask  = 
VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};
  const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_buf(src);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &src_sync_res, &src_sync_req);
  const ngfvk_sync_req dst_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};
  const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_buf(dst);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &dst_sync_res, &dst_sync_req);
  ngfvk_sync_req_batch_commit(&sync_req_batch, buf);

  const VkBufferCopy copy_region = {.srcOffset = src_offset, .dstOffset = dst_offset, .size = size};
  vkCmdCopyBuffer(
      buf->vk_cmd_buffer,
      (VkBuffer)src->alloc.obj_handle,
      (VkBuffer)dst->alloc.obj_handle,
      1u,
      &copy_region);
}

// Records buffer-to-image writes. Emits a transfer-read barrier for the source
// buffer and a transfer-write barrier (with a transition to
// TRANSFER_DST_OPTIMAL) for the destination image, then records one
// vkCmdCopyBufferToImage covering all `nwrites` regions.
extern "C" void ngf_cmd_write_image(
    ngf_xfer_encoder       enc,
    ngf_buffer             src,
    ngf_image              dst,
    const ngf_image_write* writes,
    uint32_t               nwrites) NGF_NOEXCEPT {
  ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc);
  assert(cmd_buf);
  assert(nwrites == 0u || writes);
  if (nwrites == 0u) return;
  ngfvk_sync_req_batch sync_req_batch;
  ngfi::tmp_arena().reset();
  ngfvk_sync_req_batch_init(2, &sync_req_batch);
  const ngfvk_sync_req src_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_READ_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};
  const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_buf(src);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &src_sync_res, &src_sync_req);
  const ngfvk_sync_req dst_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT,
           .stage_mask  = 
VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL};
  const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_img(dst);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &dst_sync_res, &dst_sync_req);
  ngfvk_sync_req_batch_commit(&sync_req_batch, cmd_buf);

  ngfi::tmp_arena().reset();
  // Translate each ngf_image_write into a VkBufferImageCopy region.
  auto vk_writes = ngfi::tmp_alloc<VkBufferImageCopy>(nwrites);
  if (vk_writes) {
    for (size_t i = 0u; i < nwrites; ++i) {
      const ngf_image_write* ngf_write = &writes[i];
      VkBufferImageCopy*     vk_write  = &vk_writes[i];
      memset(vk_write, 0, sizeof(VkBufferImageCopy));
      vk_write->bufferOffset                    = ngf_write->src_offset;
      vk_write->imageOffset.x                   = ngf_write->dst_offset.x;
      vk_write->imageOffset.y                   = ngf_write->dst_offset.y;
      vk_write->imageOffset.z                   = ngf_write->dst_offset.z;
      vk_write->imageExtent.width               = ngf_write->extent.width;
      vk_write->imageExtent.height              = ngf_write->extent.height;
      vk_write->imageExtent.depth               = ngf_write->extent.depth;
      // NOTE(review): only the color aspect is ever written here; depth/stencil
      // uploads are not handled by this path — confirm that is intentional.
      vk_write->imageSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
      vk_write->imageSubresource.mipLevel       = ngf_write->dst_level;
      vk_write->imageSubresource.baseArrayLayer = ngf_write->dst_base_layer;
      vk_write->imageSubresource.layerCount     = ngf_write->nlayers;
    }
    vkCmdCopyBufferToImage(
        cmd_buf->vk_cmd_buffer,
        (VkBuffer)src->alloc.obj_handle,
        (VkImage)dst->alloc.obj_handle,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        nwrites,
        vk_writes);
  } else {
    NGFI_DIAG_ERROR("Image write failed");
  }
}

// Records an image-to-buffer copy. Emits a transfer-read barrier (with a
// transition to TRANSFER_SRC_OPTIMAL) for the source image and a
// transfer-write barrier for the destination buffer before the copy.
extern "C" void ngf_cmd_copy_image_to_buffer(
    ngf_xfer_encoder    enc,
    const ngf_image_ref src,
    ngf_offset3d        src_offset,
    ngf_extent3d        src_extent,
    uint32_t            nlayers,
    
ngf_buffer          dst,
    size_t              dst_offset) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  assert(buf);
  ngfvk_sync_req_batch sync_req_batch;
  ngfi::tmp_arena().reset();
  ngfvk_sync_req_batch_init(2, &sync_req_batch);
  // Source image: transfer read, transitioned to TRANSFER_SRC_OPTIMAL.
  const ngfvk_sync_req src_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_READ_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL};
  const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_img(src.image);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &src_sync_res, &src_sync_req);
  // Destination buffer: transfer write (buffers carry no image layout).
  const ngfvk_sync_req dst_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};
  const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_buf(dst);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &dst_sync_res, &dst_sync_req);
  ngfvk_sync_req_batch_commit(&sync_req_batch, buf);

  // Cubemaps are flattened to array layers: 6 faces per cube layer.
  const uint32_t src_layer =
      src.image->type == NGF_IMAGE_TYPE_CUBE ? 
6u * src.layer + src.cubemap_face : src.layer;
  const VkBufferImageCopy copy_op = {
      .bufferOffset      = dst_offset,
      .bufferRowLength   = 0u,
      .bufferImageHeight = 0u,
      .imageSubresource =
          {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
           .mipLevel       = src.mip_level,
           .baseArrayLayer = src_layer,
           .layerCount     = nlayers},
      .imageOffset = {.x = src_offset.x, .y = src_offset.y, .z = src_offset.z},
      .imageExtent =
          {.width = src_extent.width, .height = src_extent.height, .depth = src_extent.depth}};

  vkCmdCopyImageToBuffer(
      buf->vk_cmd_buffer,
      (VkImage)src.image->alloc.obj_handle,
      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
      (VkBuffer)dst->alloc.obj_handle,
      1u,
      &copy_op);
}

// Generates a full mip chain for `img` by repeatedly blitting each level into
// the next with linear filtering. Requires the image to have been created with
// the NGF_IMAGE_USAGE_MIPMAP_GENERATION usage flag. On completion all levels
// are left in TRANSFER_SRC_OPTIMAL layout, recorded in the tracked sync state.
extern "C" ngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT {
  if (!(img->usage_flags & NGF_IMAGE_USAGE_MIPMAP_GENERATION)) {
    NGFI_DIAG_ERROR("mipmap generation was requested for an image that was created without "
                    "the NGF_IMAGE_USAGE_MIPMAP_GENERATION usage flag.");
    return NGF_ERROR_INVALID_OPERATION;
  }

  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(xfenc);
  assert(buf);

  // TODO: ensure the pixel format is valid for mip generation.
  // TODO: hazard-track images on mip + level granularity.

  // Bring the whole image into TRANSFER_DST_OPTIMAL for writing.
  ngfvk_sync_req sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL};
  ngfvk_sync_res img_res = ngfvk_sync_res_from_img(img);
  ngfvk_handle_single_sync_req(buf, &img_res, &sync_req);

  // Each level's extent is the previous one halved (clamped to 1).
  uint32_t src_w = img->extent.width, src_h = img->extent.height, src_d = img->extent.depth,
           dst_w = 0, dst_h = 0, dst_d = 0;
  const uint32_t nlayers = img->nlayers;

  for (uint32_t src_level = 0u; src_level < img->nlevels; 
++src_level) {
    const uint32_t dst_level                    = src_level + 1u;
    dst_w                                       = src_w > 1u ? (src_w >> 1u) : 1u;
    dst_h                                       = src_h > 1u ? (src_h >> 1u) : 1u;
    dst_d                                       = src_d > 1u ? (src_d >> 1u) : 1u;
    // Transition the level we are about to read from DST to SRC layout, and
    // make the preceding write visible to the upcoming transfer read.
    const VkImageMemoryBarrier pre_blit_barrier = {
        .sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .pNext               = NULL,
        .srcAccessMask       = VK_ACCESS_TRANSFER_WRITE_BIT,
        .dstAccessMask       = VK_ACCESS_TRANSFER_READ_BIT,
        .oldLayout           = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        .newLayout           = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image               = (VkImage)img->alloc.obj_handle,
        .subresourceRange    = {
               .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .baseMipLevel   = src_level,
               .levelCount     = 1u,
               .baseArrayLayer = 0u,
               .layerCount     = nlayers}};
    vkCmdPipelineBarrier(
        buf->vk_cmd_buffer,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        0u,
        0u,
        NULL,
        0u,
        NULL,
        1u,
        &pre_blit_barrier);
    // The last level has no successor to blit into; its iteration only
    // performs the layout transition above.
    if (src_level < img->nlevels - 1) {
      const VkImageBlit blit_region = {
          .srcSubresource =
              {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .mipLevel       = src_level,
               .baseArrayLayer = 0u,
               .layerCount     = nlayers},
          .srcOffsets = {{0, 0, 0}, {(int32_t)src_w, (int32_t)src_h, (int32_t)src_d}},
          .dstSubresource =
              {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .mipLevel       = dst_level,
               
.baseArrayLayer = 0u,
               .layerCount     = nlayers},
          .dstOffsets = {{0, 0, 0}, {(int32_t)dst_w, (int32_t)dst_h, (int32_t)dst_d}}};
      vkCmdBlitImage(
          buf->vk_cmd_buffer,
          (VkImage)img->alloc.obj_handle,
          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
          (VkImage)img->alloc.obj_handle,
          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
          1,
          &blit_region,
          VK_FILTER_LINEAR);
      src_w = dst_w;
      src_h = dst_h;
      src_d = dst_d;
    }
  }
  // Record the image's final tracked state: every level has been transitioned
  // to TRANSFER_SRC_OPTIMAL and read by the transfer stage.
  ngfvk_sync_res       r             = ngfvk_sync_res_from_img(img);
  ngfvk_sync_res_data* sync_res_data = NULL;
  ngfvk_cmd_buf_lookup_sync_res(buf, &r, &sync_res_data);
  sync_res_data->sync_state.active_readers_masks.stage_mask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
  sync_res_data->sync_state.active_readers_masks.access_mask |= VK_ACCESS_TRANSFER_READ_BIT;
  sync_res_data->sync_state.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;

  return NGF_ERROR_OK;
}

// Opens a labeled debug group on the command buffer (e.g. for RenderDoc).
extern "C" void
ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buffer, const char* name) NGF_NOEXCEPT {
  ngfvk_debug_label_begin(cmd_buffer->vk_cmd_buffer, name);
}

// Closes the innermost open debug group on the command buffer.
extern "C" void ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {
  ngfvk_debug_label_end(cmd_buffer->vk_cmd_buffer);
}

// Creates a texel buffer view; on success stores the new handle in result[0],
// otherwise returns the construction error.
extern "C" ngf_error ngf_create_texel_buffer_view(
    const ngf_texel_buffer_view_info* info,
    ngf_texel_buffer_view*            result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_buf_view = ngf_texel_buffer_view_t::make(*info);
  if (!maybe_buf_view.has_error()) result[0] = maybe_buf_view.value().release();
  return maybe_buf_view.has_error() ? 
maybe_buf_view.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT {\n  if (buf_view) {\n    const uint32_t fi = CURRENT_CONTEXT->frame_id;\n    CURRENT_CONTEXT->frame_res[fi].retire.append(buf_view);\n  }\n}\n\nextern \"C\" ngf_error\nngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n\n  auto maybe_buf = ngf_buffer_t::make(*info);\n  if (!maybe_buf.has_error()) { result[0] = maybe_buf.value().release(); }\n  return maybe_buf.has_error() ? maybe_buf.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_buffer(ngf_buffer buffer) NGF_NOEXCEPT {\n  if (buffer) {\n    const uint32_t fi = CURRENT_CONTEXT->frame_id;\n    CURRENT_CONTEXT->frame_res[fi].retire.append(buffer);\n  }\n}\n\nextern \"C\" void* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t) NGF_NOEXCEPT {\n  buf->mapped_offset = offset;\n  return (uint8_t*)buf->alloc.mapped_data + buf->mapped_offset;\n}\n\nextern \"C\" void ngf_buffer_flush_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT {\n  vmaFlushAllocation(_vk.allocator, buf->alloc.vma_alloc, buf->mapped_offset + offset, size);\n}\n\nextern \"C\" void ngf_buffer_unmap(ngf_buffer) NGF_NOEXCEPT {  // vk buffers are persistently mapped.\n}\n\nextern \"C\" ngf_error\nngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_view = ngf_image_view_t::make(*info);\n  if (!maybe_view.has_error()) result[0] = maybe_view.value().release();\n  return maybe_view.has_error() ? 
maybe_view.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_image_view(ngf_image_view view) NGF_NOEXCEPT {\n  if (view) {\n    const uint32_t fi = CURRENT_CONTEXT->frame_id;\n    CURRENT_CONTEXT->frame_res[fi].retire.append(view);\n  }\n}\n\nextern \"C\" ngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_image = ngf_image_t::make(*info);\n  if (!maybe_image.has_error()) result[0] = maybe_image.value().release();\n  return maybe_image.has_error() ? maybe_image.error() : NGF_ERROR_OK;\n}\n\nextern \"C\" void ngf_destroy_image(ngf_image img) NGF_NOEXCEPT {\n  if (img != NULL) {\n    const uint32_t fi = CURRENT_CONTEXT->frame_id;\n    CURRENT_CONTEXT->frame_res[fi].retire.append(img);\n  }\n}\n\nngfi::maybe_ngfptr<ngf_sampler_t> ngf_sampler_t::make(const ngf_sampler_info& info) NGF_NOEXCEPT {\n  auto sampler = ngfi::unique_ptr<ngf_sampler_t>::make();\n  if (!sampler) return NGF_ERROR_OUT_OF_MEM;\n  const VkSamplerCreateInfo vk_sampler_info = {\n      .sType                   = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n      .pNext                   = NULL,\n      .flags                   = 0u,\n      .magFilter               = get_vk_filter(info.mag_filter),\n      .minFilter               = get_vk_filter(info.min_filter),\n      .mipmapMode              = get_vk_mipmode(info.mip_filter),\n      .addressModeU            = get_vk_address_mode(info.wrap_u),\n      .addressModeV            = get_vk_address_mode(info.wrap_v),\n      .addressModeW            = get_vk_address_mode(info.wrap_w),\n      .mipLodBias              = info.lod_bias,\n      .anisotropyEnable        = info.enable_anisotropy ? 
VK_TRUE : VK_FALSE,\n      .maxAnisotropy           = info.max_anisotropy,\n      .compareEnable           = info.compare_op != NGF_COMPARE_OP_NEVER,\n      .compareOp               = get_vk_compare_op(info.compare_op),\n      .minLod                  = info.lod_min,\n      .maxLod                  = info.lod_max,\n      .borderColor             = VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,\n      .unnormalizedCoordinates = VK_FALSE};\n  const VkResult vk_sampler_create_result =\n      vkCreateSampler(_vk.device, &vk_sampler_info, NULL, &sampler->vksampler);\n  if (vk_sampler_create_result != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;\n  return sampler;\n}\n\nextern \"C\" ngf_error\nngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT {\n  assert(info);\n  assert(result);\n  auto maybe_sampler = ngf_sampler_t::make(*info);\n  if (!maybe_sampler.has_error()) result[0] = maybe_sampler.value().release();\n  return maybe_sampler.has_error() ? maybe_sampler.error() : NGF_ERROR_OK;\n}\n\nngf_sampler_t::~ngf_sampler_t() NGF_NOEXCEPT {\n  vkDestroySampler(_vk.device, vksampler, nullptr);\n}\n\nextern \"C\" void ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT {\n  if (sampler) {\n    const uint32_t fi = CURRENT_CONTEXT->frame_id;\n    CURRENT_CONTEXT->frame_res[fi].retire.append(sampler);\n  }\n}\n\nextern \"C\" void ngf_finish(void) NGF_NOEXCEPT {\n  if (CURRENT_CONTEXT->current_frame_token != ~0u) {\n    ngfvk_frame_resources* frame_res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];\n    ngfvk_submit_pending_cmd_buffers(frame_res, VK_NULL_HANDLE, VK_NULL_HANDLE);\n  }\n  vkDeviceWaitIdle(_vk.device);\n}\n\n// Pushes via the context's default layout; values persist across compatible pipeline binds.\nstatic ngf_error ngfvk_set_bytes_impl(\n    ngf_cmd_buffer cmd_buf,\n    const void*    data,\n    size_t         size_bytes) {\n  if (!data || size_bytes == 0u) return NGF_ERROR_OK;\n  if (size_bytes > 
NGF_MAX_ENCODER_INLINE_BYTES || (size_bytes & 0x3u) != 0u) {\n    NGFI_DIAG_ERROR(\n        \"push-constant size %zu must be <= %u and a multiple of 4\",\n        size_bytes,\n        NGF_MAX_ENCODER_INLINE_BYTES);\n    return NGF_ERROR_INVALID_SIZE;\n  }\n  vkCmdPushConstants(\n      cmd_buf->vk_cmd_buffer,\n      CURRENT_CONTEXT->vk_default_push_layout,\n      VK_SHADER_STAGE_ALL,\n      0u,\n      static_cast<uint32_t>(size_bytes),\n      data);\n  return NGF_ERROR_OK;\n}\n\nextern \"C\" ngf_error ngf_set_bytes(\n    ngf_render_encoder enc,\n    const void*        data,\n    size_t             size_bytes) NGF_NOEXCEPT {\n  return ngfvk_set_bytes_impl(NGFVK_ENC2CMDBUF(enc), data, size_bytes);\n}\n\nextern \"C\" ngf_error ngf_set_compute_bytes(\n    ngf_compute_encoder enc,\n    const void*         data,\n    size_t              size_bytes) NGF_NOEXCEPT {\n  return ngfvk_set_bytes_impl(NGFVK_ENC2CMDBUF(enc), data, size_bytes);\n}\n\nextern \"C\" void\nngf_mark_read_only(ngf_image* imgs, uint32_t nimgs, ngf_buffer* bufs, uint32_t nbufs) NGF_NOEXCEPT {\n  for (size_t i = 0u; i < nimgs; ++i) { imgs[i]->sync_state.skip_hazard_tracking = true; }\n  for (size_t i = 0u; i < nbufs; ++i) { bufs[i]->sync_state.skip_hazard_tracking = true; }\n}\n\nextern \"C\" void ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT {\n  if (_renderdoc.api) _renderdoc.capture_next = true;\n}\n\nextern \"C\" void ngf_renderdoc_capture_begin() NGF_NOEXCEPT {\n  if (_renderdoc.api && !_renderdoc.api->IsFrameCapturing()) {\n    _renderdoc.api->StartFrameCapture(\n        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),\n        (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);\n  }\n}\n\nextern \"C\" void ngf_renderdoc_capture_end() NGF_NOEXCEPT {\n  if (_renderdoc.api && _renderdoc.api->IsFrameCapturing()) {\n    _renderdoc.api->EndFrameCapture(\n        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),\n        
(RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);\n  }\n}\n\nextern \"C\" uintptr_t ngf_get_vk_device_handle() NGF_NOEXCEPT {\n  return (uintptr_t)_vk.device;\n}\n\nextern \"C\" uintptr_t ngf_get_vk_instance_handle() NGF_NOEXCEPT {\n  return (uintptr_t)_vk.instance;\n}\n\nextern \"C\" uintptr_t ngf_get_vk_image_handle(ngf_image image) NGF_NOEXCEPT {\n  return image->alloc.obj_handle;\n}\n\nextern \"C\" uintptr_t ngf_get_vk_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT {\n  return buffer->alloc.obj_handle;\n}\n\nextern \"C\" uintptr_t ngf_get_vk_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {\n  return (uintptr_t)(cmd_buffer->vk_cmd_buffer);\n}\n\nextern \"C\" uintptr_t ngf_get_vk_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT {\n  return (uintptr_t)(sampler->vksampler);\n}\n\nextern \"C\" uint32_t ngf_get_vk_image_format_index(ngf_image_format format) NGF_NOEXCEPT {\n  return (uint32_t)get_vk_image_format(format);\n}\n\n#pragma endregion\n\n#if defined(NGFVK_TEST_MODE)\n#include \"../tests/vk-backend-tests.cpp\"\n#endif\n"
  },
  {
    "path": "source/ngf-vk/vk_10.c",
    "content": "#include \"ngf-common/silence.h\"\n#include \"vk_10.h\"\n#include \"ngf-common/macros.h\"\n\n#define TO_STRING(str) #str\n#define STRINGIFY(str) TO_STRING(str)\n#if defined(_WIN32) || defined(_WIN64)\n#define VK_LOADER_LIB \"vulkan-1.dll\"\n#define VK_HIDE_SYMBOL\n#else\n#define VK_HIDE_SYMBOL __attribute__((visibility(\"hidden\")))\n#if defined(__APPLE__)\n#define VK_LOADER_LIB \"libMoltenVK.dylib\"\n#else\n#define VK_LOADER_LIB \"libvulkan.so.1\"\n#endif\n#endif\n\nVK_HIDE_SYMBOL PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nVK_HIDE_SYMBOL PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nVK_HIDE_SYMBOL PFN_vkCreateInstance vkCreateInstance;\nVK_HIDE_SYMBOL PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\n\nVK_HIDE_SYMBOL PFN_vkCreateDevice vkCreateDevice;\nVK_HIDE_SYMBOL PFN_vkDestroyInstance vkDestroyInstance;\nVK_HIDE_SYMBOL PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nVK_HIDE_SYMBOL PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nVK_HIDE_SYMBOL PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nVK_HIDE_SYMBOL PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\n#if !defined(__APPLE__)\nVK_HIDE_SYMBOL VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN;\n#endif\nVK_HIDE_SYMBOL 
VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN;\nVK_HIDE_SYMBOL PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nVK_HIDE_SYMBOL PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nVK_HIDE_SYMBOL PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nVK_HIDE_SYMBOL PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\n\nVK_HIDE_SYMBOL PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nVK_HIDE_SYMBOL PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nVK_HIDE_SYMBOL PFN_vkAllocateMemory vkAllocateMemory;\nVK_HIDE_SYMBOL PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nVK_HIDE_SYMBOL PFN_vkBindBufferMemory vkBindBufferMemory;\nVK_HIDE_SYMBOL PFN_vkBindImageMemory vkBindImageMemory;\nVK_HIDE_SYMBOL PFN_vkCmdBeginQuery vkCmdBeginQuery;\nVK_HIDE_SYMBOL PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nVK_HIDE_SYMBOL PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nVK_HIDE_SYMBOL PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nVK_HIDE_SYMBOL PFN_vkCmdBindPipeline vkCmdBindPipeline;\nVK_HIDE_SYMBOL PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nVK_HIDE_SYMBOL PFN_vkCmdBlitImage vkCmdBlitImage;\nVK_HIDE_SYMBOL PFN_vkCmdClearAttachments vkCmdClearAttachments;\nVK_HIDE_SYMBOL PFN_vkCmdClearColorImage vkCmdClearColorImage;\nVK_HIDE_SYMBOL PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nVK_HIDE_SYMBOL PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nVK_HIDE_SYMBOL PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nVK_HIDE_SYMBOL PFN_vkCmdCopyImage vkCmdCopyImage;\nVK_HIDE_SYMBOL PFN_vkCmdCopyImageToBuffer 
vkCmdCopyImageToBuffer;\nVK_HIDE_SYMBOL PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nVK_HIDE_SYMBOL PFN_vkCmdDispatch vkCmdDispatch;\nVK_HIDE_SYMBOL PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nVK_HIDE_SYMBOL PFN_vkCmdDraw vkCmdDraw;\nVK_HIDE_SYMBOL PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nVK_HIDE_SYMBOL PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\nVK_HIDE_SYMBOL PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nVK_HIDE_SYMBOL PFN_vkCmdEndQuery vkCmdEndQuery;\nVK_HIDE_SYMBOL PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nVK_HIDE_SYMBOL PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nVK_HIDE_SYMBOL PFN_vkCmdFillBuffer vkCmdFillBuffer;\nVK_HIDE_SYMBOL PFN_vkCmdNextSubpass vkCmdNextSubpass;\nVK_HIDE_SYMBOL PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nVK_HIDE_SYMBOL PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nVK_HIDE_SYMBOL PFN_vkCmdPushConstants vkCmdPushConstants;\nVK_HIDE_SYMBOL PFN_vkCmdResetEvent vkCmdResetEvent;\nVK_HIDE_SYMBOL PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nVK_HIDE_SYMBOL PFN_vkCmdResolveImage vkCmdResolveImage;\nVK_HIDE_SYMBOL PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nVK_HIDE_SYMBOL PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nVK_HIDE_SYMBOL PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nVK_HIDE_SYMBOL PFN_vkCmdSetEvent vkCmdSetEvent;\nVK_HIDE_SYMBOL PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nVK_HIDE_SYMBOL PFN_vkCmdSetScissor vkCmdSetScissor;\nVK_HIDE_SYMBOL PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nVK_HIDE_SYMBOL PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nVK_HIDE_SYMBOL PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nVK_HIDE_SYMBOL PFN_vkCmdSetViewport vkCmdSetViewport;\nVK_HIDE_SYMBOL PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nVK_HIDE_SYMBOL PFN_vkCmdWaitEvents vkCmdWaitEvents;\nVK_HIDE_SYMBOL PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nVK_HIDE_SYMBOL PFN_vkCreateBuffer vkCreateBuffer;\nVK_HIDE_SYMBOL PFN_vkCreateBufferView vkCreateBufferView;\nVK_HIDE_SYMBOL 
PFN_vkCreateCommandPool vkCreateCommandPool;\nVK_HIDE_SYMBOL PFN_vkCreateComputePipelines vkCreateComputePipelines;\nVK_HIDE_SYMBOL PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nVK_HIDE_SYMBOL PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nVK_HIDE_SYMBOL PFN_vkCreateEvent vkCreateEvent;\nVK_HIDE_SYMBOL PFN_vkCreateFence vkCreateFence;\nVK_HIDE_SYMBOL PFN_vkCreateFramebuffer vkCreateFramebuffer;\nVK_HIDE_SYMBOL PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nVK_HIDE_SYMBOL PFN_vkCreateImage vkCreateImage;\nVK_HIDE_SYMBOL PFN_vkCreateImageView vkCreateImageView;\nVK_HIDE_SYMBOL PFN_vkCreatePipelineCache vkCreatePipelineCache;\nVK_HIDE_SYMBOL PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nVK_HIDE_SYMBOL PFN_vkCreateQueryPool vkCreateQueryPool;\nVK_HIDE_SYMBOL PFN_vkCreateRenderPass vkCreateRenderPass;\nVK_HIDE_SYMBOL PFN_vkCreateSampler vkCreateSampler;\nVK_HIDE_SYMBOL PFN_vkCreateSemaphore vkCreateSemaphore;\nVK_HIDE_SYMBOL PFN_vkCreateShaderModule vkCreateShaderModule;\nVK_HIDE_SYMBOL PFN_vkDestroyBuffer vkDestroyBuffer;\nVK_HIDE_SYMBOL PFN_vkDestroyBufferView vkDestroyBufferView;\nVK_HIDE_SYMBOL PFN_vkDestroyCommandPool vkDestroyCommandPool;\nVK_HIDE_SYMBOL PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nVK_HIDE_SYMBOL PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nVK_HIDE_SYMBOL PFN_vkDestroyDevice vkDestroyDevice;\nVK_HIDE_SYMBOL PFN_vkDestroyEvent vkDestroyEvent;\nVK_HIDE_SYMBOL PFN_vkDestroyFence vkDestroyFence;\nVK_HIDE_SYMBOL PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nVK_HIDE_SYMBOL PFN_vkDestroyImage vkDestroyImage;\nVK_HIDE_SYMBOL PFN_vkDestroyImageView vkDestroyImageView;\nVK_HIDE_SYMBOL PFN_vkDestroyPipeline vkDestroyPipeline;\nVK_HIDE_SYMBOL PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nVK_HIDE_SYMBOL PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nVK_HIDE_SYMBOL PFN_vkDestroyQueryPool vkDestroyQueryPool;\nVK_HIDE_SYMBOL PFN_vkDestroyRenderPass 
vkDestroyRenderPass;\nVK_HIDE_SYMBOL PFN_vkDestroySampler vkDestroySampler;\nVK_HIDE_SYMBOL PFN_vkDestroySemaphore vkDestroySemaphore;\nVK_HIDE_SYMBOL PFN_vkDestroyShaderModule vkDestroyShaderModule;\nVK_HIDE_SYMBOL PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nVK_HIDE_SYMBOL PFN_vkEndCommandBuffer vkEndCommandBuffer;\nVK_HIDE_SYMBOL PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nVK_HIDE_SYMBOL PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nVK_HIDE_SYMBOL PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nVK_HIDE_SYMBOL PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nVK_HIDE_SYMBOL PFN_vkFreeMemory vkFreeMemory;\nVK_HIDE_SYMBOL PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nVK_HIDE_SYMBOL PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nVK_HIDE_SYMBOL PFN_vkGetDeviceQueue vkGetDeviceQueue;\nVK_HIDE_SYMBOL PFN_vkGetEventStatus vkGetEventStatus;\nVK_HIDE_SYMBOL PFN_vkGetFenceStatus vkGetFenceStatus;\nVK_HIDE_SYMBOL PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nVK_HIDE_SYMBOL PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nVK_HIDE_SYMBOL PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nVK_HIDE_SYMBOL PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nVK_HIDE_SYMBOL PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nVK_HIDE_SYMBOL PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nVK_HIDE_SYMBOL PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nVK_HIDE_SYMBOL PFN_vkMapMemory vkMapMemory;\nVK_HIDE_SYMBOL PFN_vkMergePipelineCaches vkMergePipelineCaches;\nVK_HIDE_SYMBOL PFN_vkQueueBindSparse vkQueueBindSparse;\nVK_HIDE_SYMBOL PFN_vkQueueSubmit vkQueueSubmit;\nVK_HIDE_SYMBOL PFN_vkQueueWaitIdle vkQueueWaitIdle;\nVK_HIDE_SYMBOL PFN_vkResetCommandBuffer vkResetCommandBuffer;\nVK_HIDE_SYMBOL PFN_vkResetCommandPool vkResetCommandPool;\nVK_HIDE_SYMBOL PFN_vkResetDescriptorPool vkResetDescriptorPool;\nVK_HIDE_SYMBOL 
PFN_vkResetEvent vkResetEvent;\nVK_HIDE_SYMBOL PFN_vkResetFences vkResetFences;\nVK_HIDE_SYMBOL PFN_vkSetEvent vkSetEvent;\nVK_HIDE_SYMBOL PFN_vkUnmapMemory vkUnmapMemory;\nVK_HIDE_SYMBOL PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nVK_HIDE_SYMBOL PFN_vkWaitForFences vkWaitForFences;\nVK_HIDE_SYMBOL PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nVK_HIDE_SYMBOL PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nVK_HIDE_SYMBOL PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nVK_HIDE_SYMBOL PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nVK_HIDE_SYMBOL PFN_vkQueuePresentKHR vkQueuePresentKHR;\nVK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nVK_HIDE_SYMBOL PFN_vkDestroyDebugUtilsMessengerEXT    vkDestroyDebugUtilsMessengerEXT;\n\n\n\nbool vkl_init_loader(void) {\n  ngfi_module_handle vkdll = LoadLibraryA(VK_LOADER_LIB);\n\n  if (!vkdll) { return false; }\n\n  vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)GetProcAddress(vkdll, \"vkGetInstanceProcAddr\");\n  vkCreateInstance =\n      (PFN_vkCreateInstance)vkGetInstanceProcAddr(VK_NULL_HANDLE, \"vkCreateInstance\");\n  vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)\n      vkGetInstanceProcAddr(VK_NULL_HANDLE, \"vkEnumerateInstanceLayerProperties\");\n  vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)\n      vkGetInstanceProcAddr(VK_NULL_HANDLE, \"vkEnumerateInstanceVersion\");\n\n  vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)\n      vkGetInstanceProcAddr(VK_NULL_HANDLE, \"vkEnumerateInstanceExtensionProperties\");\n  return true;\n}\n#if !defined(__APPLE__)\nextern VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN;\n#endif\nextern VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN;\nvoid vkl_init_instance(VkInstance inst) {\n  vkCreateDevice = (PFN_vkCreateDevice)vkGetInstanceProcAddr(inst, \"vkCreateDevice\");\n  vkDestroyInstance = (PFN_vkDestroyInstance)vkGetInstanceProcAddr(inst, 
\"vkDestroyInstance\");\n  vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties)vkGetInstanceProcAddr(inst, \"vkEnumerateDeviceExtensionProperties\");\n  vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties)vkGetInstanceProcAddr(inst, \"vkEnumerateDeviceLayerProperties\");\n  vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices)vkGetInstanceProcAddr(inst, \"vkEnumeratePhysicalDevices\");\n  vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(inst, \"vkGetDeviceProcAddr\");\n  vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceFeatures\");\n  vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceFormatProperties\");\n  vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceImageFormatProperties\");\n  vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceMemoryProperties\");\n  vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceProperties\");\n  vkGetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceQueueFamilyProperties\");\n  vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceSparseImageFormatProperties\");\n#if !defined(__APPLE__)\n  VK_GET_DEVICE_PRES_FN = (VK_GET_DEVICE_PRES_FN_TYPE)vkGetInstanceProcAddr(inst, STRINGIFY(VK_GET_DEVICE_PRES_FN));\n#endif\n  VK_CREATE_SURFACE_FN = (VK_CREATE_SURFACE_FN_TYPE)vkGetInstanceProcAddr(inst, STRINGIFY(VK_CREATE_SURFACE_FN));\n  vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)vkGetInstanceProcAddr(inst, 
\"vkDestroySurfaceKHR\");\n  vkGetPhysicalDeviceSurfaceSupportKHR =\n    (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)vkGetInstanceProcAddr(\n      inst,\n      \"vkGetPhysicalDeviceSurfaceSupportKHR\");\n  vkCreateDebugUtilsMessengerEXT =\n   (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, \"vkCreateDebugUtilsMessengerEXT\");\n  vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceSurfacePresentModesKHR\");\n  vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceSurfaceFormatsKHR\");\n  vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\");\n  vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(inst, \"vkGetPhysicalDeviceFeatures2KHR\");\n  vkDestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(\n      inst,\n      \"vkDestroyDebugUtilsMessengerEXT\");\n  vkCmdBeginDebugUtilsLabelEXT =\n      (PFN_vkCmdBeginDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, \"vkCmdBeginDebugUtilsLabelEXT\");\n  vkCmdEndDebugUtilsLabelEXT =\n      (PFN_vkCmdEndDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, \"vkCmdEndDebugUtilsLabelEXT\");\n}\n\nvoid vkl_init_device(VkDevice dev, bool sync2_supported) {\n  vkAllocateCommandBuffers =\n      (PFN_vkAllocateCommandBuffers)vkGetDeviceProcAddr(dev, \"vkAllocateCommandBuffers\");\n  vkAllocateDescriptorSets =\n      (PFN_vkAllocateDescriptorSets)vkGetDeviceProcAddr(dev, \"vkAllocateDescriptorSets\");\n  vkAllocateMemory     = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(dev, \"vkAllocateMemory\");\n  vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)vkGetDeviceProcAddr(dev, \"vkBeginCommandBuffer\");\n  vkBindBufferMemory   = (PFN_vkBindBufferMemory)vkGetDeviceProcAddr(dev, 
\"vkBindBufferMemory\");\n  vkBindImageMemory    = (PFN_vkBindImageMemory)vkGetDeviceProcAddr(dev, \"vkBindImageMemory\");\n  vkCmdBeginQuery      = (PFN_vkCmdBeginQuery)vkGetDeviceProcAddr(dev, \"vkCmdBeginQuery\");\n  vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)vkGetDeviceProcAddr(dev, \"vkCmdBeginRenderPass\");\n  vkCmdBindDescriptorSets =\n      (PFN_vkCmdBindDescriptorSets)vkGetDeviceProcAddr(dev, \"vkCmdBindDescriptorSets\");\n  vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)vkGetDeviceProcAddr(dev, \"vkCmdBindIndexBuffer\");\n  vkCmdBindPipeline    = (PFN_vkCmdBindPipeline)vkGetDeviceProcAddr(dev, \"vkCmdBindPipeline\");\n  vkCmdBindVertexBuffers =\n      (PFN_vkCmdBindVertexBuffers)vkGetDeviceProcAddr(dev, \"vkCmdBindVertexBuffers\");\n  vkCmdBlitImage = (PFN_vkCmdBlitImage)vkGetDeviceProcAddr(dev, \"vkCmdBlitImage\");\n  vkCmdClearAttachments =\n      (PFN_vkCmdClearAttachments)vkGetDeviceProcAddr(dev, \"vkCmdClearAttachments\");\n  vkCmdClearColorImage = (PFN_vkCmdClearColorImage)vkGetDeviceProcAddr(dev, \"vkCmdClearColorImage\");\n  vkCmdClearDepthStencilImage =\n      (PFN_vkCmdClearDepthStencilImage)vkGetDeviceProcAddr(dev, \"vkCmdClearDepthStencilImage\");\n  vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkGetDeviceProcAddr(dev, \"vkCmdCopyBuffer\");\n  vkCmdCopyBufferToImage =\n      (PFN_vkCmdCopyBufferToImage)vkGetDeviceProcAddr(dev, \"vkCmdCopyBufferToImage\");\n  vkCmdCopyImage = (PFN_vkCmdCopyImage)vkGetDeviceProcAddr(dev, \"vkCmdCopyImage\");\n  vkCmdCopyImageToBuffer =\n      (PFN_vkCmdCopyImageToBuffer)vkGetDeviceProcAddr(dev, \"vkCmdCopyImageToBuffer\");\n  vkCmdCopyQueryPoolResults =\n      (PFN_vkCmdCopyQueryPoolResults)vkGetDeviceProcAddr(dev, \"vkCmdCopyQueryPoolResults\");\n  vkCmdDispatch = (PFN_vkCmdDispatch)vkGetDeviceProcAddr(dev, \"vkCmdDispatch\");\n  vkCmdDispatchIndirect =\n      (PFN_vkCmdDispatchIndirect)vkGetDeviceProcAddr(dev, \"vkCmdDispatchIndirect\");\n  vkCmdDraw        = (PFN_vkCmdDraw)vkGetDeviceProcAddr(dev, 
\"vkCmdDraw\");\n  vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)vkGetDeviceProcAddr(dev, \"vkCmdDrawIndexed\");\n  vkCmdDrawIndexedIndirect =\n      (PFN_vkCmdDrawIndexedIndirect)vkGetDeviceProcAddr(dev, \"vkCmdDrawIndexedIndirect\");\n  vkCmdDrawIndirect    = (PFN_vkCmdDrawIndirect)vkGetDeviceProcAddr(dev, \"vkCmdDrawIndirect\");\n  vkCmdEndQuery        = (PFN_vkCmdEndQuery)vkGetDeviceProcAddr(dev, \"vkCmdEndQuery\");\n  vkCmdEndRenderPass   = (PFN_vkCmdEndRenderPass)vkGetDeviceProcAddr(dev, \"vkCmdEndRenderPass\");\n  vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)vkGetDeviceProcAddr(dev, \"vkCmdExecuteCommands\");\n  vkCmdFillBuffer      = (PFN_vkCmdFillBuffer)vkGetDeviceProcAddr(dev, \"vkCmdFillBuffer\");\n  vkCmdNextSubpass     = (PFN_vkCmdNextSubpass)vkGetDeviceProcAddr(dev, \"vkCmdNextSubpass\");\n  vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)vkGetDeviceProcAddr(dev, \"vkCmdPipelineBarrier\");\n  vkCmdPushConstants   = (PFN_vkCmdPushConstants)vkGetDeviceProcAddr(dev, \"vkCmdPushConstants\");\n  vkCmdResetEvent      = (PFN_vkCmdResetEvent)vkGetDeviceProcAddr(dev, \"vkCmdResetEvent\");\n  vkCmdResetQueryPool  = (PFN_vkCmdResetQueryPool)vkGetDeviceProcAddr(dev, \"vkCmdResetQueryPool\");\n  vkCmdResolveImage    = (PFN_vkCmdResolveImage)vkGetDeviceProcAddr(dev, \"vkCmdResolveImage\");\n  vkCmdSetBlendConstants =\n      (PFN_vkCmdSetBlendConstants)vkGetDeviceProcAddr(dev, \"vkCmdSetBlendConstants\");\n  vkCmdSetDepthBias   = (PFN_vkCmdSetDepthBias)vkGetDeviceProcAddr(dev, \"vkCmdSetDepthBias\");\n  vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)vkGetDeviceProcAddr(dev, \"vkCmdSetDepthBounds\");\n  vkCmdSetEvent       = (PFN_vkCmdSetEvent)vkGetDeviceProcAddr(dev, \"vkCmdSetEvent\");\n  vkCmdSetLineWidth   = (PFN_vkCmdSetLineWidth)vkGetDeviceProcAddr(dev, \"vkCmdSetLineWidth\");\n  vkCmdSetScissor     = (PFN_vkCmdSetScissor)vkGetDeviceProcAddr(dev, \"vkCmdSetScissor\");\n  vkCmdSetStencilCompareMask =\n      
(PFN_vkCmdSetStencilCompareMask)vkGetDeviceProcAddr(dev, \"vkCmdSetStencilCompareMask\");\n  vkCmdSetStencilReference =\n      (PFN_vkCmdSetStencilReference)vkGetDeviceProcAddr(dev, \"vkCmdSetStencilReference\");\n  vkCmdSetStencilWriteMask =\n      (PFN_vkCmdSetStencilWriteMask)vkGetDeviceProcAddr(dev, \"vkCmdSetStencilWriteMask\");\n  vkCmdSetViewport    = (PFN_vkCmdSetViewport)vkGetDeviceProcAddr(dev, \"vkCmdSetViewport\");\n  vkCmdUpdateBuffer   = (PFN_vkCmdUpdateBuffer)vkGetDeviceProcAddr(dev, \"vkCmdUpdateBuffer\");\n  vkCmdWaitEvents     = (PFN_vkCmdWaitEvents)vkGetDeviceProcAddr(dev, \"vkCmdWaitEvents\");\n  vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)vkGetDeviceProcAddr(dev, \"vkCmdWriteTimestamp\");\n  vkCreateBuffer      = (PFN_vkCreateBuffer)vkGetDeviceProcAddr(dev, \"vkCreateBuffer\");\n  vkCreateBufferView  = (PFN_vkCreateBufferView)vkGetDeviceProcAddr(dev, \"vkCreateBufferView\");\n  vkCreateCommandPool = (PFN_vkCreateCommandPool)vkGetDeviceProcAddr(dev, \"vkCreateCommandPool\");\n  vkCreateComputePipelines =\n      (PFN_vkCreateComputePipelines)vkGetDeviceProcAddr(dev, \"vkCreateComputePipelines\");\n  vkCreateDescriptorPool =\n      (PFN_vkCreateDescriptorPool)vkGetDeviceProcAddr(dev, \"vkCreateDescriptorPool\");\n  vkCreateDescriptorSetLayout =\n      (PFN_vkCreateDescriptorSetLayout)vkGetDeviceProcAddr(dev, \"vkCreateDescriptorSetLayout\");\n  vkCreateEvent       = (PFN_vkCreateEvent)vkGetDeviceProcAddr(dev, \"vkCreateEvent\");\n  vkCreateFence       = (PFN_vkCreateFence)vkGetDeviceProcAddr(dev, \"vkCreateFence\");\n  vkCreateFramebuffer = (PFN_vkCreateFramebuffer)vkGetDeviceProcAddr(dev, \"vkCreateFramebuffer\");\n  vkCreateGraphicsPipelines =\n      (PFN_vkCreateGraphicsPipelines)vkGetDeviceProcAddr(dev, \"vkCreateGraphicsPipelines\");\n  vkCreateImage     = (PFN_vkCreateImage)vkGetDeviceProcAddr(dev, \"vkCreateImage\");\n  vkCreateImageView = (PFN_vkCreateImageView)vkGetDeviceProcAddr(dev, \"vkCreateImageView\");\n  vkCreatePipelineCache 
=\n      (PFN_vkCreatePipelineCache)vkGetDeviceProcAddr(dev, \"vkCreatePipelineCache\");\n  vkCreatePipelineLayout =\n      (PFN_vkCreatePipelineLayout)vkGetDeviceProcAddr(dev, \"vkCreatePipelineLayout\");\n  vkCreateQueryPool    = (PFN_vkCreateQueryPool)vkGetDeviceProcAddr(dev, \"vkCreateQueryPool\");\n  vkCreateRenderPass   = (PFN_vkCreateRenderPass)vkGetDeviceProcAddr(dev, \"vkCreateRenderPass\");\n  vkCreateSampler      = (PFN_vkCreateSampler)vkGetDeviceProcAddr(dev, \"vkCreateSampler\");\n  vkCreateSemaphore    = (PFN_vkCreateSemaphore)vkGetDeviceProcAddr(dev, \"vkCreateSemaphore\");\n  vkCreateShaderModule = (PFN_vkCreateShaderModule)vkGetDeviceProcAddr(dev, \"vkCreateShaderModule\");\n  vkDestroyBuffer      = (PFN_vkDestroyBuffer)vkGetDeviceProcAddr(dev, \"vkDestroyBuffer\");\n  vkDestroyBufferView  = (PFN_vkDestroyBufferView)vkGetDeviceProcAddr(dev, \"vkDestroyBufferView\");\n  vkDestroyCommandPool = (PFN_vkDestroyCommandPool)vkGetDeviceProcAddr(dev, \"vkDestroyCommandPool\");\n  vkDestroyDescriptorPool =\n      (PFN_vkDestroyDescriptorPool)vkGetDeviceProcAddr(dev, \"vkDestroyDescriptorPool\");\n  vkDestroyDescriptorSetLayout =\n      (PFN_vkDestroyDescriptorSetLayout)vkGetDeviceProcAddr(dev, \"vkDestroyDescriptorSetLayout\");\n  vkDestroyDevice      = (PFN_vkDestroyDevice)vkGetDeviceProcAddr(dev, \"vkDestroyDevice\");\n  vkDestroyEvent       = (PFN_vkDestroyEvent)vkGetDeviceProcAddr(dev, \"vkDestroyEvent\");\n  vkDestroyFence       = (PFN_vkDestroyFence)vkGetDeviceProcAddr(dev, \"vkDestroyFence\");\n  vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)vkGetDeviceProcAddr(dev, \"vkDestroyFramebuffer\");\n  vkDestroyImage       = (PFN_vkDestroyImage)vkGetDeviceProcAddr(dev, \"vkDestroyImage\");\n  vkDestroyImageView   = (PFN_vkDestroyImageView)vkGetDeviceProcAddr(dev, \"vkDestroyImageView\");\n  vkDestroyPipeline    = (PFN_vkDestroyPipeline)vkGetDeviceProcAddr(dev, \"vkDestroyPipeline\");\n  vkDestroyPipelineCache =\n      
(PFN_vkDestroyPipelineCache)vkGetDeviceProcAddr(dev, \"vkDestroyPipelineCache\");\n  vkDestroyPipelineLayout =\n      (PFN_vkDestroyPipelineLayout)vkGetDeviceProcAddr(dev, \"vkDestroyPipelineLayout\");\n  vkDestroyQueryPool  = (PFN_vkDestroyQueryPool)vkGetDeviceProcAddr(dev, \"vkDestroyQueryPool\");\n  vkDestroyRenderPass = (PFN_vkDestroyRenderPass)vkGetDeviceProcAddr(dev, \"vkDestroyRenderPass\");\n  vkDestroySampler    = (PFN_vkDestroySampler)vkGetDeviceProcAddr(dev, \"vkDestroySampler\");\n  vkDestroySemaphore  = (PFN_vkDestroySemaphore)vkGetDeviceProcAddr(dev, \"vkDestroySemaphore\");\n  vkDestroyShaderModule =\n      (PFN_vkDestroyShaderModule)vkGetDeviceProcAddr(dev, \"vkDestroyShaderModule\");\n  vkDeviceWaitIdle   = (PFN_vkDeviceWaitIdle)vkGetDeviceProcAddr(dev, \"vkDeviceWaitIdle\");\n  vkEndCommandBuffer = (PFN_vkEndCommandBuffer)vkGetDeviceProcAddr(dev, \"vkEndCommandBuffer\");\n  vkFlushMappedMemoryRanges =\n      (PFN_vkFlushMappedMemoryRanges)vkGetDeviceProcAddr(dev, \"vkFlushMappedMemoryRanges\");\n  vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)vkGetDeviceProcAddr(dev, \"vkFreeCommandBuffers\");\n  vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)vkGetDeviceProcAddr(dev, \"vkFreeDescriptorSets\");\n  vkFreeMemory         = (PFN_vkFreeMemory)vkGetDeviceProcAddr(dev, \"vkFreeMemory\");\n  vkGetBufferMemoryRequirements =\n      (PFN_vkGetBufferMemoryRequirements)vkGetDeviceProcAddr(dev, \"vkGetBufferMemoryRequirements\");\n  vkGetDeviceMemoryCommitment =\n      (PFN_vkGetDeviceMemoryCommitment)vkGetDeviceProcAddr(dev, \"vkGetDeviceMemoryCommitment\");\n  vkGetDeviceQueue = (PFN_vkGetDeviceQueue)vkGetDeviceProcAddr(dev, \"vkGetDeviceQueue\");\n  vkGetEventStatus = (PFN_vkGetEventStatus)vkGetDeviceProcAddr(dev, \"vkGetEventStatus\");\n  vkGetFenceStatus = (PFN_vkGetFenceStatus)vkGetDeviceProcAddr(dev, \"vkGetFenceStatus\");\n  vkGetImageMemoryRequirements =\n      (PFN_vkGetImageMemoryRequirements)vkGetDeviceProcAddr(dev, 
\"vkGetImageMemoryRequirements\");\n  vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)vkGetDeviceProcAddr(\n      dev,\n      \"vkGetImageSparseMemoryRequirements\");\n  vkGetImageSubresourceLayout =\n      (PFN_vkGetImageSubresourceLayout)vkGetDeviceProcAddr(dev, \"vkGetImageSubresourceLayout\");\n  vkGetPipelineCacheData =\n      (PFN_vkGetPipelineCacheData)vkGetDeviceProcAddr(dev, \"vkGetPipelineCacheData\");\n  vkGetQueryPoolResults =\n      (PFN_vkGetQueryPoolResults)vkGetDeviceProcAddr(dev, \"vkGetQueryPoolResults\");\n  vkGetRenderAreaGranularity =\n      (PFN_vkGetRenderAreaGranularity)vkGetDeviceProcAddr(dev, \"vkGetRenderAreaGranularity\");\n  vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkGetDeviceProcAddr(\n      dev,\n      \"vkInvalidateMappedMemoryRanges\");\n  vkMapMemory = (PFN_vkMapMemory)vkGetDeviceProcAddr(dev, \"vkMapMemory\");\n  vkMergePipelineCaches =\n      (PFN_vkMergePipelineCaches)vkGetDeviceProcAddr(dev, \"vkMergePipelineCaches\");\n  vkQueueBindSparse    = (PFN_vkQueueBindSparse)vkGetDeviceProcAddr(dev, \"vkQueueBindSparse\");\n  vkQueueSubmit        = (PFN_vkQueueSubmit)vkGetDeviceProcAddr(dev, \"vkQueueSubmit\");\n  vkQueueWaitIdle      = (PFN_vkQueueWaitIdle)vkGetDeviceProcAddr(dev, \"vkQueueWaitIdle\");\n  vkResetCommandBuffer = (PFN_vkResetCommandBuffer)vkGetDeviceProcAddr(dev, \"vkResetCommandBuffer\");\n  vkResetCommandPool   = (PFN_vkResetCommandPool)vkGetDeviceProcAddr(dev, \"vkResetCommandPool\");\n  vkResetDescriptorPool =\n      (PFN_vkResetDescriptorPool)vkGetDeviceProcAddr(dev, \"vkResetDescriptorPool\");\n  vkResetEvent  = (PFN_vkResetEvent)vkGetDeviceProcAddr(dev, \"vkResetEvent\");\n  vkResetFences = (PFN_vkResetFences)vkGetDeviceProcAddr(dev, \"vkResetFences\");\n  vkSetEvent    = (PFN_vkSetEvent)vkGetDeviceProcAddr(dev, \"vkSetEvent\");\n  vkUnmapMemory = (PFN_vkUnmapMemory)vkGetDeviceProcAddr(dev, \"vkUnmapMemory\");\n  vkUpdateDescriptorSets =\n      
(PFN_vkUpdateDescriptorSets)vkGetDeviceProcAddr(dev, \"vkUpdateDescriptorSets\");\n  vkWaitForFences      = (PFN_vkWaitForFences)vkGetDeviceProcAddr(dev, \"vkWaitForFences\");\n  vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)vkGetDeviceProcAddr(dev, \"vkCreateSwapchainKHR\");\n  vkDestroySwapchainKHR =\n      (PFN_vkDestroySwapchainKHR)vkGetDeviceProcAddr(dev, \"vkDestroySwapchainKHR\");\n  vkGetSwapchainImagesKHR =\n      (PFN_vkGetSwapchainImagesKHR)vkGetDeviceProcAddr(dev, \"vkGetSwapchainImagesKHR\");\n  vkAcquireNextImageKHR =\n      (PFN_vkAcquireNextImageKHR)vkGetDeviceProcAddr(dev, \"vkAcquireNextImageKHR\");\n  vkQueuePresentKHR = (PFN_vkQueuePresentKHR)vkGetDeviceProcAddr(dev, \"vkQueuePresentKHR\");\n  if (sync2_supported) {\n    vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)vkGetDeviceProcAddr(dev, \"vkCmdPipelineBarrier2KHR\");\n  }\n}\n"
  },
  {
    "path": "source/ngf-vk/vk_10.h",
    "content": "#pragma once\n\n#if defined(_WIN32)||defined(_WIN64)\n  #define   VK_GET_DEVICE_PRES_FN       vkGetPhysicalDeviceWin32PresentationSupportKHR\n  #define   VK_GET_DEVICE_PRES_FN_TYPE  PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR\n  #define   VK_SURFACE_EXT              \"VK_KHR_win32_surface\"\n  #define   VK_CREATE_SURFACE_FN        vkCreateWin32SurfaceKHR\n  #define   VK_CREATE_SURFACE_FN_TYPE   PFN_vkCreateWin32SurfaceKHR\n  #define   VK_USE_PLATFORM_WIN32_KHR\n  #define   WIN32_LEAN_AND_MEAN\n  #include <windows.h>\n#elif defined(__ANDROID__)\n  #define   VK_GET_DEVICE_PRES_FN       vkGetPhysicalDeviceAndroidPresentationSupportKHR\n  #define   VK_GET_DEVICE_PRES_FN_TYPE  PFN_vkGetPhysicalDeviceAndroidPresentationSupportKHR\n  #define   VK_SURFACE_EXT              \"VK_KHR_android_surface\"\n  #define   VK_CREATE_SURFACE_FN        vkCreateAndroidSurfaceKHR\n  #define   VK_CREATE_SURFACE_FN_TYPE   PFN_vkCreateAndroidSurfaceKHR\n  #define   VK_USE_PLATFORM_ANDROID_KHR\n#elif defined(__APPLE__)\n#include <dlfcn.h>\n#define     VK_SURFACE_EXT              \"VK_EXT_metal_surface\"\n#define     VK_CREATE_SURFACE_FN        vkCreateMetalSurfaceEXT\n#define     VK_CREATE_SURFACE_FN_TYPE   PFN_vkCreateMetalSurfaceEXT\n#define     VK_USE_PLATFORM_METAL_EXT\n#else\n  #include <xcb/xcb.h>\n  #include <dlfcn.h>\n  #include <X11/Xlib-xcb.h>\n  #define   VK_GET_DEVICE_PRES_FN       vkGetPhysicalDeviceXcbPresentationSupportKHR\n  #define   VK_GET_DEVICE_PRES_FN_TYPE  PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR\n  #define   VK_SURFACE_EXT              \"VK_KHR_xcb_surface\"\n  #define   VK_CREATE_SURFACE_FN        vkCreateXcbSurfaceKHR\n  #define   VK_CREATE_SURFACE_FN_TYPE   PFN_vkCreateXcbSurfaceKHR\n  #define   VK_USE_PLATFORM_XCB_KHR\n#endif\n\n#define VK_NO_PROTOTYPES\n#include <vulkan/vulkan.h>\n#include <stdbool.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nextern PFN_vkEnumerateInstanceLayerProperties 
vkEnumerateInstanceLayerProperties;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\n\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\n#if !defined(__APPLE__)\nextern VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN;\n#endif\nextern VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN;\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern 
PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\n\n\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool 
vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern 
PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern 
PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\n\nbool vkl_init_loader(void);\nvoid vkl_init_instance(VkInstance instance);\nvoid vkl_init_device(VkDevice device, bool sync2_supported);\n\n#ifdef __cplusplus\n}\n#endif\n\n"
  },
  {
    "path": "tests/arena-alloc-tests.cpp",
    "content": "/**\n * Copyright (c) 2025 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n// Disable warning about setjmp/longjmp interaction with C++ object destruction.\n// This is expected when using nicetest with C++ objects - memory may leak on\n// assertion failure, but this is acceptable for testing.\n#if defined(_MSC_VER)\n#pragma warning(disable : 4611)\n#endif\n\n#define NT_BREAK_ON_ASSERT_FAIL\n#include \"ngf-common/arena.h\"\n#include \"ngf-common/macros.h\"\n\n// Wrap nicetest.h in extern \"C\" to match linkage with test-suite-runner.c\nextern \"C\" {\n#include \"nicetest.h\"\n}\n\n#include <cstdlib>\n#include <cstring>\n#include <ctime>\n\n// Helper to check pointer alignment\nstatic bool is_aligned(void* ptr, size_t alignment) {\n  return (reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0;\n}\n\nNT_TESTSUITE {\n\n  /* Basic tests */\n\n  NT_TESTCASE(\"arena: create and destroy\") {\n  
  ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n    NT_ASSERT(arena.total_allocated() > 0);\n    NT_ASSERT(arena.total_used() == 0);\n  }\n\n  NT_TESTCASE(\"arena: default constructed is invalid\") {\n    ngfi::arena arena;\n    NT_ASSERT(!arena.is_valid());\n    NT_ASSERT(arena.total_allocated() == 0);\n    NT_ASSERT(arena.total_used() == 0);\n  }\n\n  NT_TESTCASE(\"arena: single allocation\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    void* ptr = arena.alloc(64);\n    NT_ASSERT(ptr != nullptr);\n    NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT));\n    NT_ASSERT(arena.total_used() >= 64);\n\n    // Write to the memory to verify it's usable\n    std::memset(ptr, 0xAB, 64);\n  }\n\n  NT_TESTCASE(\"arena: multiple sequential allocations\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    void* ptrs[10];\n    for (int i = 0; i < 10; ++i) {\n      ptrs[i] = arena.alloc(32);\n      NT_ASSERT(ptrs[i] != nullptr);\n      NT_ASSERT(is_aligned(ptrs[i], NGFI_MAX_ALIGNMENT));\n\n      // Write unique pattern\n      std::memset(ptrs[i], i + 1, 32);\n    }\n\n    // Verify patterns\n    for (int i = 0; i < 10; ++i) {\n      uint8_t* bytes = static_cast<uint8_t*>(ptrs[i]);\n      for (int j = 0; j < 32; ++j) {\n        NT_ASSERT(bytes[j] == static_cast<uint8_t>(i + 1));\n      }\n    }\n  }\n\n  NT_TESTCASE(\"arena: allocations are distinct\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    void* ptr1 = arena.alloc(100);\n    void* ptr2 = arena.alloc(100);\n    void* ptr3 = arena.alloc(100);\n\n    NT_ASSERT(ptr1 != nullptr);\n    NT_ASSERT(ptr2 != nullptr);\n    NT_ASSERT(ptr3 != nullptr);\n    NT_ASSERT(ptr1 != ptr2);\n    NT_ASSERT(ptr2 != ptr3);\n    NT_ASSERT(ptr1 != ptr3);\n  }\n\n  /* Capacity tests */\n\n  NT_TESTCASE(\"arena: fill initial capacity\") {\n    const size_t capacity = 256;\n    
ngfi::arena   arena    = ngfi::arena::create(capacity);\n    NT_ASSERT(arena.is_valid());\n\n    // Allocate small chunks until we exceed initial capacity\n    size_t total_alloc = 0;\n    while (total_alloc < capacity * 2) {\n      void* ptr = arena.alloc(16);\n      NT_ASSERT(ptr != nullptr);\n      total_alloc += 16;\n    }\n  }\n\n  NT_TESTCASE(\"arena: trigger block growth\") {\n    const size_t initial_capacity = 64;\n    ngfi::arena   arena            = ngfi::arena::create(initial_capacity);\n    NT_ASSERT(arena.is_valid());\n\n    size_t initial_allocated = arena.total_allocated();\n\n    // Allocate more than initial capacity\n    void* ptr1 = arena.alloc(initial_capacity);\n    NT_ASSERT(ptr1 != nullptr);\n\n    void* ptr2 = arena.alloc(initial_capacity);\n    NT_ASSERT(ptr2 != nullptr);\n\n    // Should have grown\n    NT_ASSERT(arena.total_allocated() > initial_allocated);\n  }\n\n  NT_TESTCASE(\"arena: large allocation exceeding initial capacity\") {\n    const size_t initial_capacity = 64;\n    ngfi::arena   arena            = ngfi::arena::create(initial_capacity);\n    NT_ASSERT(arena.is_valid());\n\n    // Allocate more than initial capacity in one go\n    void* ptr = arena.alloc(initial_capacity * 4);\n    NT_ASSERT(ptr != nullptr);\n    NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT));\n  }\n\n  NT_TESTCASE(\"arena: many small allocations\") {\n    ngfi::arena arena = ngfi::arena::create(128);\n    NT_ASSERT(arena.is_valid());\n\n    // Many small allocations\n    for (int i = 0; i < 1000; ++i) {\n      void* ptr = arena.alloc(1);\n      NT_ASSERT(ptr != nullptr);\n    }\n  }\n\n  /* Reset tests */\n\n  NT_TESTCASE(\"arena: reset and reallocate\") {\n    ngfi::arena arena = ngfi::arena::create(256);\n    NT_ASSERT(arena.is_valid());\n\n    void* ptr1 = arena.alloc(100);\n    NT_ASSERT(ptr1 != nullptr);\n    NT_ASSERT(arena.total_used() >= 100);\n\n    arena.reset();\n    NT_ASSERT(arena.total_used() == 0);\n\n    void* ptr2 = arena.alloc(100);\n    
NT_ASSERT(ptr2 != nullptr);\n    NT_ASSERT(arena.total_used() >= 100);\n  }\n\n  NT_TESTCASE(\"arena: multiple reset cycles\") {\n    ngfi::arena arena = ngfi::arena::create(128);\n    NT_ASSERT(arena.is_valid());\n\n    for (int cycle = 0; cycle < 10; ++cycle) {\n      for (int i = 0; i < 20; ++i) {\n        void* ptr = arena.alloc(16);\n        NT_ASSERT(ptr != nullptr);\n      }\n      arena.reset();\n      NT_ASSERT(arena.total_used() == 0);\n    }\n  }\n\n  NT_TESTCASE(\"arena: reset releases overflow blocks\") {\n    const size_t initial_capacity = 64;\n    ngfi::arena   arena            = ngfi::arena::create(initial_capacity);\n    NT_ASSERT(arena.is_valid());\n\n    size_t initial_allocated = arena.total_allocated();\n\n    // Force overflow blocks\n    for (int i = 0; i < 10; ++i) {\n      arena.alloc(initial_capacity);\n    }\n    NT_ASSERT(arena.total_allocated() > initial_allocated);\n\n    arena.reset();\n    NT_ASSERT(arena.total_used() == 0);\n    NT_ASSERT(arena.total_allocated() == initial_allocated);\n  }\n\n  /* Alignment tests */\n\n  NT_TESTCASE(\"arena: default alignment\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    for (int i = 0; i < 100; ++i) {\n      void* ptr = arena.alloc(1 + (i % 32));\n      NT_ASSERT(ptr != nullptr);\n      NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT));\n    }\n  }\n\n  NT_TESTCASE(\"arena: custom alignments\") {\n    ngfi::arena arena = ngfi::arena::create(4096);\n    NT_ASSERT(arena.is_valid());\n\n    size_t alignments[] = {1, 2, 4, 8, 16, 32, 64};\n    for (size_t align : alignments) {\n      void* ptr = arena.alloc_aligned(32, align);\n      NT_ASSERT(ptr != nullptr);\n      NT_ASSERT(is_aligned(ptr, align));\n    }\n  }\n\n  NT_TESTCASE(\"arena: alignment near block boundary\") {\n    const size_t initial_capacity = 128;\n    ngfi::arena   arena            = ngfi::arena::create(initial_capacity);\n    NT_ASSERT(arena.is_valid());\n\n    // Fill most of the 
block\n    arena.alloc(initial_capacity - 20);\n\n    // Allocate with large alignment - should go to new block\n    void* ptr = arena.alloc_aligned(16, 64);\n    NT_ASSERT(ptr != nullptr);\n    NT_ASSERT(is_aligned(ptr, 64));\n  }\n\n  /* Edge cases */\n\n  NT_TESTCASE(\"arena: zero-size allocation returns nullptr\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    void* ptr = arena.alloc(0);\n    NT_ASSERT(ptr == nullptr);\n  }\n\n  NT_TESTCASE(\"arena: alloc on invalid arena returns nullptr\") {\n    ngfi::arena arena;\n    NT_ASSERT(!arena.is_valid());\n\n    void* ptr = arena.alloc(64);\n    NT_ASSERT(ptr == nullptr);\n  }\n\n  NT_TESTCASE(\"arena: reset on invalid arena is safe\") {\n    ngfi::arena arena;\n    NT_ASSERT(!arena.is_valid());\n    arena.reset();  // Should not crash\n  }\n\n  /* Move semantics tests */\n\n  NT_TESTCASE(\"arena: move constructor\") {\n    ngfi::arena arena1 = ngfi::arena::create(256);\n    NT_ASSERT(arena1.is_valid());\n\n    void* ptr = arena1.alloc(64);\n    NT_ASSERT(ptr != nullptr);\n\n    size_t used      = arena1.total_used();\n    size_t allocated = arena1.total_allocated();\n\n    ngfi::arena arena2(static_cast<ngfi::arena&&>(arena1));\n\n    // arena2 should have taken ownership\n    NT_ASSERT(arena2.is_valid());\n    NT_ASSERT(arena2.total_used() == used);\n    NT_ASSERT(arena2.total_allocated() == allocated);\n\n    // arena1 should be invalid\n    NT_ASSERT(!arena1.is_valid());\n    NT_ASSERT(arena1.total_used() == 0);\n    NT_ASSERT(arena1.total_allocated() == 0);\n\n    // Can still allocate from arena2\n    void* ptr2 = arena2.alloc(64);\n    NT_ASSERT(ptr2 != nullptr);\n  }\n\n  /* Fuzz tests */\n\n  NT_TESTCASE(\"arena: fuzz random allocation sizes\") {\n    std::srand(static_cast<unsigned>(std::time(nullptr)));\n    ngfi::arena arena = ngfi::arena::create(256);\n    NT_ASSERT(arena.is_valid());\n\n    for (int i = 0; i < 1000; ++i) {\n      size_t size = 1 + 
(std::rand() % 256);\n      void*  ptr  = arena.alloc(size);\n      NT_ASSERT(ptr != nullptr);\n      NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT));\n    }\n  }\n\n  NT_TESTCASE(\"arena: fuzz random reset patterns\") {\n    std::srand(static_cast<unsigned>(std::time(nullptr)));\n    ngfi::arena arena = ngfi::arena::create(128);\n    NT_ASSERT(arena.is_valid());\n\n    for (int i = 0; i < 500; ++i) {\n      size_t size = 1 + (std::rand() % 64);\n      void*  ptr  = arena.alloc(size);\n      NT_ASSERT(ptr != nullptr);\n\n      // Randomly reset\n      if (std::rand() % 10 == 0) {\n        arena.reset();\n        NT_ASSERT(arena.total_used() == 0);\n      }\n    }\n  }\n\n  /* Statistics tests */\n\n  NT_TESTCASE(\"arena: total_allocated tracking\") {\n    ngfi::arena arena = ngfi::arena::create(256);\n    NT_ASSERT(arena.is_valid());\n\n    size_t initial = arena.total_allocated();\n    NT_ASSERT(initial > 0);\n\n    // Force growth\n    arena.alloc(512);\n    NT_ASSERT(arena.total_allocated() > initial);\n  }\n\n  NT_TESTCASE(\"arena: total_used tracking\") {\n    ngfi::arena arena = ngfi::arena::create(1024);\n    NT_ASSERT(arena.is_valid());\n\n    NT_ASSERT(arena.total_used() == 0);\n\n    arena.alloc(64);\n    NT_ASSERT(arena.total_used() >= 64);\n\n    size_t used_before = arena.total_used();\n    arena.alloc(128);\n    NT_ASSERT(arena.total_used() >= used_before + 128);\n\n    arena.reset();\n    NT_ASSERT(arena.total_used() == 0);\n  }\n\n}\n"
  },
  {
    "path": "tests/common-tests.cpp",
    "content": "#include \"ngf-common/arena.h\"\n#include \"ngf-common/array.h\"\n#include \"ngf-common/chunked-list.h\"\n#include \"ngf-common/cmdbuf-state.h\"\n#include \"ngf-common/frame-token.h\"\n#include \"ngf-common/hashtable.h\"\n#include \"ngf-common/unique-ptr.h\"\n#include \"ngf-common/value-or-error.h\"\n\n#include \"utest.h\"\n\n// Use system allocator for tests to avoid NGF allocation callback setup.\ntemplate<class T>\nusing test_array = ngfi::array<T, ngfi::system_alloc_callbacks>;\n\nUTEST_STATE();\n\nint main(int argc, const char* const argv[]) {\n  // Initialize NGF allocation callbacks (initializes the mutex).\n  ngfi_set_allocation_callbacks(NULL);\n  return utest_main(argc, argv);\n}\n\nUTEST(array, default_construction) {\n  test_array<int> arr;\n  ASSERT_EQ(0u, arr.size());\n  ASSERT_EQ(0u, arr.capacity());\n  ASSERT_TRUE(arr.empty());\n  ASSERT_EQ(nullptr, arr.data());\n}\n\nUTEST(array, size_construction) {\n  test_array<int> arr(10);\n  ASSERT_EQ(10u, arr.size());\n  ASSERT_EQ(10u, arr.capacity());\n  ASSERT_FALSE(arr.empty());\n  ASSERT_NE(nullptr, arr.data());\n}\n\nUTEST(array, push_back) {\n  test_array<int> arr;\n  arr.push_back(1);\n  arr.push_back(2);\n  arr.push_back(3);\n  ASSERT_EQ(3u, arr.size());\n  ASSERT_EQ(1, arr[0]);\n  ASSERT_EQ(2, arr[1]);\n  ASSERT_EQ(3, arr[2]);\n}\n\nUTEST(array, emplace_back) {\n  test_array<int> arr;\n  arr.emplace_back(42);\n  arr.emplace_back(100);\n  ASSERT_EQ(2u, arr.size());\n  ASSERT_EQ(42, arr[0]);\n  ASSERT_EQ(100, arr[1]);\n}\n\nUTEST(array, pop_back) {\n  test_array<int> arr;\n  arr.push_back(1);\n  arr.push_back(2);\n  arr.push_back(3);\n  arr.pop_back();\n  ASSERT_EQ(2u, arr.size());\n  ASSERT_EQ(1, arr[0]);\n  ASSERT_EQ(2, arr[1]);\n}\n\nUTEST(array, pop_back_empty) {\n  test_array<int> arr;\n  arr.pop_back();  // Should not crash.\n  ASSERT_EQ(0u, arr.size());\n}\n\nUTEST(array, clear) {\n  test_array<int> arr;\n  arr.push_back(1);\n  arr.push_back(2);\n  arr.clear();\n  ASSERT_EQ(0u, 
arr.size());\n  ASSERT_TRUE(arr.empty());\n  ASSERT_GT(arr.capacity(), 0u);  // Capacity should remain.\n}\n\nUTEST(array, front_and_back) {\n  test_array<int> arr;\n  arr.push_back(10);\n  arr.push_back(20);\n  arr.push_back(30);\n  ASSERT_EQ(10, arr.front());\n  ASSERT_EQ(30, arr.back());\n}\n\nUTEST(array, resize_grow) {\n  test_array<int> arr;\n  arr.resize(5);\n  ASSERT_EQ(5u, arr.size());\n  ASSERT_GE(arr.capacity(), 5u);\n}\n\nUTEST(array, resize_shrink) {\n  test_array<int> arr;\n  arr.push_back(1);\n  arr.push_back(2);\n  arr.push_back(3);\n  arr.resize(1);\n  ASSERT_EQ(1u, arr.size());\n  ASSERT_EQ(1, arr[0]);\n}\n\nUTEST(array, reserve) {\n  test_array<int> arr;\n  arr.reserve(100);\n  ASSERT_EQ(0u, arr.size());\n  ASSERT_GE(arr.capacity(), 100u);\n}\n\nUTEST(array, reserve_smaller_noop) {\n  test_array<int> arr;\n  arr.reserve(100);\n  size_t cap = arr.capacity();\n  arr.reserve(50);\n  ASSERT_EQ(cap, arr.capacity());  // Should not shrink.\n}\n\nUTEST(array, iterators) {\n  test_array<int> arr;\n  arr.push_back(1);\n  arr.push_back(2);\n  arr.push_back(3);\n\n  int sum = 0;\n  for (auto it = arr.begin(); it != arr.end(); ++it) {\n    sum += *it;\n  }\n  ASSERT_EQ(6, sum);\n}\n\nUTEST(array, range_for) {\n  test_array<int> arr;\n  arr.push_back(10);\n  arr.push_back(20);\n  arr.push_back(30);\n\n  int sum = 0;\n  for (int val : arr) {\n    sum += val;\n  }\n  ASSERT_EQ(60, sum);\n}\n\nUTEST(array, move_construction) {\n  test_array<int> arr1;\n  arr1.push_back(1);\n  arr1.push_back(2);\n\n  test_array<int> arr2(ngfi::move(arr1));\n  ASSERT_EQ(2u, arr2.size());\n  ASSERT_EQ(1, arr2[0]);\n  ASSERT_EQ(2, arr2[1]);\n  ASSERT_EQ(0u, arr1.size());\n  ASSERT_EQ(nullptr, arr1.data());\n}\n\nUTEST(array, move_assignment) {\n  test_array<int> arr1;\n  arr1.push_back(1);\n  arr1.push_back(2);\n\n  test_array<int> arr2;\n  arr2.push_back(100);\n\n  arr2 = ngfi::move(arr1);\n  ASSERT_EQ(2u, arr2.size());\n  ASSERT_EQ(1, arr2[0]);\n  ASSERT_EQ(2, arr2[1]);\n  
ASSERT_EQ(0u, arr1.size());\n  ASSERT_EQ(nullptr, arr1.data());\n}\n\nUTEST(array, growth_on_push) {\n  test_array<int> arr;\n  for (int i = 0; i < 100; ++i) {\n    arr.push_back(i);\n  }\n  ASSERT_EQ(100u, arr.size());\n  for (int i = 0; i < 100; ++i) {\n    ASSERT_EQ(i, arr[(size_t)i]);\n  }\n}\n\n// Helper struct for value_or_error tests.\nstruct test_value {\n  int x;\n  int y;\n  test_value(int x_, int y_) : x(x_), y(y_) {}\n  test_value(test_value&& other) : x(other.x), y(other.y) {\n    other.x = 0;\n    other.y = 0;\n  }\n  test_value& operator=(test_value&& other) {\n    x = other.x;\n    y = other.y;\n    other.x = 0;\n    other.y = 0;\n    return *this;\n  }\n};\n\nUTEST(value_or_error, construct_with_value) {\n  ngfi::value_or_ngferr<int> result{42};\n  ASSERT_FALSE(result.has_error());\n  ASSERT_EQ(NGF_ERROR_OK, result.error());\n  ASSERT_EQ(42, result.value());\n}\n\nUTEST(value_or_error, construct_with_error) {\n  ngfi::value_or_ngferr<int> result{NGF_ERROR_OUT_OF_MEM};\n  ASSERT_TRUE(result.has_error());\n  ASSERT_EQ(NGF_ERROR_OUT_OF_MEM, result.error());\n}\n\nUTEST(value_or_error, construct_with_struct_value) {\n  ngfi::value_or_ngferr<test_value> result{test_value{10, 20}};\n  ASSERT_FALSE(result.has_error());\n  ASSERT_EQ(10, result.value().x);\n  ASSERT_EQ(20, result.value().y);\n}\n\nUTEST(value_or_error, modify_value) {\n  ngfi::value_or_ngferr<int> result{100};\n  result.value() = 200;\n  ASSERT_EQ(200, result.value());\n}\n\nUTEST(value_or_error, move_construction_with_value) {\n  ngfi::value_or_ngferr<test_value> result1{test_value{5, 10}};\n  ngfi::value_or_ngferr<test_value> result2{ngfi::move(result1)};\n\n  ASSERT_FALSE(result2.has_error());\n  ASSERT_EQ(5, result2.value().x);\n  ASSERT_EQ(10, result2.value().y);\n  // After move, result1 should have error (missing_value_error).\n  ASSERT_TRUE(result1.has_error());\n}\n\nUTEST(value_or_error, move_construction_with_error) {\n  ngfi::value_or_ngferr<int> 
result1{NGF_ERROR_OBJECT_CREATION_FAILED};\n  ngfi::value_or_ngferr<int> result2{ngfi::move(result1)};\n\n  ASSERT_TRUE(result2.has_error());\n  ASSERT_EQ(NGF_ERROR_OBJECT_CREATION_FAILED, result2.error());\n}\n\nUTEST(value_or_error, move_assignment_value_to_value) {\n  ngfi::value_or_ngferr<test_value> result1{test_value{1, 2}};\n  ngfi::value_or_ngferr<test_value> result2{test_value{3, 4}};\n\n  result2 = ngfi::move(result1);\n\n  ASSERT_FALSE(result2.has_error());\n  ASSERT_EQ(1, result2.value().x);\n  ASSERT_EQ(2, result2.value().y);\n  ASSERT_TRUE(result1.has_error());\n}\n\nUTEST(value_or_error, move_assignment_error_to_value) {\n  ngfi::value_or_ngferr<int> result1{NGF_ERROR_OUT_OF_MEM};\n  ngfi::value_or_ngferr<int> result2{42};\n\n  result2 = ngfi::move(result1);\n\n  ASSERT_TRUE(result2.has_error());\n  ASSERT_EQ(NGF_ERROR_OUT_OF_MEM, result2.error());\n}\n\nUTEST(value_or_error, move_assignment_value_to_error) {\n  ngfi::value_or_ngferr<int> result1{42};\n  ngfi::value_or_ngferr<int> result2{NGF_ERROR_OUT_OF_MEM};\n\n  result2 = ngfi::move(result1);\n\n  ASSERT_FALSE(result2.has_error());\n  ASSERT_EQ(42, result2.value());\n}\n\nUTEST(value_or_error, const_value_access) {\n  const ngfi::value_or_ngferr<int> result{99};\n  ASSERT_EQ(99, result.value());\n}\n\n// Helper struct for unique_ptr tests.\nstruct tracked_object {\n  static int instance_count;\n  int value;\n\n  tracked_object(int v = 0) : value(v) { ++instance_count; }\n  ~tracked_object() { --instance_count; }\n};\n\nint tracked_object::instance_count = 0;\n\nUTEST(unique_ptr, default_construction) {\n  ngfi::unique_ptr<int> ptr;\n  ASSERT_FALSE(ptr);\n  ASSERT_EQ(nullptr, ptr.get());\n}\n\nUTEST(unique_ptr, construct_from_pointer) {\n  tracked_object::instance_count = 0;\n  {\n    auto* raw = NGFI_ALLOC(tracked_object);\n    ngfi::unique_ptr<tracked_object> ptr{raw};\n    ASSERT_TRUE(ptr);\n    ASSERT_EQ(raw, ptr.get());\n    ASSERT_EQ(1, tracked_object::instance_count);\n  }\n  ASSERT_EQ(0, 
tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, make) {\n  tracked_object::instance_count = 0;\n  {\n    auto ptr = ngfi::unique_ptr<tracked_object>::make(42);\n    ASSERT_TRUE(ptr);\n    ASSERT_EQ(42, ptr->value);\n    ASSERT_EQ(1, tracked_object::instance_count);\n  }\n  ASSERT_EQ(0, tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, arrow_operator) {\n  auto ptr = ngfi::unique_ptr<tracked_object>::make(100);\n  ASSERT_EQ(100, ptr->value);\n  ptr->value = 200;\n  ASSERT_EQ(200, ptr->value);\n}\n\nUTEST(unique_ptr, release) {\n  tracked_object::instance_count = 0;\n  tracked_object* raw = nullptr;\n  {\n    auto ptr = ngfi::unique_ptr<tracked_object>::make(5);\n    raw = ptr.release();\n    ASSERT_FALSE(ptr);\n    ASSERT_EQ(nullptr, ptr.get());\n    ASSERT_EQ(1, tracked_object::instance_count);\n  }\n  // Object should still exist after unique_ptr destruction.\n  ASSERT_EQ(1, tracked_object::instance_count);\n  ASSERT_EQ(5, raw->value);\n  NGFI_FREE(raw);\n  ASSERT_EQ(0, tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, move_construction) {\n  tracked_object::instance_count = 0;\n  {\n    auto ptr1 = ngfi::unique_ptr<tracked_object>::make(10);\n    auto* raw = ptr1.get();\n\n    ngfi::unique_ptr<tracked_object> ptr2{ngfi::move(ptr1)};\n\n    ASSERT_FALSE(ptr1);\n    ASSERT_TRUE(ptr2);\n    ASSERT_EQ(raw, ptr2.get());\n    ASSERT_EQ(10, ptr2->value);\n    ASSERT_EQ(1, tracked_object::instance_count);\n  }\n  ASSERT_EQ(0, tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, move_assignment) {\n  tracked_object::instance_count = 0;\n  {\n    auto ptr1 = ngfi::unique_ptr<tracked_object>::make(1);\n    auto ptr2 = ngfi::unique_ptr<tracked_object>::make(2);\n    ASSERT_EQ(2, tracked_object::instance_count);\n\n    auto* raw1 = ptr1.get();\n    ptr2 = ngfi::move(ptr1);\n\n    ASSERT_FALSE(ptr1);\n    ASSERT_TRUE(ptr2);\n    ASSERT_EQ(raw1, ptr2.get());\n    ASSERT_EQ(1, ptr2->value);\n    // Old ptr2 object should be destroyed.\n    ASSERT_EQ(1, 
tracked_object::instance_count);\n  }\n  ASSERT_EQ(0, tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, move_assignment_to_empty) {\n  tracked_object::instance_count = 0;\n  {\n    auto ptr1 = ngfi::unique_ptr<tracked_object>::make(7);\n    ngfi::unique_ptr<tracked_object> ptr2;\n\n    ptr2 = ngfi::move(ptr1);\n\n    ASSERT_FALSE(ptr1);\n    ASSERT_TRUE(ptr2);\n    ASSERT_EQ(7, ptr2->value);\n    ASSERT_EQ(1, tracked_object::instance_count);\n  }\n  ASSERT_EQ(0, tracked_object::instance_count);\n}\n\nUTEST(unique_ptr, const_get) {\n  auto ptr = ngfi::unique_ptr<tracked_object>::make(50);\n  const auto& const_ptr = ptr;\n  ASSERT_EQ(ptr.get(), const_ptr.get());\n  ASSERT_EQ(50, const_ptr.get()->value);\n}\n\nUTEST(unique_ptr, bool_conversion) {\n  ngfi::unique_ptr<int> empty;\n  auto filled = ngfi::unique_ptr<int>::make();\n\n  ASSERT_FALSE(empty);\n  ASSERT_TRUE(filled);\n\n  if (empty) {\n    ASSERT_TRUE(false);  // Should not reach here.\n  }\n  if (filled) {\n    ASSERT_TRUE(true);  // Should reach here.\n  } else {\n    ASSERT_TRUE(false);  // Should not reach here.\n  }\n}\n\nUTEST(hashtable, default_construction) {\n  ngfi::hashtable<int> ht;\n  ASSERT_EQ(0u, ht.size());\n  ASSERT_EQ(0u, ht.capacity());\n  ASSERT_TRUE(ht.empty());\n}\n\nUTEST(hashtable, construction_with_capacity) {\n  ngfi::hashtable<int> ht(200);\n  ASSERT_EQ(0u, ht.size());\n  ASSERT_TRUE(ht.empty());\n  // Capacity is only allocated on first insert.\n}\n\nUTEST(hashtable, insert_and_get) {\n  ngfi::hashtable<int> ht;\n  int* val = ht.insert(42, 100);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(100, *val);\n  ASSERT_EQ(1u, ht.size());\n  ASSERT_FALSE(ht.empty());\n\n  int* retrieved = ht.get(42);\n  ASSERT_NE(nullptr, retrieved);\n  ASSERT_EQ(100, *retrieved);\n}\n\nUTEST(hashtable, get_nonexistent) {\n  ngfi::hashtable<int> ht;\n  ht.insert(1, 10);\n  int* val = ht.get(999);\n  ASSERT_EQ(nullptr, val);\n}\n\nUTEST(hashtable, get_empty_table) {\n  ngfi::hashtable<int> ht;\n  int* val = 
ht.get(42);\n  ASSERT_EQ(nullptr, val);\n}\n\nUTEST(hashtable, insert_update_existing) {\n  ngfi::hashtable<int> ht;\n  ht.insert(5, 50);\n  ASSERT_EQ(1u, ht.size());\n\n  int* val = ht.insert(5, 500);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(500, *val);\n  ASSERT_EQ(1u, ht.size());  // Size should not increase.\n}\n\nUTEST(hashtable, multiple_inserts) {\n  ngfi::hashtable<int> ht;\n  for (uint64_t i = 0; i < 50; ++i) {\n    ht.insert(i, static_cast<int>(i * 10));\n  }\n  ASSERT_EQ(50u, ht.size());\n\n  for (uint64_t i = 0; i < 50; ++i) {\n    int* val = ht.get(i);\n    ASSERT_NE(nullptr, val);\n    ASSERT_EQ(static_cast<int>(i * 10), *val);\n  }\n}\n\nUTEST(hashtable, get_or_insert_new) {\n  ngfi::hashtable<int> ht;\n  bool is_new = false;\n  int* val = ht.get_or_insert(10, 100, is_new);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(100, *val);\n  ASSERT_TRUE(is_new);\n  ASSERT_EQ(1u, ht.size());\n}\n\nUTEST(hashtable, get_or_insert_existing) {\n  ngfi::hashtable<int> ht;\n  ht.insert(10, 100);\n\n  bool is_new = true;\n  int* val = ht.get_or_insert(10, 999, is_new);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(100, *val);  // Should return existing value, not default.\n  ASSERT_FALSE(is_new);\n  ASSERT_EQ(1u, ht.size());\n}\n\nUTEST(hashtable, clear) {\n  ngfi::hashtable<int> ht;\n  ht.insert(1, 10);\n  ht.insert(2, 20);\n  ht.insert(3, 30);\n  ASSERT_EQ(3u, ht.size());\n\n  ht.clear();\n  ASSERT_EQ(0u, ht.size());\n  ASSERT_TRUE(ht.empty());\n  ASSERT_GT(ht.capacity(), 0u);  // Capacity should remain.\n\n  // Verify entries are gone.\n  ASSERT_EQ(nullptr, ht.get(1));\n  ASSERT_EQ(nullptr, ht.get(2));\n  ASSERT_EQ(nullptr, ht.get(3));\n}\n\nUTEST(hashtable, prehashed_operations) {\n  ngfi::hashtable<int> ht;\n  auto kh = ngfi::hashtable<int>::compute_hash(42);\n\n  int* val = ht.insert_prehashed(kh, 100);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(100, *val);\n\n  int* retrieved = ht.get_prehashed(kh);\n  ASSERT_NE(nullptr, retrieved);\n  ASSERT_EQ(100, 
*retrieved);\n}\n\nUTEST(hashtable, move_construction) {\n  ngfi::hashtable<int> ht1;\n  ht1.insert(1, 10);\n  ht1.insert(2, 20);\n\n  ngfi::hashtable<int> ht2(ngfi::move(ht1));\n\n  ASSERT_EQ(0u, ht1.size());\n  ASSERT_EQ(2u, ht2.size());\n  ASSERT_EQ(10, *ht2.get(1));\n  ASSERT_EQ(20, *ht2.get(2));\n}\n\nUTEST(hashtable, move_assignment) {\n  ngfi::hashtable<int> ht1;\n  ht1.insert(1, 10);\n  ht1.insert(2, 20);\n\n  ngfi::hashtable<int> ht2;\n  ht2.insert(100, 1000);\n\n  ht2 = ngfi::move(ht1);\n\n  ASSERT_EQ(0u, ht1.size());\n  ASSERT_EQ(2u, ht2.size());\n  ASSERT_EQ(10, *ht2.get(1));\n  ASSERT_EQ(20, *ht2.get(2));\n  ASSERT_EQ(nullptr, ht2.get(100));  // Old entry should be gone.\n}\n\nUTEST(hashtable, iteration) {\n  ngfi::hashtable<int> ht;\n  ht.insert(1, 10);\n  ht.insert(2, 20);\n  ht.insert(3, 30);\n\n  int sum_keys = 0;\n  int sum_values = 0;\n  int count = 0;\n  for (auto it = ht.begin(); it != ht.end(); ++it) {\n    sum_keys += static_cast<int>(it->key);\n    sum_values += it->value;\n    ++count;\n  }\n\n  ASSERT_EQ(3, count);\n  ASSERT_EQ(6, sum_keys);    // 1 + 2 + 3\n  ASSERT_EQ(60, sum_values); // 10 + 20 + 30\n}\n\nUTEST(hashtable, iteration_empty) {\n  ngfi::hashtable<int> ht;\n  int count = 0;\n  for (auto it = ht.begin(); it != ht.end(); ++it) {\n    ++count;\n  }\n  ASSERT_EQ(0, count);\n}\n\nUTEST(hashtable, rehash_on_load) {\n  ngfi::hashtable<int> ht(10);  // Small initial capacity.\n  size_t initial_cap = 0;\n\n  for (uint64_t i = 0; i < 100; ++i) {\n    ht.insert(i, static_cast<int>(i));\n    if (i == 0) {\n      initial_cap = ht.capacity();\n    }\n  }\n\n  ASSERT_EQ(100u, ht.size());\n  ASSERT_GT(ht.capacity(), initial_cap);  // Should have grown.\n\n  // Verify all entries are still accessible.\n  for (uint64_t i = 0; i < 100; ++i) {\n    int* val = ht.get(i);\n    ASSERT_NE(nullptr, val);\n    ASSERT_EQ(static_cast<int>(i), *val);\n  }\n}\n\nUTEST(hashtable, const_get) {\n  ngfi::hashtable<int> ht;\n  ht.insert(42, 100);\n\n  const 
auto& const_ht = ht;\n  const int* val = const_ht.get(42);\n  ASSERT_NE(nullptr, val);\n  ASSERT_EQ(100, *val);\n}\n\nUTEST(hashtable, struct_value) {\n  struct point {\n    int x;\n    int y;\n  };\n\n  ngfi::hashtable<point> ht;\n  ht.insert(1, point{10, 20});\n  ht.insert(2, point{30, 40});\n\n  point* p1 = ht.get(1);\n  ASSERT_NE(nullptr, p1);\n  ASSERT_EQ(10, p1->x);\n  ASSERT_EQ(20, p1->y);\n\n  point* p2 = ht.get(2);\n  ASSERT_NE(nullptr, p2);\n  ASSERT_EQ(30, p2->x);\n  ASSERT_EQ(40, p2->y);\n}\n\n// Mock command buffer for testing state transitions.\nstruct mock_cmd_buffer {\n  ngfi::cmd_buffer_state state;\n  bool                   renderpass_active;\n  bool                   compute_pass_active;\n  bool                   xfer_pass_active;\n\n  void reset() {\n    state              = ngfi::CMD_BUFFER_STATE_NEW;\n    renderpass_active  = false;\n    compute_pass_active = false;\n    xfer_pass_active   = false;\n  }\n};\n\nUTEST(cmdbuf_state, new_to_ready) {\n  mock_cmd_buffer buf;\n  buf.reset();\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_recording) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);\n}\n\nUTEST(cmdbuf_state, recording_to_ready_to_submit) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_RECORDING;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT, buf.state);\n}\n\nUTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_renderpass) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state             = ngfi::CMD_BUFFER_STATE_RECORDING;\n 
 buf.renderpass_active = true;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);  // State unchanged.\n}\n\nUTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_compute_pass) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state              = ngfi::CMD_BUFFER_STATE_RECORDING;\n  buf.compute_pass_active = true;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);\n}\n\nUTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_xfer_pass) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state            = ngfi::CMD_BUFFER_STATE_RECORDING;\n  buf.xfer_pass_active = true;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_submit_to_pending) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_pending) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state);\n}\n\nUTEST(cmdbuf_state, pending_to_submitted) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_PENDING;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_SUBMITTED, buf.state);\n}\n\nUTEST(cmdbuf_state, submitted_to_ready) {\n  
mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_SUBMITTED;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_ready) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_submit_to_recording) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING);\n  ASSERT_TRUE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);\n}\n\nUTEST(cmdbuf_state, cannot_transition_to_new) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_NEW);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);  // State unchanged.\n}\n\nUTEST(cmdbuf_state, new_to_recording_fails) {\n  mock_cmd_buffer buf;\n  buf.reset();\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_NEW, buf.state);\n}\n\nUTEST(cmdbuf_state, new_to_pending_fails) {\n  mock_cmd_buffer buf;\n  buf.reset();\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_NEW, buf.state);\n}\n\nUTEST(cmdbuf_state, recording_to_pending_fails) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_RECORDING;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, 
buf.state);\n}\n\nUTEST(cmdbuf_state, ready_to_submitted_fails) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_READY;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n}\n\nUTEST(cmdbuf_state, pending_to_ready_fails) {\n  mock_cmd_buffer buf;\n  buf.reset();\n  buf.state = ngfi::CMD_BUFFER_STATE_PENDING;\n\n  bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY);\n  ASSERT_FALSE(result);\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state);\n}\n\nUTEST(cmdbuf_state, full_lifecycle) {\n  mock_cmd_buffer buf;\n  buf.reset();\n\n  // NEW -> READY\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n\n  // READY -> RECORDING\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state);\n\n  // RECORDING -> READY_TO_SUBMIT\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT, buf.state);\n\n  // READY_TO_SUBMIT -> PENDING\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state);\n\n  // PENDING -> SUBMITTED\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_SUBMITTED, buf.state);\n\n  // SUBMITTED -> READY (reuse)\n  ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY));\n  ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state);\n}\n\nUTEST(chunked_list, append_single_element) {\n  ngfi::arena a(1024);\n  ngfi::chunked_list<int> list;\n\n  int* ptr = list.append(42, a);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_EQ(42, *ptr);\n}\n\nUTEST(chunked_list, append_multiple_elements) {\n  ngfi::arena a(1024);\n 
 ngfi::chunked_list<int> list;\n\n  for (int i = 0; i < 5; ++i) {\n    int* ptr = list.append(i * 10, a);\n    ASSERT_NE(nullptr, ptr);\n    ASSERT_EQ(i * 10, *ptr);\n  }\n}\n\nUTEST(chunked_list, iterate_elements) {\n  ngfi::arena a(1024);\n  ngfi::chunked_list<int> list;\n\n  list.append(1, a);\n  list.append(2, a);\n  list.append(3, a);\n\n  int sum = 0;\n  int count = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    sum += *it;\n    ++count;\n  }\n\n  ASSERT_EQ(3, count);\n  ASSERT_EQ(6, sum);\n}\n\nUTEST(chunked_list, iterate_empty) {\n  ngfi::chunked_list<int> list;\n\n  int count = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    ++count;\n  }\n\n  ASSERT_EQ(0, count);\n}\n\nUTEST(chunked_list, clear) {\n  ngfi::arena a(1024);\n  ngfi::chunked_list<int> list;\n\n  list.append(1, a);\n  list.append(2, a);\n  list.append(3, a);\n\n  list.clear();\n\n  int count = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    ++count;\n  }\n\n  ASSERT_EQ(0, count);\n}\n\nUTEST(chunked_list, append_after_clear) {\n  ngfi::arena a(1024);\n  ngfi::chunked_list<int> list;\n\n  list.append(1, a);\n  list.append(2, a);\n  list.clear();\n\n  int* ptr = list.append(100, a);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_EQ(100, *ptr);\n\n  int count = 0;\n  int value = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    value = *it;\n    ++count;\n  }\n\n  ASSERT_EQ(1, count);\n  ASSERT_EQ(100, value);\n}\n\nUTEST(chunked_list, spans_multiple_chunks) {\n  ngfi::arena a(4096);\n  // Use small chunk capacity to force multiple chunks.\n  ngfi::chunked_list<int, 3> list;\n\n  // Insert more elements than one chunk can hold.\n  for (int i = 0; i < 10; ++i) {\n    int* ptr = list.append(i, a);\n    ASSERT_NE(nullptr, ptr);\n    ASSERT_EQ(i, *ptr);\n  }\n\n  // Verify all elements are accessible via iteration.\n  int count = 0;\n  int sum = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    sum += *it;\n  
  ++count;\n  }\n\n  ASSERT_EQ(10, count);\n  ASSERT_EQ(45, sum);  // 0+1+2+...+9 = 45\n}\n\nUTEST(chunked_list, iteration_order_preserved) {\n  ngfi::arena a(4096);\n  ngfi::chunked_list<int, 2> list;  // Very small chunks.\n\n  int expected[] = {10, 20, 30, 40, 50, 60, 70};\n  for (int val : expected) {\n    list.append(val, a);\n  }\n\n  int idx = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    ASSERT_EQ(expected[idx], *it);\n    ++idx;\n  }\n  ASSERT_EQ(7, idx);\n}\n\nUTEST(chunked_list, struct_elements) {\n  struct point {\n    int x;\n    int y;\n  };\n\n  ngfi::arena a(1024);\n  ngfi::chunked_list<point> list;\n\n  point* p1 = list.append(point{1, 2}, a);\n  point* p2 = list.append(point{3, 4}, a);\n  point* p3 = list.append(point{5, 6}, a);\n\n  ASSERT_NE(nullptr, p1);\n  ASSERT_NE(nullptr, p2);\n  ASSERT_NE(nullptr, p3);\n\n  ASSERT_EQ(1, p1->x);\n  ASSERT_EQ(2, p1->y);\n  ASSERT_EQ(3, p2->x);\n  ASSERT_EQ(4, p2->y);\n  ASSERT_EQ(5, p3->x);\n  ASSERT_EQ(6, p3->y);\n}\n\nUTEST(chunked_list, const_iteration) {\n  ngfi::arena a(1024);\n  ngfi::chunked_list<int> list;\n\n  list.append(10, a);\n  list.append(20, a);\n  list.append(30, a);\n\n  const auto& const_list = list;\n\n  int sum = 0;\n  for (auto it = const_list.begin(); !(it == const_list.end()); ++it) {\n    sum += *it;\n  }\n\n  ASSERT_EQ(60, sum);\n}\n\nUTEST(chunked_list, exact_chunk_boundary) {\n  ngfi::arena a(4096);\n  ngfi::chunked_list<int, 5> list;  // Chunk capacity of 5.\n\n  // Insert exactly 5 elements (fills one chunk exactly).\n  for (int i = 0; i < 5; ++i) {\n    list.append(i, a);\n  }\n\n  int count = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    ++count;\n  }\n  ASSERT_EQ(5, count);\n\n  // Insert one more to trigger new chunk.\n  list.append(5, a);\n\n  count = 0;\n  for (auto it = list.begin(); !(it == list.end()); ++it) {\n    ++count;\n  }\n  ASSERT_EQ(6, count);\n}\n\nUTEST(arena, default_construction) {\n  ngfi::arena a;\n  
ASSERT_EQ(0u, a.total_allocated());\n  ASSERT_EQ(0u, a.total_used());\n}\n\nUTEST(arena, construction_with_capacity) {\n  ngfi::arena a(1024);\n  // No allocation until first alloc call.\n  ASSERT_EQ(0u, a.total_allocated());\n  ASSERT_EQ(0u, a.total_used());\n}\n\nUTEST(arena, alloc_basic) {\n  ngfi::arena a(1024);\n  void* ptr = a.alloc(64);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_GT(a.total_allocated(), 0u);\n  ASSERT_GT(a.total_used(), 0u);\n}\n\nUTEST(arena, alloc_typed_single) {\n  ngfi::arena a(1024);\n  int* ptr = a.alloc<int>();\n  ASSERT_NE(nullptr, ptr);\n  *ptr = 42;\n  ASSERT_EQ(42, *ptr);\n}\n\nUTEST(arena, alloc_typed_array) {\n  ngfi::arena a(1024);\n  int* arr = a.alloc<int>(10);\n  ASSERT_NE(nullptr, arr);\n\n  for (int i = 0; i < 10; ++i) {\n    arr[i] = i * 10;\n  }\n\n  for (int i = 0; i < 10; ++i) {\n    ASSERT_EQ(i * 10, arr[i]);\n  }\n}\n\nUTEST(arena, alloc_struct) {\n  struct test_struct {\n    int x;\n    float y;\n    char z;\n  };\n\n  ngfi::arena a(1024);\n  test_struct* ptr = a.alloc<test_struct>();\n  ASSERT_NE(nullptr, ptr);\n\n  ptr->x = 100;\n  ptr->y = 3.14f;\n  ptr->z = 'A';\n\n  ASSERT_EQ(100, ptr->x);\n  ASSERT_EQ(3.14f, ptr->y);\n  ASSERT_EQ('A', ptr->z);\n}\n\nUTEST(arena, multiple_allocations) {\n  ngfi::arena a(1024);\n\n  int* i1 = a.alloc<int>();\n  int* i2 = a.alloc<int>();\n  int* i3 = a.alloc<int>();\n\n  ASSERT_NE(nullptr, i1);\n  ASSERT_NE(nullptr, i2);\n  ASSERT_NE(nullptr, i3);\n\n  // Pointers should be different.\n  ASSERT_NE(i1, i2);\n  ASSERT_NE(i2, i3);\n  ASSERT_NE(i1, i3);\n\n  *i1 = 1;\n  *i2 = 2;\n  *i3 = 3;\n\n  ASSERT_EQ(1, *i1);\n  ASSERT_EQ(2, *i2);\n  ASSERT_EQ(3, *i3);\n}\n\nUTEST(arena, reset) {\n  ngfi::arena a(1024);\n\n  a.alloc<int>();\n  a.alloc<int>();\n  a.alloc<int>();\n\n  size_t used_before = a.total_used();\n  ASSERT_GT(used_before, 0u);\n\n  a.reset();\n\n  ASSERT_EQ(0u, a.total_used());\n  // Can allocate again after reset.\n  int* ptr = a.alloc<int>();\n  ASSERT_NE(nullptr, 
ptr);\n}\n\nUTEST(arena, reset_reuses_memory) {\n  ngfi::arena a(1024);\n\n  int* ptr1 = a.alloc<int>();\n  ASSERT_NE(nullptr, ptr1);\n  size_t allocated_after_first = a.total_allocated();\n\n  a.reset();\n\n  int* ptr2 = a.alloc<int>();\n  ASSERT_NE(nullptr, ptr2);\n\n  // Should reuse the same block, so total_allocated stays the same.\n  ASSERT_EQ(allocated_after_first, a.total_allocated());\n}\n\nUTEST(arena, grows_when_needed) {\n  ngfi::arena a(64);  // Small block size.\n\n  // Allocate more than one block can hold.\n  void* ptrs[20];\n  for (int i = 0; i < 20; ++i) {\n    ptrs[i] = a.alloc(32);\n    ASSERT_NE(nullptr, ptrs[i]);\n  }\n\n  // Should have grown.\n  ASSERT_GT(a.total_allocated(), 64u);\n}\n\nUTEST(arena, alignment_basic) {\n  ngfi::arena a(1024);\n\n  // Allocate with 16-byte alignment.\n  void* ptr = a.alloc_aligned(32, 16);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % 16);\n}\n\nUTEST(arena, alignment_various) {\n  ngfi::arena a(4096);\n\n  for (size_t align = 1; align <= 128; align *= 2) {\n    void* ptr = a.alloc_aligned(16, align);\n    ASSERT_NE(nullptr, ptr);\n    ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % align);\n  }\n}\n\nUTEST(arena, typed_alloc_alignment) {\n  struct alignas(32) aligned_struct {\n    char data[32];\n  };\n\n  ngfi::arena a(1024);\n  aligned_struct* ptr = a.alloc<aligned_struct>();\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % 32);\n}\n\nUTEST(arena, alloc_zero_size_returns_null) {\n  ngfi::arena a(1024);\n  void* ptr = a.alloc(0);\n  ASSERT_EQ(nullptr, ptr);\n}\n\nUTEST(arena, alloc_without_capacity_returns_null) {\n  ngfi::arena a;  // No capacity set.\n  void* ptr = a.alloc(64);\n  ASSERT_EQ(nullptr, ptr);\n}\n\nUTEST(arena, set_block_size) {\n  ngfi::arena a;\n  a.set_block_size(512);\n\n  void* ptr = a.alloc(64);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_GT(a.total_allocated(), 0u);\n}\n\nUTEST(arena, move_construction) {\n  ngfi::arena 
a1(1024);\n  int* ptr = a1.alloc<int>();\n  *ptr = 42;\n\n  size_t allocated = a1.total_allocated();\n  size_t used = a1.total_used();\n\n  ngfi::arena a2(ngfi::move(a1));\n\n  ASSERT_EQ(allocated, a2.total_allocated());\n  ASSERT_EQ(used, a2.total_used());\n  ASSERT_EQ(0u, a1.total_allocated());\n  ASSERT_EQ(0u, a1.total_used());\n\n  // Original pointer should still be valid.\n  ASSERT_EQ(42, *ptr);\n}\n\nUTEST(arena, total_used_tracks_allocations) {\n  ngfi::arena a(1024);\n\n  size_t used0 = a.total_used();\n  a.alloc<int>();\n  size_t used1 = a.total_used();\n  a.alloc<int>();\n  size_t used2 = a.total_used();\n\n  ASSERT_EQ(0u, used0);\n  ASSERT_GT(used1, used0);\n  ASSERT_GT(used2, used1);\n}\n\nUTEST(arena, large_allocation) {\n  ngfi::arena a(64);  // Small default block size.\n\n  // Request larger than default block size.\n  void* ptr = a.alloc(256);\n  ASSERT_NE(nullptr, ptr);\n  ASSERT_GE(a.total_allocated(), 256u);\n}\n\nUTEST(arena, many_small_allocations) {\n  ngfi::arena a(1024);\n\n  for (int i = 0; i < 100; ++i) {\n    char* ptr = a.alloc<char>();\n    ASSERT_NE(nullptr, ptr);\n    *ptr = static_cast<char>(i);\n  }\n\n  ASSERT_GE(a.total_used(), 100u);\n}\n\nUTEST (frame_token, encode_decode) {\n  const uint16_t  test_ctx_id              = 65534u;\n  const uint8_t   test_max_inflight_frames = 3u, test_frame_id = 255u;\n  const uintptr_t test_token =\n  ngfi_encode_frame_token(test_ctx_id, test_max_inflight_frames, test_frame_id);\n  ASSERT_EQ(test_ctx_id, ngfi_frame_ctx_id(test_token));\n  ASSERT_EQ(test_max_inflight_frames, ngfi_frame_max_inflight_frames(test_token));\n  ASSERT_EQ(test_frame_id, ngfi_frame_id(test_token));\n}\n"
  },
  {
    "path": "tests/vk-backend-tests.cpp",
    "content": "/**\n * Copyright (c) 2026 nicegraf contributors\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n// NOTE: this file is meant to be included at the bottom of the vulkan backend implementation in test mode!\n\n#include \"utest.h\"\n\nstatic ngfvk_sync_state empty_sync_state() {\n  ngfvk_sync_state result;\n  memset(&result, 0, sizeof(result));\n  result.layout = VK_IMAGE_LAYOUT_UNDEFINED;\n  return result;\n}\n\n#define test_stg_access_mask(expected_result, stages, accesses) { \\\n  const ngfvk_sync_barrier_masks m      = {accesses, stages}; \\\n  const uint32_t                 result = ngfvk_per_stage_access_mask(&m); \\\n  ASSERT_EQ(expected_result, result); \\\n}\n\n#define test_barrier(sync_state, dsm, dam, expected_src_stage_mask, expected_src_access_mask, expected_src_layout, expected_dst_layout) { \\\n  ngfvk_barrier_data bar; \\\n  const ngfvk_sync_req sync_req = {{dam, dsm}, expected_dst_layout}; \\\n  
const bool barrier_necessary = \\\n      ngfvk_sync_barrier(sync_state, &sync_req, &bar); \\\n  const bool barrier_expected = \\\n      expected_src_stage_mask != 0 || (expected_src_layout != expected_dst_layout); \\\n  if (!barrier_expected) { \\\n    ASSERT_FALSE(barrier_necessary); \\\n  } else { \\\n    ASSERT_TRUE(barrier_necessary); \\\n    ASSERT_EQ(expected_src_stage_mask, bar.src_stage_mask); \\\n    ASSERT_EQ(expected_src_access_mask, bar.src_access_mask); \\\n    ASSERT_EQ(dsm, bar.dst_stage_mask); \\\n    ASSERT_EQ(dam, bar.dst_access_mask); \\\n    ASSERT_EQ(expected_src_layout, bar.src_layout); \\\n    ASSERT_EQ(expected_dst_layout, bar.dst_layout); \\\n  } \\\n}\n\n#define test_sync_req_merge(dst_req, src_req, success_expected, expected_stage_mask, expected_access_mask, expected_layout) { \\\n  const bool success = ngfvk_sync_req_merge(&dst_req, &src_req); \\\n  ASSERT_EQ(success_expected, success); \\\n  ASSERT_EQ(expected_stage_mask, dst_req.barrier_masks.stage_mask); \\\n  ASSERT_EQ(expected_access_mask, dst_req.barrier_masks.access_mask); \\\n  ASSERT_EQ(expected_layout, dst_req.layout); \\\n}\n\nUTEST(vk_sync, barrier_attrib_TwGr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_index_TwGr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  
test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_INDEX_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_texture_TwGr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n}\n\nUTEST(vk_sync, barrier_index_TwGrGr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n}\n\nUTEST(vk_sync, barrier_texture_TwGrGrCrCr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  
test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      0,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n}\n\nUTEST(vk_sync, barrier_texture_TwGrGwCr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT,\n      
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |\n       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),\n      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),\n      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,\n      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |\n       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),\n      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),\n      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n}\n\nUTEST(vk_sync, barrier_texture_GwTr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |\n       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),\n      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),\n      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_READ_BIT,\n      
(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |\n       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),\n      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |\n       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),\n      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,\n      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);\n}\n\nUTEST(vk_sync, barrier_buffer_CwCw) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_buffer_GrCwCw) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,\n      VK_ACCESS_UNIFORM_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,\n      VK_ACCESS_UNIFORM_READ_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_buffer_GrCrCw) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT),\n      (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT),\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_buffer_CwGrGr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      0, 0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, barrier_buffer_CwGrCr) {\n  ngfvk_sync_state sync_state = empty_sync_state();\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      0,\n      0,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      
VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n  test_barrier(\n      &sync_state,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, req_merge_concurrent_reads) {\n  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req src_reqs[] = {\n      {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL},\n      {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}};\n  test_sync_req_merge(\n      dst_req,\n      src_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n  test_sync_req_merge(\n      dst_req,\n      src_reqs[1],\n      true,\n      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);\n}\n\nUTEST(vk_sync, req_merge_write) {\n  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req sync_reqs[] = {\n      {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_UNDEFINED},\n  };\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_UNDEFINED);\n}\n\nUTEST(vk_sync, req_merge_write_write) {\n  ngfvk_sync_req dst_req 
= {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req sync_reqs[] = {\n      {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL},\n     {.barrier_masks =\n           {.access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},\n       .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}\n  };\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[1],\n      false,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n}\n\nUTEST(vk_sync, req_merge_write_read) {\n  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req sync_reqs[] = {\n      {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL},\n     {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL}\n  };\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[1],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n}\n\nUTEST(vk_sync, req_merge_read_write) {\n  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req sync_reqs[] = {\n     
{.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL},\n     {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL}\n  };\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[1],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n}\n\nUTEST(vk_sync, req_merge_layout_change) {\n  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};\n  static const ngfvk_sync_req sync_reqs[] = {\n     {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_GENERAL},\n     {.barrier_masks =\n           {.access_mask = VK_ACCESS_SHADER_READ_BIT,\n            .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},\n       .layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}\n  };\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[0],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n  test_sync_req_merge(\n      dst_req,\n      sync_reqs[1],\n      true,\n      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT,\n      VK_IMAGE_LAYOUT_GENERAL);\n}\n\nUTEST(vk_sync, stg_access_map) {\n#define BITMASK3x8(b7, b6, b5, b4, b3, b2, b1, b0) (((b7) << 21) | ((b6) << 18) | ((b5) << 15) | ((b4) << 12) | ((b3) << 9) | ((b2) << 6) | ((b1) << 3) | (b0) )\n  // clang-format off\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 
0b000, 0b000, 0b000, 0b000, 0b000, 0b001),\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b010),\n      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_INDEX_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b001, 0b000),\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b000),\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b101, 0b000),\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b101, 0b000),\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b111, 0b101, 0b101, 0b000),\n      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,\n      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b011, 0b011, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,\n      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,\n      
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b011, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,\n      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b001, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b010, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b011, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_TRANSFER_BIT,\n      VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT);\n  test_stg_access_mask(\n      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),\n      VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,\n      VK_ACCESS_SHADER_READ_BIT);\n#undef BITMASK3x8\n  // clang-format on\n}\n\nUTEST_MAIN()\n"
  }
]