Repository: nicebyte/nicegraf Branch: master Commit: 4f53c5b2ee47 Files: 114 Total size: 3.8 MB Directory structure: gitextract_ofuavp55/ ├── .clang-format ├── .github/ │ └── workflows/ │ └── tests.yml ├── .gitignore ├── CMakeLists.txt ├── README.md ├── build-samples.bat ├── build-samples.sh ├── build-utils.cmake ├── deps/ │ ├── SPIRV-reflect/ │ │ ├── CMakeLists.txt │ │ ├── include/ │ │ │ └── spirv/ │ │ │ └── unified1/ │ │ │ └── spirv.h │ │ ├── spirv_reflect.c │ │ └── spirv_reflect.h │ ├── metal-cpp/ │ │ └── MetalSingleHeader.hpp │ ├── renderdoc/ │ │ └── renderdoc_app.h │ ├── utest/ │ │ └── utest.h │ ├── vma/ │ │ ├── CHANGELOG.md │ │ ├── CMakeLists.txt │ │ ├── LICENSE.txt │ │ ├── README.md │ │ └── src/ │ │ ├── vk_mem_alloc.cpp │ │ └── vk_mem_alloc.h │ └── vulkan-headers/ │ └── vulkan/ │ ├── vk_platform.h │ ├── vulkan.h │ ├── vulkan_core.h │ ├── vulkan_ios.h │ ├── vulkan_macos.h │ ├── vulkan_metal.h │ ├── vulkan_win32.h │ └── vulkan_xcb.h ├── docs/ │ └── logo.xcf ├── include/ │ ├── nicegraf-mtl-handles.h │ ├── nicegraf-util.h │ ├── nicegraf-vk-handles.h │ ├── nicegraf-wrappers.h │ └── nicegraf.h ├── misc/ │ ├── common/ │ │ ├── CMakeLists.txt │ │ ├── check.h │ │ ├── file-utils.cpp │ │ ├── file-utils.h │ │ ├── logging.h │ │ ├── mesh-loader.cpp │ │ ├── mesh-loader.h │ │ ├── shader-loader.cpp │ │ ├── shader-loader.h │ │ ├── targa-loader.cpp │ │ └── targa-loader.h │ └── shaders.cmake ├── samples/ │ ├── 00-template/ │ │ └── sample-impl.cpp │ ├── 01-fullscreen-triangle/ │ │ └── fullscreen-triangle.cpp │ ├── 02-render-to-texture/ │ │ └── render-to-texture.cpp │ ├── 03-uniform-buffers/ │ │ └── uniform-buffers.cpp │ ├── 04-texture-sampling/ │ │ └── texture-sampling.cpp │ ├── 05-cubemap/ │ │ └── cubemap.cpp │ ├── 06-vertex-attribs/ │ │ └── vertex-attribs.cpp │ ├── 07-blinn-phong/ │ │ └── blinn-phong.cpp │ ├── 08-image-arrays/ │ │ └── image-arrays.cpp │ ├── 09-volume-rendering/ │ │ └── volume-rendering.cpp │ ├── 0a-compute-demo/ │ │ └── compute-demo.cpp │ ├── 
0a-compute-mandelbrot/ │ │ └── compute-mandelbrot.cpp │ ├── 0b-compute-vertices/ │ │ └── compute-vertices.cpp │ ├── 0c-render-to-multisample-texture/ │ │ └── render-to-multisample-texture.cpp │ ├── common/ │ │ ├── camera-controller.cpp │ │ ├── camera-controller.h │ │ ├── diagnostic-callback.cpp │ │ ├── diagnostic-callback.h │ │ ├── imgui-backend.cpp │ │ ├── imgui-backend.h │ │ ├── main.cpp │ │ ├── platform/ │ │ │ └── macos/ │ │ │ ├── glfw-cocoa-contentview.h │ │ │ └── glfw-cocoa-contentview.mm │ │ ├── sample-interface.h │ │ ├── staging-image.cpp │ │ └── staging-image.h │ └── shaders/ │ ├── blinn-phong.hlsl │ ├── compute-demo.hlsl │ ├── compute-vertices.hlsl │ ├── cubemap.hlsl │ ├── fullscreen-triangle.hlsl │ ├── generic-frag-shader-input.hlsl │ ├── imgui.hlsl │ ├── instancing.hlsl │ ├── polygon.hlsl │ ├── quad.hlsl │ ├── quat.hlsl │ ├── simple-texture.hlsl │ ├── textured-quad.hlsl │ ├── triangle.hlsl │ └── volume-renderer.hlsl ├── source/ │ ├── ngf-common/ │ │ ├── arena.cpp │ │ ├── arena.h │ │ ├── array.h │ │ ├── chunked-list.h │ │ ├── cmdbuf-state.h │ │ ├── create-destroy.cpp │ │ ├── default-arenas.cpp │ │ ├── default-arenas.h │ │ ├── frame-token.h │ │ ├── hashtable.h │ │ ├── internal.cpp │ │ ├── macros.h │ │ ├── silence.h │ │ ├── unique-ptr.h │ │ ├── util.c │ │ ├── util.h │ │ └── value-or-error.h │ ├── ngf-mtl/ │ │ ├── impl.cpp │ │ └── layer.mm │ └── ngf-vk/ │ ├── ca-metal-layer.mm │ ├── impl.cpp │ ├── vk_10.c │ └── vk_10.h └── tests/ ├── arena-alloc-tests.cpp ├── common-tests.cpp └── vk-backend-tests.cpp ================================================ FILE CONTENTS ================================================ ================================================ FILE: .clang-format ================================================ AccessModifierOffset: 0 AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: true AlignConsecutiveDeclarations: true AlignConsecutiveMacros: true AlignEscapedNewlines: true AlignOperands: true AlignTrailingComments: true 
AllowAllArgumentsOnNextLine: false AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: true AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: false AllowShortIfStatementsOnASingleLine: true AllowShortLambdasOnASingleLine: true AllowShortLoopsOnASingleLine: true AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakConstructorInitializers: BeforeColon BreakStringLiterals: true ColumnLimit: 100 CompactNamespaces: false ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DeriveLineEnding: true DerivePointerAlignment: false FixNamespaceComments: true IncludeBlocks: Regroup IndentCaseLabels: false IndentGotoLabels: false IndentWidth: 2 KeepEmptyLinesAtTheStartOfBlocks: false MaxEmptyLinesToKeep: 1 NamespaceIndentation: None PointerAlignment: Left ReflowComments: true SortIncludes: true SortUsingDeclarations: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: false SpaceBeforeAssignmentOperators: true SpaceBeforeCpp11BracedList: true SpaceBeforeCtorInitializerColon: true SpaceBeforeParens: true SpaceBeforeInheritanceColon: true SpaceBeforeRangeBasedForLoopColon: true SpaceBeforeSquareBrackets: false SpaceInEmptyBlock: false SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 2 SpacesInAngles: false SpacesInCStyleCastParentheses: false SpacesInConditionalStatement: false SpacesInContainerLiterals: false SpacesInSquareBrackets: false TabWidth: 2 UseCRLF: false UseTab: Never ================================================ FILE: .github/workflows/tests.yml ================================================ name: Run tests on: push: branches: [ master ] pull_request: branches: [ master ] env: CC: 
/usr/bin/clang CXX: /usr/bin/clang++ jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: update_apt run: sudo apt-get update - name: install_deps run: sudo apt install libx11-xcb-dev - name: make_build_dir run: mkdir -p build - name: run_cmake run: cd ./build && cmake .. -DNGF_BUILD_TESTS=yes - name: make run: cd ./build && make vk-backend-tests - name: test run: ./build/vk-backend-tests ================================================ FILE: .gitignore ================================================ build/* tests/build/* docs/doxygen/html docs/doxygen/xml docs/doxygen/latex tests/ngf_tests samples/binaries/* samples/deps/niceshade/* /out/build/x64-Debug /.vs samples-build-files/* .gitmodules .idea cmake-build-debug-visual-studio docs/doxygen **/.DS_Store ================================================ FILE: CMakeLists.txt ================================================ #[[ Copyright (c) 2026 nicegraf contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
]] cmake_minimum_required(VERSION 3.24) project(nicegraf) set(CMAKE_C_STANDARD 99) set(CMAKE_CXX_STANDARD 20) include("${CMAKE_CURRENT_LIST_DIR}/build-utils.cmake") # These are the compiler flags that are used on all nicegraf targets. if(MSVC) set(NICEMAKE_COMMON_COMPILE_OPTS "/W4") else() set(NICEMAKE_COMMON_COMPILE_OPTS "-Wall" "-Wconversion" "-Wno-unknown-pragmas" "-Wno-error=comment") endif() if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") list(APPEND NICEMAKE_COMMON_COMPILE_OPTS "-Wno-unknown-warning-option" "-Wno-missing-designated-field-initializers") endif() set(NICEGRAF_COMMON_DEPS nicegraf-internal) # A library with various utilities shared internally across different backends. nmk_static_library(NAME nicegraf-internal SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/macros.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/unique-ptr.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/value-or-error.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/cmdbuf-state.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/internal.cpp ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/util.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/arena.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/arena.cpp ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/default-arenas.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/default-arenas.cpp ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/chunked-list.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/hashtable.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/array.h) # nicegraf utility library. nmk_static_library(NAME nicegraf-util SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf-util.h ${CMAKE_CURRENT_LIST_DIR}/source/ngf-common/util.c DEPS nicegraf-internal) if (APPLE) find_library(APPLE_METAL Metal) find_library(APPLE_QUARTZ QuartzCore) find_library(APPLE_COREGRAPHICS CoreGraphics) find_library(APPLE_COCOA Cocoa) find_library(APPLE_UIKIT UIKit) endif() if (APPLE AND NOT (NGF_USE_MVK STREQUAL "yes")) # Nicegraf with native Metal backend. 
set(APPLE_LIBS ${APPLE_METAL} ${APPLE_QUARTZ} ${APPLE_COREGRAPHICS})
if (APPLE_COCOA)
  set(APPLE_LIBS ${APPLE_LIBS} ${APPLE_COCOA}) # macOS
else()
  # BUGFIX: this previously appended ${UIKit}, a variable that is never set;
  # the find_library() call above stores the framework path in APPLE_UIKIT,
  # so UIKit was silently never linked on iOS.
  set(APPLE_LIBS ${APPLE_LIBS} ${APPLE_UIKIT}) # iOS
endif()
# Nicegraf static library built against the native Metal backend.
nmk_static_library(NAME nicegraf-mtl
                   SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h
                        ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf-mtl-handles.h
                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-mtl/impl.cpp
                        ${CMAKE_CURRENT_LIST_DIR}/source/ngf-mtl/layer.mm
                   DEPS ${NICEGRAF_COMMON_DEPS} ${APPLE_LIBS}
                   PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/metal-cpp
                   COPTS "-fobjc-arc")
else()
nmk_header_library(NAME nicegraf-vk-headers
                   PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/vulkan-headers)
nmk_header_library(NAME nicegraf-renderdoc
                   PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/renderdoc)
# Import VMA for handling vulkan memory allocation.
add_definitions("-DVMA_STATIC_VULKAN_FUNCTIONS=0")
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/deps/vma)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/deps/SPIRV-reflect)
# Some vulkan-specific dependencies.
set(NICEGRAF_VK_DEPS vma spvreflect nicegraf-vk-headers nicegraf-renderdoc)
if (NOT WIN32 AND NOT APPLE)
  set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} xcb)
elseif (APPLE)
  set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} ${APPLE_QUARTZ})
endif()
set(NICEGRAF_VK_DEPS ${NICEGRAF_VK_DEPS} ${NICEGRAF_COMMON_DEPS})
set(NICEGRAF_VK_SRCS ${CMAKE_CURRENT_LIST_DIR}/include/nicegraf.h
                     ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/impl.cpp
                     ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/vk_10.c)
if (NGF_USE_MVK STREQUAL "yes")
  set(NICEGRAF_VK_SRCS ${NICEGRAF_VK_SRCS}
      ${CMAKE_CURRENT_LIST_DIR}/source/ngf-vk/ca-metal-layer.mm)
endif()
# Vulkan backend.
nmk_static_library(NAME nicegraf-vk
                   SRCS ${NICEGRAF_VK_SRCS}
                   PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/include
                   DEPS ${NICEGRAF_VK_DEPS})
if (NGF_BUILD_TESTS STREQUAL "yes")
  # Backend tests build the full backend source with NGFVK_TEST_MODE defined.
  nmk_binary(NAME vk-backend-tests
             SRCS ${NICEGRAF_VK_SRCS}
             DEPS utest ${NICEGRAF_VK_DEPS}
             PVT_DEFINES NGFVK_TEST_MODE)
  set_target_properties(vk-backend-tests PROPERTIES COMPILE_WARNING_AS_ERROR NO)
endif()
endif()
# Build tests only if explicitly requested.
if (NGF_BUILD_TESTS STREQUAL "yes")
  nmk_header_library(NAME utest
                     PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/deps/utest)
  # NOTE(review): the generator expression below was garbled in the source
  # ("$>,pthread,>"); reconstructed here as "link pthread on Linux, nothing
  # elsewhere" — confirm against the upstream repository.
  nmk_binary(NAME common-tests
             SRCS ${CMAKE_CURRENT_LIST_DIR}/tests/common-tests.cpp
             DEPS utest nicegraf-internal "$<IF:$<PLATFORM_ID:Linux>,pthread,>")
endif()
# Build samples only if explicitly requested.
if (NGF_BUILD_SAMPLES STREQUAL "yes")
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/misc/common)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/samples/deps/glfw)
# Ensure the required dependencies are available.
if (TARGET glfw)
  set_target_properties(glfw PROPERTIES FOLDER "samples")
else()
  message(FATAL_ERROR "Dependencies required for building samples not found. Make sure to run `git submodule update` from the repo root.")
endif()
# Determine the backend to build the samples with based on the platform, and
# any platform-specific source files.
set(NGF_PLATFORM_SOURCE "")
if(APPLE)
  set(NGF_PLATFORM_SOURCE ${NGF_PLATFORM_SOURCE}
      ${CMAKE_CURRENT_LIST_DIR}/samples/common/platform/macos/glfw-cocoa-contentview.mm
      ${CMAKE_CURRENT_LIST_DIR}/samples/common/platform/macos/glfw-cocoa-contentview.h)
endif()
# Set the folder to hold all samples binaries.
set(NGF_SAMPLES_OUTPUT_DIR ${CMAKE_CURRENT_LIST_DIR}/samples/binaries)
# Custom target for generated shaders.
file(GLOB shader_files ${CMAKE_CURRENT_LIST_DIR}/samples/shaders/*.hlsl)
include(${CMAKE_CURRENT_LIST_DIR}/misc/shaders.cmake)
ngf_shaders_target(NAME sample-shaders
                   OUTPUT_DIR ${NGF_SAMPLES_OUTPUT_DIR}/shaders
                   NICESHADE_PATH ${CMAKE_CURRENT_LIST_DIR}/samples/deps/niceshade/${NICESHADE_PLATFORM}
                   SRCS ${shader_files})
set_target_properties(sample-shaders PROPERTIES FOLDER "samples")
# ImGui sources built into the ngf-imgui static library.
# BUGFIX: imgui.cpp was listed twice; the duplicate entry has been removed.
set(NGF_IMGUI_SRCS
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui.cpp
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_draw.cpp
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_tables.cpp
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_widgets.cpp
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui_demo.cpp
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/imgui.h
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends/imgui_impl_glfw.h
    ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends/imgui_impl_glfw.cpp)
if(MSVC)
  set(NGF_IMGUI_COPTS "")
else()
  # Turn off reporting warnings as errors for ImGui on gcc/clang, because it has a lot of them.
set(NGF_IMGUI_COPTS "-Wno-error") endif() nmk_static_library(NAME ngf-imgui SRCS ${NGF_IMGUI_SRCS} DEPS glfw PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui/backends PVT_DEFINES "GLFW_INCLUDE_NONE" COPTS ${NGF_IMGUI_COPTS}) nmk_static_library(NAME ngf-samples-common SRCS ${CMAKE_CURRENT_LIST_DIR}/samples/common/main.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/common/diagnostic-callback.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/common/sample-interface.h ${CMAKE_CURRENT_LIST_DIR}/samples/common/diagnostic-callback.h ${CMAKE_CURRENT_LIST_DIR}/samples/common/imgui-backend.h ${CMAKE_CURRENT_LIST_DIR}/samples/common/imgui-backend.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.h ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.h ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.cpp ${NGF_PLATFORM_SOURCE} DEPS ngf-imgui glfw nicegraf-misc-common PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath ${CMAKE_CURRENT_LIST_DIR}/samples/common PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/common ${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath) set_target_properties(ngf-samples-common PROPERTIES FOLDER "samples") function (ngf_sample) cmake_parse_arguments(SAMPLE "" "NAME" "" ${ARGN}) file(GLOB_RECURSE SAMPLE_SRCS ${CMAKE_CURRENT_LIST_DIR}/samples/${SAMPLE_NAME}/*.cpp) nmk_binary(NAME ${SAMPLE_NAME} SRCS ${SAMPLE_SRCS} DEPS nicegraf ngf-samples-common nicegraf-misc-common nicegraf-util ngf-imgui PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/samples/${SAMPLE_NAME} PVT_DEFINES "GLFW_INCLUDE_NONE" OUTPUT_DIR "${NGF_SAMPLES_OUTPUT_DIR}") add_dependencies(${SAMPLE_NAME} sample-shaders) set_target_properties(${SAMPLE_NAME} PROPERTIES FOLDER "samples") endfunction() file(MAKE_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/samples/binaries/shaders) ngf_sample(NAME 01-fullscreen-triangle) ngf_sample(NAME 
02-render-to-texture) ngf_sample(NAME 03-uniform-buffers) ngf_sample(NAME 04-texture-sampling) ngf_sample(NAME 05-cubemap) ngf_sample(NAME 06-vertex-attribs) ngf_sample(NAME 07-blinn-phong) ngf_sample(NAME 08-image-arrays) ngf_sample(NAME 09-volume-rendering) ngf_sample(NAME 0a-compute-mandelbrot) ngf_sample(NAME 0b-compute-vertices) ngf_sample(NAME 0c-render-to-multisample-texture) endif() # Build image tests only if explicitly requested. # These tests run samples headlessly and compare rendered output against golden images. # Requires NGF_BUILD_SAMPLES=yes since image tests depend on sample shaders and utilities. if (NGF_BUILD_IMAGE_TESTS STREQUAL "yes") if (NOT NGF_BUILD_SAMPLES STREQUAL "yes") message(FATAL_ERROR "NGF_BUILD_IMAGE_TESTS requires NGF_BUILD_SAMPLES=yes") endif() # Sample utility sources needed by image tests (excluding main.cpp and factory.cpp) set(IMAGE_TEST_SAMPLE_UTILS ${CMAKE_CURRENT_LIST_DIR}/samples/common/staging-image.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/common/camera-controller.cpp ) # All sample sources (excluding factory.cpp which is only for interactive samples) set(IMAGE_TEST_SAMPLE_SOURCES ${CMAKE_CURRENT_LIST_DIR}/samples/01-fullscreen-triangle/fullscreen-triangle.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/02-render-to-texture/render-to-texture.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/03-uniform-buffers/uniform-buffers.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/04-texture-sampling/texture-sampling.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/05-cubemap/cubemap.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/06-vertex-attribs/vertex-attribs.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/07-blinn-phong/blinn-phong.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/08-image-arrays/image-arrays.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/09-volume-rendering/volume-rendering.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/0a-compute-mandelbrot/compute-mandelbrot.cpp ${CMAKE_CURRENT_LIST_DIR}/samples/0b-compute-vertices/compute-vertices.cpp 
${CMAKE_CURRENT_LIST_DIR}/samples/0c-render-to-multisample-texture/render-to-multisample-texture.cpp ) # Image test sources set(IMAGE_TEST_SOURCES ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/image-test-main.cpp ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/headless-harness.cpp ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests/image-comparator.cpp ${IMAGE_TEST_SAMPLE_UTILS} ${IMAGE_TEST_SAMPLE_SOURCES} ) # Ensure golden images directory exists file(MAKE_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/tests/golden) # Create unified image test executable nmk_binary(NAME image-tests SRCS ${IMAGE_TEST_SOURCES} DEPS nicegraf nicegraf-util nicegraf-misc-common ngf-imgui PVT_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/tests ${CMAKE_CURRENT_LIST_DIR}/tests/image-tests ${CMAKE_CURRENT_LIST_DIR}/samples/common ${CMAKE_CURRENT_LIST_DIR}/samples/deps/nicemath ${CMAKE_CURRENT_LIST_DIR}/samples/deps/imgui OUTPUT_DIR "${NGF_SAMPLES_OUTPUT_DIR}") add_dependencies(image-tests sample-shaders) set_target_properties(image-tests PROPERTIES FOLDER "tests") endif() ================================================ FILE: README.md ================================================ nicegraf ======== ![Run tests](https://github.com/nicebyte/nicegraf/workflows/Run%20tests/badge.svg)

An abstraction layer for GPU APIs.

Discord · Reference Documentation · Sample Code

# platform support matrix | | 🟦 | 🐧 | 🍏 | |---|---|---|---| | 🌋 | 🟩 | 🟩 | 🟨 | | 🤘 | 🟥 | 🟥 | 🟩 | # credits ## current maintainers * nicebyte · [@nice_byte](http://twitter.com/nice_byte) * Bagrat 'dBuger' Dabaghyan · [@dBagrat](http://twitter.com/dBagrat) * Andranik 'HedgeTheHog' Melikyan · [@andranik3949](http://twitter.com/andranik3949) ## dependencies * The Vulkan backend uses SPIRV-Reflect, maintained by the Khronos Group, and the Vulkan Memory Allocator, maintained by AMD. * The sample code uses GLFW, maintained by Camilla Berglund, and ImGui, maintained by Omar Cornut. ================================================ FILE: build-samples.bat ================================================ @echo off echo Downloading binary dependencies and data for samples... powershell -Command "(New-Object Net.WebClient).DownloadFile('https://github.com/nicebyte/nicegraf/releases/download/v0.1.1/nicegraf-samples-data.zip', 'nicegraf-samples-data.zip')" || (exit /b) echo Unpacking binary dependencies and data for samples... powershell -Command "Expand-Archive -Force nicegraf-samples-data.zip ." || (exit /b) echo Removing temporary files... del nicegraf-samples-data.zip || (exit /b) echo Downloading library dependencies for samples... git submodule init || (exit /b) git submodule update || (exit /b) echo Setting up folder for build files... if not exist ".\samples-build-files" mkdir samples-build-files || (exit /b) cd samples-build-files || (exit /b) echo Generating build files... cmake .. -DNGF_BUILD_SAMPLES="yes" || (exit /b) echo Finished successfully! pause ================================================ FILE: build-samples.sh ================================================ #!/bin/bash set -e echo "Downloading binary dependencies for samples..." curl https://github.com/nicebyte/nicegraf/releases/download/v0.1.1/nicegraf-samples-data.zip -fL -o nicegraf-samples-data.zip echo "Unpacking binary dependencies and data for samples..." 
unzip -u nicegraf-samples-data.zip chmod +x ./samples/deps/niceshade/macos/niceshade chmod +x ./samples/deps/niceshade/linux/niceshade echo "Removing temporary files..." rm -rf nicegraf-samples-data.zip echo "Downloading library dependencies for samples..." git submodule init git submodule update echo "Setting up folder for build files..." mkdir -p samples-build-files cd samples-build-files echo "Generating build files..." if [ "`uname -s`" = "Darwin" ]; then NGF_GENERATOR="-GXcode" else NGF_GENERATOR= fi cmake .. -DNGF_BUILD_SAMPLES="yes" -DNGF_BUILD_TESTS="yes" ${NGF_GENERATOR} $@ cd .. echo "Finished successfully!" ================================================ FILE: build-utils.cmake ================================================ #[[ Copyright (c) 2022 nicegraf contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ]] # This function adds a new target and sets some configuration options for it. # Parameters: # TYPE - type of the target. 
Must be one of: # - `lib`, for a static library; # - `hdr`, for a header-only library; # - `exe`, for an executable binary. # SRCS - a list of source files for the target. # COPTS - a list of compiler options. # PVT_INCLUDES - a list of paths to add to this target's include paths. # PUB_INCLUDES - a list of paths to add to the include paths of all targets depending on this target. # PVT_DEFINES - a list of preprocessor definitions to add for this target. # PUB_DEFINES - a list of preprocessor definitions to add to all targets depending on this target. # OUTPUT_DIR - the path to the folder where the output for this target shall be stored. function (nmk_target) cmake_parse_arguments(TGT "" "NAME;TYPE" "SRCS;DEPS;COPTS;PUB_INCLUDES;PVT_INCLUDES;PUB_DEFINES;PUB_DEPS;PVT_DEFINES;OUTPUT_DIR;VS_DEBUGGER_WORKING_DIR" ${ARGN}) if (TGT_TYPE STREQUAL "lib") add_library(${TGT_NAME} STATIC ${TGT_SRCS}) elseif(TGT_TYPE STREQUAL "hdr") add_library(${TGT_NAME} INTERFACE ${TGT_SRCS}) elseif(TGT_TYPE STREQUAL "exe") add_executable(${TGT_NAME} ${TGT_SRCS}) else() message(FATAL_ERROR "invalid target type") endif() # Add dependencies. if ( TGT_DEPS ) target_link_libraries(${TGT_NAME} PRIVATE ${TGT_DEPS}) endif() if ( TGT_PUB_DEPS ) target_link_libraries(${TGT_NAME} INTERFACE ${TGT_PUB_DEPS}) endif() # Add include directories. if ( TGT_PUB_INCLUDES ) target_include_directories(${TGT_NAME} INTERFACE ${TGT_PUB_INCLUDES}) endif() if ( TGT_PVT_INCLUDES ) target_include_directories(${TGT_NAME} PRIVATE ${TGT_PVT_INCLUDES}) endif() if ( NOT ( TGT_TYPE STREQUAL "hdr" ) ) target_include_directories(${TGT_NAME} PRIVATE ${CMAKE_CURRENT_LIST_DIR}/source ${CMAKE_CURRENT_LIST_DIR}/include) endif() target_include_directories(${TGT_NAME} INTERFACE ${CMAKE_CURRENT_LIST_DIR}/include) # Add compile-time definitions. 
if ( TGT_PUB_DEFINES ) target_compile_definitions(${TGT_NAME} INTERFACE ${TGT_PUB_DEFINES}) endif() if ( TGT_PVT_DEFINES ) target_compile_definitions(${TGT_NAME} PRIVATE ${TGT_PVT_DEFINES}) endif() # Add compiler options. if ( NOT ( TGT_TYPE STREQUAL "hdr" ) ) if ( NICEMAKE_COMMON_COMPILE_OPTS ) target_compile_options(${TGT_NAME} PRIVATE ${NICEMAKE_COMMON_COMPILE_OPTS}) endif() if ( TGT_COPTS ) target_compile_options(${TGT_NAME} PRIVATE ${TGT_COPTS}) endif() set_target_properties(${TGT_NAME} PROPERTIES COMPILE_WARNING_AS_ERROR ON) endif() # Set output directory. if( TGT_OUTPUT_DIR ) set_target_properties(${TGT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${TGT_OUTPUT_DIR}") set_target_properties(${TGT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG "${TGT_OUTPUT_DIR}") set_target_properties(${TGT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE "${TGT_OUTPUT_DIR}") set_target_properties(${TGT_NAME} PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY "${TGT_OUTPUT_DIR}") endif() endfunction() # Shortcut for adding a new library target. function (nmk_static_library) nmk_target(TYPE lib ${ARGN}) endfunction() # Shortcut for adding a new header-only library target. function (nmk_header_library) nmk_target(TYPE hdr ${ARGN}) endfunction() # Shortcut for adding a new executable target. 
function (nmk_binary) nmk_target(TYPE exe ${ARGN}) endfunction() ================================================ FILE: deps/SPIRV-reflect/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.14.0) project(spvreflect) set(CMAKE_C_STANDARD 99) add_library(spvreflect STATIC ${CMAKE_CURRENT_LIST_DIR}/include/spirv/unified1/spirv.h ${CMAKE_CURRENT_LIST_DIR}/spirv_reflect.h ${CMAKE_CURRENT_LIST_DIR}/spirv_reflect.c) target_include_directories(spvreflect SYSTEM PUBLIC ${CMAKE_CURRENT_LIST_DIR}) ================================================ FILE: deps/SPIRV-reflect/include/spirv/unified1/spirv.h ================================================ /* ** Copyright (c) 2014-2020 The Khronos Group Inc. ** ** Permission is hereby granted, free of charge, to any person obtaining a copy ** of this software and/or associated documentation files (the "Materials"), ** to deal in the Materials without restriction, including without limitation ** the rights to use, copy, modify, merge, publish, distribute, sublicense, ** and/or sell copies of the Materials, and to permit persons to whom the ** Materials are furnished to do so, subject to the following conditions: ** ** The above copyright notice and this permission notice shall be included in ** all copies or substantial portions of the Materials. ** ** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS ** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND ** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ ** ** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS ** IN THE MATERIALS. */ /* ** This header is automatically generated by the same tool that creates ** the Binary Section of the SPIR-V specification. */ /* ** Enumeration tokens for SPIR-V, in various styles: ** C, C++, C++11, JSON, Lua, Python, C#, D, Beef ** ** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL ** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL ** - C++11 will use enum classes in the spv namespace, e.g.: *spv::SourceLanguage::GLSL ** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL ** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] ** - C# will use enum classes in the Specification class located in the "Spv" *namespace, ** e.g.: Spv.Specification.SourceLanguage.GLSL ** - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL ** - Beef will use enum classes in the Specification class located in the "Spv" *namespace, ** e.g.: Spv.Specification.SourceLanguage.GLSL ** ** Some tokens act like mask values, which can be OR'd together, ** while others are mutually exclusive. The mask-like ones have ** "Mask" in their name, and a parallel enum that has the shift ** amount (1 << x) for each corresponding enumerant. 
*/ #ifndef spirv_H #define spirv_H typedef unsigned int SpvId; #define SPV_VERSION 0x10600 #define SPV_REVISION 1 static const unsigned int SpvMagicNumber = 0x07230203; static const unsigned int SpvVersion = 0x00010600; static const unsigned int SpvRevision = 1; static const unsigned int SpvOpCodeMask = 0xffff; static const unsigned int SpvWordCountShift = 16; typedef enum SpvSourceLanguage_ { SpvSourceLanguageUnknown = 0, SpvSourceLanguageESSL = 1, SpvSourceLanguageGLSL = 2, SpvSourceLanguageOpenCL_C = 3, SpvSourceLanguageOpenCL_CPP = 4, SpvSourceLanguageHLSL = 5, SpvSourceLanguageCPP_for_OpenCL = 6, SpvSourceLanguageSYCL = 7, SpvSourceLanguageHERO_C = 8, SpvSourceLanguageNZSL = 9, SpvSourceLanguageMax = 0x7fffffff, } SpvSourceLanguage; typedef enum SpvExecutionModel_ { SpvExecutionModelVertex = 0, SpvExecutionModelTessellationControl = 1, SpvExecutionModelTessellationEvaluation = 2, SpvExecutionModelGeometry = 3, SpvExecutionModelFragment = 4, SpvExecutionModelGLCompute = 5, SpvExecutionModelKernel = 6, SpvExecutionModelTaskNV = 5267, SpvExecutionModelMeshNV = 5268, SpvExecutionModelRayGenerationKHR = 5313, SpvExecutionModelRayGenerationNV = 5313, SpvExecutionModelIntersectionKHR = 5314, SpvExecutionModelIntersectionNV = 5314, SpvExecutionModelAnyHitKHR = 5315, SpvExecutionModelAnyHitNV = 5315, SpvExecutionModelClosestHitKHR = 5316, SpvExecutionModelClosestHitNV = 5316, SpvExecutionModelMissKHR = 5317, SpvExecutionModelMissNV = 5317, SpvExecutionModelCallableKHR = 5318, SpvExecutionModelCallableNV = 5318, SpvExecutionModelTaskEXT = 5364, SpvExecutionModelMeshEXT = 5365, SpvExecutionModelMax = 0x7fffffff, } SpvExecutionModel; typedef enum SpvAddressingModel_ { SpvAddressingModelLogical = 0, SpvAddressingModelPhysical32 = 1, SpvAddressingModelPhysical64 = 2, SpvAddressingModelPhysicalStorageBuffer64 = 5348, SpvAddressingModelPhysicalStorageBuffer64EXT = 5348, SpvAddressingModelMax = 0x7fffffff, } SpvAddressingModel; typedef enum SpvMemoryModel_ { 
SpvMemoryModelSimple = 0, SpvMemoryModelGLSL450 = 1, SpvMemoryModelOpenCL = 2, SpvMemoryModelVulkan = 3, SpvMemoryModelVulkanKHR = 3, SpvMemoryModelMax = 0x7fffffff, } SpvMemoryModel; typedef enum SpvExecutionMode_ { SpvExecutionModeInvocations = 0, SpvExecutionModeSpacingEqual = 1, SpvExecutionModeSpacingFractionalEven = 2, SpvExecutionModeSpacingFractionalOdd = 3, SpvExecutionModeVertexOrderCw = 4, SpvExecutionModeVertexOrderCcw = 5, SpvExecutionModePixelCenterInteger = 6, SpvExecutionModeOriginUpperLeft = 7, SpvExecutionModeOriginLowerLeft = 8, SpvExecutionModeEarlyFragmentTests = 9, SpvExecutionModePointMode = 10, SpvExecutionModeXfb = 11, SpvExecutionModeDepthReplacing = 12, SpvExecutionModeDepthGreater = 14, SpvExecutionModeDepthLess = 15, SpvExecutionModeDepthUnchanged = 16, SpvExecutionModeLocalSize = 17, SpvExecutionModeLocalSizeHint = 18, SpvExecutionModeInputPoints = 19, SpvExecutionModeInputLines = 20, SpvExecutionModeInputLinesAdjacency = 21, SpvExecutionModeTriangles = 22, SpvExecutionModeInputTrianglesAdjacency = 23, SpvExecutionModeQuads = 24, SpvExecutionModeIsolines = 25, SpvExecutionModeOutputVertices = 26, SpvExecutionModeOutputPoints = 27, SpvExecutionModeOutputLineStrip = 28, SpvExecutionModeOutputTriangleStrip = 29, SpvExecutionModeVecTypeHint = 30, SpvExecutionModeContractionOff = 31, SpvExecutionModeInitializer = 33, SpvExecutionModeFinalizer = 34, SpvExecutionModeSubgroupSize = 35, SpvExecutionModeSubgroupsPerWorkgroup = 36, SpvExecutionModeSubgroupsPerWorkgroupId = 37, SpvExecutionModeLocalSizeId = 38, SpvExecutionModeLocalSizeHintId = 39, SpvExecutionModeNonCoherentColorAttachmentReadEXT = 4169, SpvExecutionModeNonCoherentDepthAttachmentReadEXT = 4170, SpvExecutionModeNonCoherentStencilAttachmentReadEXT = 4171, SpvExecutionModeSubgroupUniformControlFlowKHR = 4421, SpvExecutionModePostDepthCoverage = 4446, SpvExecutionModeDenormPreserve = 4459, SpvExecutionModeDenormFlushToZero = 4460, SpvExecutionModeSignedZeroInfNanPreserve = 4461, 
SpvExecutionModeRoundingModeRTE = 4462, SpvExecutionModeRoundingModeRTZ = 4463, SpvExecutionModeEarlyAndLateFragmentTestsAMD = 5017, SpvExecutionModeStencilRefReplacingEXT = 5027, SpvExecutionModeStencilRefUnchangedFrontAMD = 5079, SpvExecutionModeStencilRefGreaterFrontAMD = 5080, SpvExecutionModeStencilRefLessFrontAMD = 5081, SpvExecutionModeStencilRefUnchangedBackAMD = 5082, SpvExecutionModeStencilRefGreaterBackAMD = 5083, SpvExecutionModeStencilRefLessBackAMD = 5084, SpvExecutionModeOutputLinesEXT = 5269, SpvExecutionModeOutputLinesNV = 5269, SpvExecutionModeOutputPrimitivesEXT = 5270, SpvExecutionModeOutputPrimitivesNV = 5270, SpvExecutionModeDerivativeGroupQuadsNV = 5289, SpvExecutionModeDerivativeGroupLinearNV = 5290, SpvExecutionModeOutputTrianglesEXT = 5298, SpvExecutionModeOutputTrianglesNV = 5298, SpvExecutionModePixelInterlockOrderedEXT = 5366, SpvExecutionModePixelInterlockUnorderedEXT = 5367, SpvExecutionModeSampleInterlockOrderedEXT = 5368, SpvExecutionModeSampleInterlockUnorderedEXT = 5369, SpvExecutionModeShadingRateInterlockOrderedEXT = 5370, SpvExecutionModeShadingRateInterlockUnorderedEXT = 5371, SpvExecutionModeSharedLocalMemorySizeINTEL = 5618, SpvExecutionModeRoundingModeRTPINTEL = 5620, SpvExecutionModeRoundingModeRTNINTEL = 5621, SpvExecutionModeFloatingPointModeALTINTEL = 5622, SpvExecutionModeFloatingPointModeIEEEINTEL = 5623, SpvExecutionModeMaxWorkgroupSizeINTEL = 5893, SpvExecutionModeMaxWorkDimINTEL = 5894, SpvExecutionModeNoGlobalOffsetINTEL = 5895, SpvExecutionModeNumSIMDWorkitemsINTEL = 5896, SpvExecutionModeSchedulerTargetFmaxMhzINTEL = 5903, SpvExecutionModeStreamingInterfaceINTEL = 6154, SpvExecutionModeRegisterMapInterfaceINTEL = 6160, SpvExecutionModeNamedBarrierCountINTEL = 6417, SpvExecutionModeMax = 0x7fffffff, } SpvExecutionMode; typedef enum SpvStorageClass_ { SpvStorageClassUniformConstant = 0, SpvStorageClassInput = 1, SpvStorageClassUniform = 2, SpvStorageClassOutput = 3, SpvStorageClassWorkgroup = 4, 
SpvStorageClassCrossWorkgroup = 5, SpvStorageClassPrivate = 6, SpvStorageClassFunction = 7, SpvStorageClassGeneric = 8, SpvStorageClassPushConstant = 9, SpvStorageClassAtomicCounter = 10, SpvStorageClassImage = 11, SpvStorageClassStorageBuffer = 12, SpvStorageClassTileImageEXT = 4172, SpvStorageClassCallableDataKHR = 5328, SpvStorageClassCallableDataNV = 5328, SpvStorageClassIncomingCallableDataKHR = 5329, SpvStorageClassIncomingCallableDataNV = 5329, SpvStorageClassRayPayloadKHR = 5338, SpvStorageClassRayPayloadNV = 5338, SpvStorageClassHitAttributeKHR = 5339, SpvStorageClassHitAttributeNV = 5339, SpvStorageClassIncomingRayPayloadKHR = 5342, SpvStorageClassIncomingRayPayloadNV = 5342, SpvStorageClassShaderRecordBufferKHR = 5343, SpvStorageClassShaderRecordBufferNV = 5343, SpvStorageClassPhysicalStorageBuffer = 5349, SpvStorageClassPhysicalStorageBufferEXT = 5349, SpvStorageClassHitObjectAttributeNV = 5385, SpvStorageClassTaskPayloadWorkgroupEXT = 5402, SpvStorageClassCodeSectionINTEL = 5605, SpvStorageClassDeviceOnlyINTEL = 5936, SpvStorageClassHostOnlyINTEL = 5937, SpvStorageClassMax = 0x7fffffff, } SpvStorageClass; typedef enum SpvDim_ { SpvDim1D = 0, SpvDim2D = 1, SpvDim3D = 2, SpvDimCube = 3, SpvDimRect = 4, SpvDimBuffer = 5, SpvDimSubpassData = 6, SpvDimTileImageDataEXT = 4173, SpvDimMax = 0x7fffffff, } SpvDim; typedef enum SpvSamplerAddressingMode_ { SpvSamplerAddressingModeNone = 0, SpvSamplerAddressingModeClampToEdge = 1, SpvSamplerAddressingModeClamp = 2, SpvSamplerAddressingModeRepeat = 3, SpvSamplerAddressingModeRepeatMirrored = 4, SpvSamplerAddressingModeMax = 0x7fffffff, } SpvSamplerAddressingMode; typedef enum SpvSamplerFilterMode_ { SpvSamplerFilterModeNearest = 0, SpvSamplerFilterModeLinear = 1, SpvSamplerFilterModeMax = 0x7fffffff, } SpvSamplerFilterMode; typedef enum SpvImageFormat_ { SpvImageFormatUnknown = 0, SpvImageFormatRgba32f = 1, SpvImageFormatRgba16f = 2, SpvImageFormatR32f = 3, SpvImageFormatRgba8 = 4, SpvImageFormatRgba8Snorm = 5, 
SpvImageFormatRg32f = 6, SpvImageFormatRg16f = 7, SpvImageFormatR11fG11fB10f = 8, SpvImageFormatR16f = 9, SpvImageFormatRgba16 = 10, SpvImageFormatRgb10A2 = 11, SpvImageFormatRg16 = 12, SpvImageFormatRg8 = 13, SpvImageFormatR16 = 14, SpvImageFormatR8 = 15, SpvImageFormatRgba16Snorm = 16, SpvImageFormatRg16Snorm = 17, SpvImageFormatRg8Snorm = 18, SpvImageFormatR16Snorm = 19, SpvImageFormatR8Snorm = 20, SpvImageFormatRgba32i = 21, SpvImageFormatRgba16i = 22, SpvImageFormatRgba8i = 23, SpvImageFormatR32i = 24, SpvImageFormatRg32i = 25, SpvImageFormatRg16i = 26, SpvImageFormatRg8i = 27, SpvImageFormatR16i = 28, SpvImageFormatR8i = 29, SpvImageFormatRgba32ui = 30, SpvImageFormatRgba16ui = 31, SpvImageFormatRgba8ui = 32, SpvImageFormatR32ui = 33, SpvImageFormatRgb10a2ui = 34, SpvImageFormatRg32ui = 35, SpvImageFormatRg16ui = 36, SpvImageFormatRg8ui = 37, SpvImageFormatR16ui = 38, SpvImageFormatR8ui = 39, SpvImageFormatR64ui = 40, SpvImageFormatR64i = 41, SpvImageFormatMax = 0x7fffffff, } SpvImageFormat; typedef enum SpvImageChannelOrder_ { SpvImageChannelOrderR = 0, SpvImageChannelOrderA = 1, SpvImageChannelOrderRG = 2, SpvImageChannelOrderRA = 3, SpvImageChannelOrderRGB = 4, SpvImageChannelOrderRGBA = 5, SpvImageChannelOrderBGRA = 6, SpvImageChannelOrderARGB = 7, SpvImageChannelOrderIntensity = 8, SpvImageChannelOrderLuminance = 9, SpvImageChannelOrderRx = 10, SpvImageChannelOrderRGx = 11, SpvImageChannelOrderRGBx = 12, SpvImageChannelOrderDepth = 13, SpvImageChannelOrderDepthStencil = 14, SpvImageChannelOrdersRGB = 15, SpvImageChannelOrdersRGBx = 16, SpvImageChannelOrdersRGBA = 17, SpvImageChannelOrdersBGRA = 18, SpvImageChannelOrderABGR = 19, SpvImageChannelOrderMax = 0x7fffffff, } SpvImageChannelOrder; typedef enum SpvImageChannelDataType_ { SpvImageChannelDataTypeSnormInt8 = 0, SpvImageChannelDataTypeSnormInt16 = 1, SpvImageChannelDataTypeUnormInt8 = 2, SpvImageChannelDataTypeUnormInt16 = 3, SpvImageChannelDataTypeUnormShort565 = 4, 
SpvImageChannelDataTypeUnormShort555 = 5, SpvImageChannelDataTypeUnormInt101010 = 6, SpvImageChannelDataTypeSignedInt8 = 7, SpvImageChannelDataTypeSignedInt16 = 8, SpvImageChannelDataTypeSignedInt32 = 9, SpvImageChannelDataTypeUnsignedInt8 = 10, SpvImageChannelDataTypeUnsignedInt16 = 11, SpvImageChannelDataTypeUnsignedInt32 = 12, SpvImageChannelDataTypeHalfFloat = 13, SpvImageChannelDataTypeFloat = 14, SpvImageChannelDataTypeUnormInt24 = 15, SpvImageChannelDataTypeUnormInt101010_2 = 16, SpvImageChannelDataTypeUnsignedIntRaw10EXT = 19, SpvImageChannelDataTypeUnsignedIntRaw12EXT = 20, SpvImageChannelDataTypeMax = 0x7fffffff, } SpvImageChannelDataType; typedef enum SpvImageOperandsShift_ { SpvImageOperandsBiasShift = 0, SpvImageOperandsLodShift = 1, SpvImageOperandsGradShift = 2, SpvImageOperandsConstOffsetShift = 3, SpvImageOperandsOffsetShift = 4, SpvImageOperandsConstOffsetsShift = 5, SpvImageOperandsSampleShift = 6, SpvImageOperandsMinLodShift = 7, SpvImageOperandsMakeTexelAvailableShift = 8, SpvImageOperandsMakeTexelAvailableKHRShift = 8, SpvImageOperandsMakeTexelVisibleShift = 9, SpvImageOperandsMakeTexelVisibleKHRShift = 9, SpvImageOperandsNonPrivateTexelShift = 10, SpvImageOperandsNonPrivateTexelKHRShift = 10, SpvImageOperandsVolatileTexelShift = 11, SpvImageOperandsVolatileTexelKHRShift = 11, SpvImageOperandsSignExtendShift = 12, SpvImageOperandsZeroExtendShift = 13, SpvImageOperandsNontemporalShift = 14, SpvImageOperandsOffsetsShift = 16, SpvImageOperandsMax = 0x7fffffff, } SpvImageOperandsShift; typedef enum SpvImageOperandsMask_ { SpvImageOperandsMaskNone = 0, SpvImageOperandsBiasMask = 0x00000001, SpvImageOperandsLodMask = 0x00000002, SpvImageOperandsGradMask = 0x00000004, SpvImageOperandsConstOffsetMask = 0x00000008, SpvImageOperandsOffsetMask = 0x00000010, SpvImageOperandsConstOffsetsMask = 0x00000020, SpvImageOperandsSampleMask = 0x00000040, SpvImageOperandsMinLodMask = 0x00000080, SpvImageOperandsMakeTexelAvailableMask = 0x00000100, 
SpvImageOperandsMakeTexelAvailableKHRMask = 0x00000100, SpvImageOperandsMakeTexelVisibleMask = 0x00000200, SpvImageOperandsMakeTexelVisibleKHRMask = 0x00000200, SpvImageOperandsNonPrivateTexelMask = 0x00000400, SpvImageOperandsNonPrivateTexelKHRMask = 0x00000400, SpvImageOperandsVolatileTexelMask = 0x00000800, SpvImageOperandsVolatileTexelKHRMask = 0x00000800, SpvImageOperandsSignExtendMask = 0x00001000, SpvImageOperandsZeroExtendMask = 0x00002000, SpvImageOperandsNontemporalMask = 0x00004000, SpvImageOperandsOffsetsMask = 0x00010000, } SpvImageOperandsMask; typedef enum SpvFPFastMathModeShift_ { SpvFPFastMathModeNotNaNShift = 0, SpvFPFastMathModeNotInfShift = 1, SpvFPFastMathModeNSZShift = 2, SpvFPFastMathModeAllowRecipShift = 3, SpvFPFastMathModeFastShift = 4, SpvFPFastMathModeAllowContractFastINTELShift = 16, SpvFPFastMathModeAllowReassocINTELShift = 17, SpvFPFastMathModeMax = 0x7fffffff, } SpvFPFastMathModeShift; typedef enum SpvFPFastMathModeMask_ { SpvFPFastMathModeMaskNone = 0, SpvFPFastMathModeNotNaNMask = 0x00000001, SpvFPFastMathModeNotInfMask = 0x00000002, SpvFPFastMathModeNSZMask = 0x00000004, SpvFPFastMathModeAllowRecipMask = 0x00000008, SpvFPFastMathModeFastMask = 0x00000010, SpvFPFastMathModeAllowContractFastINTELMask = 0x00010000, SpvFPFastMathModeAllowReassocINTELMask = 0x00020000, } SpvFPFastMathModeMask; typedef enum SpvFPRoundingMode_ { SpvFPRoundingModeRTE = 0, SpvFPRoundingModeRTZ = 1, SpvFPRoundingModeRTP = 2, SpvFPRoundingModeRTN = 3, SpvFPRoundingModeMax = 0x7fffffff, } SpvFPRoundingMode; typedef enum SpvLinkageType_ { SpvLinkageTypeExport = 0, SpvLinkageTypeImport = 1, SpvLinkageTypeLinkOnceODR = 2, SpvLinkageTypeMax = 0x7fffffff, } SpvLinkageType; typedef enum SpvAccessQualifier_ { SpvAccessQualifierReadOnly = 0, SpvAccessQualifierWriteOnly = 1, SpvAccessQualifierReadWrite = 2, SpvAccessQualifierMax = 0x7fffffff, } SpvAccessQualifier; typedef enum SpvFunctionParameterAttribute_ { SpvFunctionParameterAttributeZext = 0, 
SpvFunctionParameterAttributeSext = 1, SpvFunctionParameterAttributeByVal = 2, SpvFunctionParameterAttributeSret = 3, SpvFunctionParameterAttributeNoAlias = 4, SpvFunctionParameterAttributeNoCapture = 5, SpvFunctionParameterAttributeNoWrite = 6, SpvFunctionParameterAttributeNoReadWrite = 7, SpvFunctionParameterAttributeRuntimeAlignedINTEL = 5940, SpvFunctionParameterAttributeMax = 0x7fffffff, } SpvFunctionParameterAttribute; typedef enum SpvDecoration_ { SpvDecorationRelaxedPrecision = 0, SpvDecorationSpecId = 1, SpvDecorationBlock = 2, SpvDecorationBufferBlock = 3, SpvDecorationRowMajor = 4, SpvDecorationColMajor = 5, SpvDecorationArrayStride = 6, SpvDecorationMatrixStride = 7, SpvDecorationGLSLShared = 8, SpvDecorationGLSLPacked = 9, SpvDecorationCPacked = 10, SpvDecorationBuiltIn = 11, SpvDecorationNoPerspective = 13, SpvDecorationFlat = 14, SpvDecorationPatch = 15, SpvDecorationCentroid = 16, SpvDecorationSample = 17, SpvDecorationInvariant = 18, SpvDecorationRestrict = 19, SpvDecorationAliased = 20, SpvDecorationVolatile = 21, SpvDecorationConstant = 22, SpvDecorationCoherent = 23, SpvDecorationNonWritable = 24, SpvDecorationNonReadable = 25, SpvDecorationUniform = 26, SpvDecorationUniformId = 27, SpvDecorationSaturatedConversion = 28, SpvDecorationStream = 29, SpvDecorationLocation = 30, SpvDecorationComponent = 31, SpvDecorationIndex = 32, SpvDecorationBinding = 33, SpvDecorationDescriptorSet = 34, SpvDecorationOffset = 35, SpvDecorationXfbBuffer = 36, SpvDecorationXfbStride = 37, SpvDecorationFuncParamAttr = 38, SpvDecorationFPRoundingMode = 39, SpvDecorationFPFastMathMode = 40, SpvDecorationLinkageAttributes = 41, SpvDecorationNoContraction = 42, SpvDecorationInputAttachmentIndex = 43, SpvDecorationAlignment = 44, SpvDecorationMaxByteOffset = 45, SpvDecorationAlignmentId = 46, SpvDecorationMaxByteOffsetId = 47, SpvDecorationNoSignedWrap = 4469, SpvDecorationNoUnsignedWrap = 4470, SpvDecorationWeightTextureQCOM = 4487, SpvDecorationBlockMatchTextureQCOM = 
4488, SpvDecorationExplicitInterpAMD = 4999, SpvDecorationOverrideCoverageNV = 5248, SpvDecorationPassthroughNV = 5250, SpvDecorationViewportRelativeNV = 5252, SpvDecorationSecondaryViewportRelativeNV = 5256, SpvDecorationPerPrimitiveEXT = 5271, SpvDecorationPerPrimitiveNV = 5271, SpvDecorationPerViewNV = 5272, SpvDecorationPerTaskNV = 5273, SpvDecorationPerVertexKHR = 5285, SpvDecorationPerVertexNV = 5285, SpvDecorationNonUniform = 5300, SpvDecorationNonUniformEXT = 5300, SpvDecorationRestrictPointer = 5355, SpvDecorationRestrictPointerEXT = 5355, SpvDecorationAliasedPointer = 5356, SpvDecorationAliasedPointerEXT = 5356, SpvDecorationHitObjectShaderRecordBufferNV = 5386, SpvDecorationBindlessSamplerNV = 5398, SpvDecorationBindlessImageNV = 5399, SpvDecorationBoundSamplerNV = 5400, SpvDecorationBoundImageNV = 5401, SpvDecorationSIMTCallINTEL = 5599, SpvDecorationReferencedIndirectlyINTEL = 5602, SpvDecorationClobberINTEL = 5607, SpvDecorationSideEffectsINTEL = 5608, SpvDecorationVectorComputeVariableINTEL = 5624, SpvDecorationFuncParamIOKindINTEL = 5625, SpvDecorationVectorComputeFunctionINTEL = 5626, SpvDecorationStackCallINTEL = 5627, SpvDecorationGlobalVariableOffsetINTEL = 5628, SpvDecorationCounterBuffer = 5634, SpvDecorationHlslCounterBufferGOOGLE = 5634, SpvDecorationHlslSemanticGOOGLE = 5635, SpvDecorationUserSemantic = 5635, SpvDecorationUserTypeGOOGLE = 5636, SpvDecorationFunctionRoundingModeINTEL = 5822, SpvDecorationFunctionDenormModeINTEL = 5823, SpvDecorationRegisterINTEL = 5825, SpvDecorationMemoryINTEL = 5826, SpvDecorationNumbanksINTEL = 5827, SpvDecorationBankwidthINTEL = 5828, SpvDecorationMaxPrivateCopiesINTEL = 5829, SpvDecorationSinglepumpINTEL = 5830, SpvDecorationDoublepumpINTEL = 5831, SpvDecorationMaxReplicatesINTEL = 5832, SpvDecorationSimpleDualPortINTEL = 5833, SpvDecorationMergeINTEL = 5834, SpvDecorationBankBitsINTEL = 5835, SpvDecorationForcePow2DepthINTEL = 5836, SpvDecorationBurstCoalesceINTEL = 5899, SpvDecorationCacheSizeINTEL = 
5900, SpvDecorationDontStaticallyCoalesceINTEL = 5901, SpvDecorationPrefetchINTEL = 5902, SpvDecorationStallEnableINTEL = 5905, SpvDecorationFuseLoopsInFunctionINTEL = 5907, SpvDecorationMathOpDSPModeINTEL = 5909, SpvDecorationAliasScopeINTEL = 5914, SpvDecorationNoAliasINTEL = 5915, SpvDecorationInitiationIntervalINTEL = 5917, SpvDecorationMaxConcurrencyINTEL = 5918, SpvDecorationPipelineEnableINTEL = 5919, SpvDecorationBufferLocationINTEL = 5921, SpvDecorationIOPipeStorageINTEL = 5944, SpvDecorationFunctionFloatingPointModeINTEL = 6080, SpvDecorationSingleElementVectorINTEL = 6085, SpvDecorationVectorComputeCallableFunctionINTEL = 6087, SpvDecorationMediaBlockIOINTEL = 6140, SpvDecorationLatencyControlLabelINTEL = 6172, SpvDecorationLatencyControlConstraintINTEL = 6173, SpvDecorationConduitKernelArgumentINTEL = 6175, SpvDecorationRegisterMapKernelArgumentINTEL = 6176, SpvDecorationMMHostInterfaceAddressWidthINTEL = 6177, SpvDecorationMMHostInterfaceDataWidthINTEL = 6178, SpvDecorationMMHostInterfaceLatencyINTEL = 6179, SpvDecorationMMHostInterfaceReadWriteModeINTEL = 6180, SpvDecorationMMHostInterfaceMaxBurstINTEL = 6181, SpvDecorationMMHostInterfaceWaitRequestINTEL = 6182, SpvDecorationStableKernelArgumentINTEL = 6183, SpvDecorationMax = 0x7fffffff, } SpvDecoration; typedef enum SpvBuiltIn_ { SpvBuiltInPosition = 0, SpvBuiltInPointSize = 1, SpvBuiltInClipDistance = 3, SpvBuiltInCullDistance = 4, SpvBuiltInVertexId = 5, SpvBuiltInInstanceId = 6, SpvBuiltInPrimitiveId = 7, SpvBuiltInInvocationId = 8, SpvBuiltInLayer = 9, SpvBuiltInViewportIndex = 10, SpvBuiltInTessLevelOuter = 11, SpvBuiltInTessLevelInner = 12, SpvBuiltInTessCoord = 13, SpvBuiltInPatchVertices = 14, SpvBuiltInFragCoord = 15, SpvBuiltInPointCoord = 16, SpvBuiltInFrontFacing = 17, SpvBuiltInSampleId = 18, SpvBuiltInSamplePosition = 19, SpvBuiltInSampleMask = 20, SpvBuiltInFragDepth = 22, SpvBuiltInHelperInvocation = 23, SpvBuiltInNumWorkgroups = 24, SpvBuiltInWorkgroupSize = 25, 
SpvBuiltInWorkgroupId = 26, SpvBuiltInLocalInvocationId = 27, SpvBuiltInGlobalInvocationId = 28, SpvBuiltInLocalInvocationIndex = 29, SpvBuiltInWorkDim = 30, SpvBuiltInGlobalSize = 31, SpvBuiltInEnqueuedWorkgroupSize = 32, SpvBuiltInGlobalOffset = 33, SpvBuiltInGlobalLinearId = 34, SpvBuiltInSubgroupSize = 36, SpvBuiltInSubgroupMaxSize = 37, SpvBuiltInNumSubgroups = 38, SpvBuiltInNumEnqueuedSubgroups = 39, SpvBuiltInSubgroupId = 40, SpvBuiltInSubgroupLocalInvocationId = 41, SpvBuiltInVertexIndex = 42, SpvBuiltInInstanceIndex = 43, SpvBuiltInCoreIDARM = 4160, SpvBuiltInCoreCountARM = 4161, SpvBuiltInCoreMaxIDARM = 4162, SpvBuiltInWarpIDARM = 4163, SpvBuiltInWarpMaxIDARM = 4164, SpvBuiltInSubgroupEqMask = 4416, SpvBuiltInSubgroupEqMaskKHR = 4416, SpvBuiltInSubgroupGeMask = 4417, SpvBuiltInSubgroupGeMaskKHR = 4417, SpvBuiltInSubgroupGtMask = 4418, SpvBuiltInSubgroupGtMaskKHR = 4418, SpvBuiltInSubgroupLeMask = 4419, SpvBuiltInSubgroupLeMaskKHR = 4419, SpvBuiltInSubgroupLtMask = 4420, SpvBuiltInSubgroupLtMaskKHR = 4420, SpvBuiltInBaseVertex = 4424, SpvBuiltInBaseInstance = 4425, SpvBuiltInDrawIndex = 4426, SpvBuiltInPrimitiveShadingRateKHR = 4432, SpvBuiltInDeviceIndex = 4438, SpvBuiltInViewIndex = 4440, SpvBuiltInShadingRateKHR = 4444, SpvBuiltInBaryCoordNoPerspAMD = 4992, SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993, SpvBuiltInBaryCoordNoPerspSampleAMD = 4994, SpvBuiltInBaryCoordSmoothAMD = 4995, SpvBuiltInBaryCoordSmoothCentroidAMD = 4996, SpvBuiltInBaryCoordSmoothSampleAMD = 4997, SpvBuiltInBaryCoordPullModelAMD = 4998, SpvBuiltInFragStencilRefEXT = 5014, SpvBuiltInViewportMaskNV = 5253, SpvBuiltInSecondaryPositionNV = 5257, SpvBuiltInSecondaryViewportMaskNV = 5258, SpvBuiltInPositionPerViewNV = 5261, SpvBuiltInViewportMaskPerViewNV = 5262, SpvBuiltInFullyCoveredEXT = 5264, SpvBuiltInTaskCountNV = 5274, SpvBuiltInPrimitiveCountNV = 5275, SpvBuiltInPrimitiveIndicesNV = 5276, SpvBuiltInClipDistancePerViewNV = 5277, SpvBuiltInCullDistancePerViewNV = 5278, 
SpvBuiltInLayerPerViewNV = 5279, SpvBuiltInMeshViewCountNV = 5280, SpvBuiltInMeshViewIndicesNV = 5281, SpvBuiltInBaryCoordKHR = 5286, SpvBuiltInBaryCoordNV = 5286, SpvBuiltInBaryCoordNoPerspKHR = 5287, SpvBuiltInBaryCoordNoPerspNV = 5287, SpvBuiltInFragSizeEXT = 5292, SpvBuiltInFragmentSizeNV = 5292, SpvBuiltInFragInvocationCountEXT = 5293, SpvBuiltInInvocationsPerPixelNV = 5293, SpvBuiltInPrimitivePointIndicesEXT = 5294, SpvBuiltInPrimitiveLineIndicesEXT = 5295, SpvBuiltInPrimitiveTriangleIndicesEXT = 5296, SpvBuiltInCullPrimitiveEXT = 5299, SpvBuiltInLaunchIdKHR = 5319, SpvBuiltInLaunchIdNV = 5319, SpvBuiltInLaunchSizeKHR = 5320, SpvBuiltInLaunchSizeNV = 5320, SpvBuiltInWorldRayOriginKHR = 5321, SpvBuiltInWorldRayOriginNV = 5321, SpvBuiltInWorldRayDirectionKHR = 5322, SpvBuiltInWorldRayDirectionNV = 5322, SpvBuiltInObjectRayOriginKHR = 5323, SpvBuiltInObjectRayOriginNV = 5323, SpvBuiltInObjectRayDirectionKHR = 5324, SpvBuiltInObjectRayDirectionNV = 5324, SpvBuiltInRayTminKHR = 5325, SpvBuiltInRayTminNV = 5325, SpvBuiltInRayTmaxKHR = 5326, SpvBuiltInRayTmaxNV = 5326, SpvBuiltInInstanceCustomIndexKHR = 5327, SpvBuiltInInstanceCustomIndexNV = 5327, SpvBuiltInObjectToWorldKHR = 5330, SpvBuiltInObjectToWorldNV = 5330, SpvBuiltInWorldToObjectKHR = 5331, SpvBuiltInWorldToObjectNV = 5331, SpvBuiltInHitTNV = 5332, SpvBuiltInHitKindKHR = 5333, SpvBuiltInHitKindNV = 5333, SpvBuiltInCurrentRayTimeNV = 5334, SpvBuiltInHitTriangleVertexPositionsKHR = 5335, SpvBuiltInIncomingRayFlagsKHR = 5351, SpvBuiltInIncomingRayFlagsNV = 5351, SpvBuiltInRayGeometryIndexKHR = 5352, SpvBuiltInWarpsPerSMNV = 5374, SpvBuiltInSMCountNV = 5375, SpvBuiltInWarpIDNV = 5376, SpvBuiltInSMIDNV = 5377, SpvBuiltInCullMaskKHR = 6021, SpvBuiltInMax = 0x7fffffff, } SpvBuiltIn; typedef enum SpvSelectionControlShift_ { SpvSelectionControlFlattenShift = 0, SpvSelectionControlDontFlattenShift = 1, SpvSelectionControlMax = 0x7fffffff, } SpvSelectionControlShift; typedef enum SpvSelectionControlMask_ { 
SpvSelectionControlMaskNone = 0, SpvSelectionControlFlattenMask = 0x00000001, SpvSelectionControlDontFlattenMask = 0x00000002, } SpvSelectionControlMask; typedef enum SpvLoopControlShift_ { SpvLoopControlUnrollShift = 0, SpvLoopControlDontUnrollShift = 1, SpvLoopControlDependencyInfiniteShift = 2, SpvLoopControlDependencyLengthShift = 3, SpvLoopControlMinIterationsShift = 4, SpvLoopControlMaxIterationsShift = 5, SpvLoopControlIterationMultipleShift = 6, SpvLoopControlPeelCountShift = 7, SpvLoopControlPartialCountShift = 8, SpvLoopControlInitiationIntervalINTELShift = 16, SpvLoopControlMaxConcurrencyINTELShift = 17, SpvLoopControlDependencyArrayINTELShift = 18, SpvLoopControlPipelineEnableINTELShift = 19, SpvLoopControlLoopCoalesceINTELShift = 20, SpvLoopControlMaxInterleavingINTELShift = 21, SpvLoopControlSpeculatedIterationsINTELShift = 22, SpvLoopControlNoFusionINTELShift = 23, SpvLoopControlLoopCountINTELShift = 24, SpvLoopControlMaxReinvocationDelayINTELShift = 25, SpvLoopControlMax = 0x7fffffff, } SpvLoopControlShift; typedef enum SpvLoopControlMask_ { SpvLoopControlMaskNone = 0, SpvLoopControlUnrollMask = 0x00000001, SpvLoopControlDontUnrollMask = 0x00000002, SpvLoopControlDependencyInfiniteMask = 0x00000004, SpvLoopControlDependencyLengthMask = 0x00000008, SpvLoopControlMinIterationsMask = 0x00000010, SpvLoopControlMaxIterationsMask = 0x00000020, SpvLoopControlIterationMultipleMask = 0x00000040, SpvLoopControlPeelCountMask = 0x00000080, SpvLoopControlPartialCountMask = 0x00000100, SpvLoopControlInitiationIntervalINTELMask = 0x00010000, SpvLoopControlMaxConcurrencyINTELMask = 0x00020000, SpvLoopControlDependencyArrayINTELMask = 0x00040000, SpvLoopControlPipelineEnableINTELMask = 0x00080000, SpvLoopControlLoopCoalesceINTELMask = 0x00100000, SpvLoopControlMaxInterleavingINTELMask = 0x00200000, SpvLoopControlSpeculatedIterationsINTELMask = 0x00400000, SpvLoopControlNoFusionINTELMask = 0x00800000, SpvLoopControlLoopCountINTELMask = 0x01000000, 
SpvLoopControlMaxReinvocationDelayINTELMask = 0x02000000, } SpvLoopControlMask; typedef enum SpvFunctionControlShift_ { SpvFunctionControlInlineShift = 0, SpvFunctionControlDontInlineShift = 1, SpvFunctionControlPureShift = 2, SpvFunctionControlConstShift = 3, SpvFunctionControlOptNoneINTELShift = 16, SpvFunctionControlMax = 0x7fffffff, } SpvFunctionControlShift; typedef enum SpvFunctionControlMask_ { SpvFunctionControlMaskNone = 0, SpvFunctionControlInlineMask = 0x00000001, SpvFunctionControlDontInlineMask = 0x00000002, SpvFunctionControlPureMask = 0x00000004, SpvFunctionControlConstMask = 0x00000008, SpvFunctionControlOptNoneINTELMask = 0x00010000, } SpvFunctionControlMask; typedef enum SpvMemorySemanticsShift_ { SpvMemorySemanticsAcquireShift = 1, SpvMemorySemanticsReleaseShift = 2, SpvMemorySemanticsAcquireReleaseShift = 3, SpvMemorySemanticsSequentiallyConsistentShift = 4, SpvMemorySemanticsUniformMemoryShift = 6, SpvMemorySemanticsSubgroupMemoryShift = 7, SpvMemorySemanticsWorkgroupMemoryShift = 8, SpvMemorySemanticsCrossWorkgroupMemoryShift = 9, SpvMemorySemanticsAtomicCounterMemoryShift = 10, SpvMemorySemanticsImageMemoryShift = 11, SpvMemorySemanticsOutputMemoryShift = 12, SpvMemorySemanticsOutputMemoryKHRShift = 12, SpvMemorySemanticsMakeAvailableShift = 13, SpvMemorySemanticsMakeAvailableKHRShift = 13, SpvMemorySemanticsMakeVisibleShift = 14, SpvMemorySemanticsMakeVisibleKHRShift = 14, SpvMemorySemanticsVolatileShift = 15, SpvMemorySemanticsMax = 0x7fffffff, } SpvMemorySemanticsShift; typedef enum SpvMemorySemanticsMask_ { SpvMemorySemanticsMaskNone = 0, SpvMemorySemanticsAcquireMask = 0x00000002, SpvMemorySemanticsReleaseMask = 0x00000004, SpvMemorySemanticsAcquireReleaseMask = 0x00000008, SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010, SpvMemorySemanticsUniformMemoryMask = 0x00000040, SpvMemorySemanticsSubgroupMemoryMask = 0x00000080, SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100, SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, 
SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400, SpvMemorySemanticsImageMemoryMask = 0x00000800, SpvMemorySemanticsOutputMemoryMask = 0x00001000, SpvMemorySemanticsOutputMemoryKHRMask = 0x00001000, SpvMemorySemanticsMakeAvailableMask = 0x00002000, SpvMemorySemanticsMakeAvailableKHRMask = 0x00002000, SpvMemorySemanticsMakeVisibleMask = 0x00004000, SpvMemorySemanticsMakeVisibleKHRMask = 0x00004000, SpvMemorySemanticsVolatileMask = 0x00008000, } SpvMemorySemanticsMask; typedef enum SpvMemoryAccessShift_ { SpvMemoryAccessVolatileShift = 0, SpvMemoryAccessAlignedShift = 1, SpvMemoryAccessNontemporalShift = 2, SpvMemoryAccessMakePointerAvailableShift = 3, SpvMemoryAccessMakePointerAvailableKHRShift = 3, SpvMemoryAccessMakePointerVisibleShift = 4, SpvMemoryAccessMakePointerVisibleKHRShift = 4, SpvMemoryAccessNonPrivatePointerShift = 5, SpvMemoryAccessNonPrivatePointerKHRShift = 5, SpvMemoryAccessAliasScopeINTELMaskShift = 16, SpvMemoryAccessNoAliasINTELMaskShift = 17, SpvMemoryAccessMax = 0x7fffffff, } SpvMemoryAccessShift; typedef enum SpvMemoryAccessMask_ { SpvMemoryAccessMaskNone = 0, SpvMemoryAccessVolatileMask = 0x00000001, SpvMemoryAccessAlignedMask = 0x00000002, SpvMemoryAccessNontemporalMask = 0x00000004, SpvMemoryAccessMakePointerAvailableMask = 0x00000008, SpvMemoryAccessMakePointerAvailableKHRMask = 0x00000008, SpvMemoryAccessMakePointerVisibleMask = 0x00000010, SpvMemoryAccessMakePointerVisibleKHRMask = 0x00000010, SpvMemoryAccessNonPrivatePointerMask = 0x00000020, SpvMemoryAccessNonPrivatePointerKHRMask = 0x00000020, SpvMemoryAccessAliasScopeINTELMaskMask = 0x00010000, SpvMemoryAccessNoAliasINTELMaskMask = 0x00020000, } SpvMemoryAccessMask; typedef enum SpvScope_ { SpvScopeCrossDevice = 0, SpvScopeDevice = 1, SpvScopeWorkgroup = 2, SpvScopeSubgroup = 3, SpvScopeInvocation = 4, SpvScopeQueueFamily = 5, SpvScopeQueueFamilyKHR = 5, SpvScopeShaderCallKHR = 6, SpvScopeMax = 0x7fffffff, } SpvScope; typedef enum SpvGroupOperation_ { SpvGroupOperationReduce = 
0, SpvGroupOperationInclusiveScan = 1, SpvGroupOperationExclusiveScan = 2, SpvGroupOperationClusteredReduce = 3, SpvGroupOperationPartitionedReduceNV = 6, SpvGroupOperationPartitionedInclusiveScanNV = 7, SpvGroupOperationPartitionedExclusiveScanNV = 8, SpvGroupOperationMax = 0x7fffffff, } SpvGroupOperation; typedef enum SpvKernelEnqueueFlags_ { SpvKernelEnqueueFlagsNoWait = 0, SpvKernelEnqueueFlagsWaitKernel = 1, SpvKernelEnqueueFlagsWaitWorkGroup = 2, SpvKernelEnqueueFlagsMax = 0x7fffffff, } SpvKernelEnqueueFlags; typedef enum SpvKernelProfilingInfoShift_ { SpvKernelProfilingInfoCmdExecTimeShift = 0, SpvKernelProfilingInfoMax = 0x7fffffff, } SpvKernelProfilingInfoShift; typedef enum SpvKernelProfilingInfoMask_ { SpvKernelProfilingInfoMaskNone = 0, SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, } SpvKernelProfilingInfoMask; typedef enum SpvCapability_ { SpvCapabilityMatrix = 0, SpvCapabilityShader = 1, SpvCapabilityGeometry = 2, SpvCapabilityTessellation = 3, SpvCapabilityAddresses = 4, SpvCapabilityLinkage = 5, SpvCapabilityKernel = 6, SpvCapabilityVector16 = 7, SpvCapabilityFloat16Buffer = 8, SpvCapabilityFloat16 = 9, SpvCapabilityFloat64 = 10, SpvCapabilityInt64 = 11, SpvCapabilityInt64Atomics = 12, SpvCapabilityImageBasic = 13, SpvCapabilityImageReadWrite = 14, SpvCapabilityImageMipmap = 15, SpvCapabilityPipes = 17, SpvCapabilityGroups = 18, SpvCapabilityDeviceEnqueue = 19, SpvCapabilityLiteralSampler = 20, SpvCapabilityAtomicStorage = 21, SpvCapabilityInt16 = 22, SpvCapabilityTessellationPointSize = 23, SpvCapabilityGeometryPointSize = 24, SpvCapabilityImageGatherExtended = 25, SpvCapabilityStorageImageMultisample = 27, SpvCapabilityUniformBufferArrayDynamicIndexing = 28, SpvCapabilitySampledImageArrayDynamicIndexing = 29, SpvCapabilityStorageBufferArrayDynamicIndexing = 30, SpvCapabilityStorageImageArrayDynamicIndexing = 31, SpvCapabilityClipDistance = 32, SpvCapabilityCullDistance = 33, SpvCapabilityImageCubeArray = 34, SpvCapabilitySampleRateShading = 
35, SpvCapabilityImageRect = 36, SpvCapabilitySampledRect = 37, SpvCapabilityGenericPointer = 38, SpvCapabilityInt8 = 39, SpvCapabilityInputAttachment = 40, SpvCapabilitySparseResidency = 41, SpvCapabilityMinLod = 42, SpvCapabilitySampled1D = 43, SpvCapabilityImage1D = 44, SpvCapabilitySampledCubeArray = 45, SpvCapabilitySampledBuffer = 46, SpvCapabilityImageBuffer = 47, SpvCapabilityImageMSArray = 48, SpvCapabilityStorageImageExtendedFormats = 49, SpvCapabilityImageQuery = 50, SpvCapabilityDerivativeControl = 51, SpvCapabilityInterpolationFunction = 52, SpvCapabilityTransformFeedback = 53, SpvCapabilityGeometryStreams = 54, SpvCapabilityStorageImageReadWithoutFormat = 55, SpvCapabilityStorageImageWriteWithoutFormat = 56, SpvCapabilityMultiViewport = 57, SpvCapabilitySubgroupDispatch = 58, SpvCapabilityNamedBarrier = 59, SpvCapabilityPipeStorage = 60, SpvCapabilityGroupNonUniform = 61, SpvCapabilityGroupNonUniformVote = 62, SpvCapabilityGroupNonUniformArithmetic = 63, SpvCapabilityGroupNonUniformBallot = 64, SpvCapabilityGroupNonUniformShuffle = 65, SpvCapabilityGroupNonUniformShuffleRelative = 66, SpvCapabilityGroupNonUniformClustered = 67, SpvCapabilityGroupNonUniformQuad = 68, SpvCapabilityShaderLayer = 69, SpvCapabilityShaderViewportIndex = 70, SpvCapabilityUniformDecoration = 71, SpvCapabilityCoreBuiltinsARM = 4165, SpvCapabilityTileImageColorReadAccessEXT = 4166, SpvCapabilityTileImageDepthReadAccessEXT = 4167, SpvCapabilityTileImageStencilReadAccessEXT = 4168, SpvCapabilityFragmentShadingRateKHR = 4422, SpvCapabilitySubgroupBallotKHR = 4423, SpvCapabilityDrawParameters = 4427, SpvCapabilityWorkgroupMemoryExplicitLayoutKHR = 4428, SpvCapabilityWorkgroupMemoryExplicitLayout8BitAccessKHR = 4429, SpvCapabilityWorkgroupMemoryExplicitLayout16BitAccessKHR = 4430, SpvCapabilitySubgroupVoteKHR = 4431, SpvCapabilityStorageBuffer16BitAccess = 4433, SpvCapabilityStorageUniformBufferBlock16 = 4433, SpvCapabilityStorageUniform16 = 4434, 
SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434, SpvCapabilityStoragePushConstant16 = 4435, SpvCapabilityStorageInputOutput16 = 4436, SpvCapabilityDeviceGroup = 4437, SpvCapabilityMultiView = 4439, SpvCapabilityVariablePointersStorageBuffer = 4441, SpvCapabilityVariablePointers = 4442, SpvCapabilityAtomicStorageOps = 4445, SpvCapabilitySampleMaskPostDepthCoverage = 4447, SpvCapabilityStorageBuffer8BitAccess = 4448, SpvCapabilityUniformAndStorageBuffer8BitAccess = 4449, SpvCapabilityStoragePushConstant8 = 4450, SpvCapabilityDenormPreserve = 4464, SpvCapabilityDenormFlushToZero = 4465, SpvCapabilitySignedZeroInfNanPreserve = 4466, SpvCapabilityRoundingModeRTE = 4467, SpvCapabilityRoundingModeRTZ = 4468, SpvCapabilityRayQueryProvisionalKHR = 4471, SpvCapabilityRayQueryKHR = 4472, SpvCapabilityRayTraversalPrimitiveCullingKHR = 4478, SpvCapabilityRayTracingKHR = 4479, SpvCapabilityTextureSampleWeightedQCOM = 4484, SpvCapabilityTextureBoxFilterQCOM = 4485, SpvCapabilityTextureBlockMatchQCOM = 4486, SpvCapabilityFloat16ImageAMD = 5008, SpvCapabilityImageGatherBiasLodAMD = 5009, SpvCapabilityFragmentMaskAMD = 5010, SpvCapabilityStencilExportEXT = 5013, SpvCapabilityImageReadWriteLodAMD = 5015, SpvCapabilityInt64ImageEXT = 5016, SpvCapabilityShaderClockKHR = 5055, SpvCapabilitySampleMaskOverrideCoverageNV = 5249, SpvCapabilityGeometryShaderPassthroughNV = 5251, SpvCapabilityShaderViewportIndexLayerEXT = 5254, SpvCapabilityShaderViewportIndexLayerNV = 5254, SpvCapabilityShaderViewportMaskNV = 5255, SpvCapabilityShaderStereoViewNV = 5259, SpvCapabilityPerViewAttributesNV = 5260, SpvCapabilityFragmentFullyCoveredEXT = 5265, SpvCapabilityMeshShadingNV = 5266, SpvCapabilityImageFootprintNV = 5282, SpvCapabilityMeshShadingEXT = 5283, SpvCapabilityFragmentBarycentricKHR = 5284, SpvCapabilityFragmentBarycentricNV = 5284, SpvCapabilityComputeDerivativeGroupQuadsNV = 5288, SpvCapabilityFragmentDensityEXT = 5291, SpvCapabilityShadingRateNV = 5291, 
SpvCapabilityGroupNonUniformPartitionedNV = 5297, SpvCapabilityShaderNonUniform = 5301, SpvCapabilityShaderNonUniformEXT = 5301, SpvCapabilityRuntimeDescriptorArray = 5302, SpvCapabilityRuntimeDescriptorArrayEXT = 5302, SpvCapabilityInputAttachmentArrayDynamicIndexing = 5303, SpvCapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, SpvCapabilityUniformTexelBufferArrayDynamicIndexing = 5304, SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, SpvCapabilityStorageTexelBufferArrayDynamicIndexing = 5305, SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, SpvCapabilityUniformBufferArrayNonUniformIndexing = 5306, SpvCapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, SpvCapabilitySampledImageArrayNonUniformIndexing = 5307, SpvCapabilitySampledImageArrayNonUniformIndexingEXT = 5307, SpvCapabilityStorageBufferArrayNonUniformIndexing = 5308, SpvCapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, SpvCapabilityStorageImageArrayNonUniformIndexing = 5309, SpvCapabilityStorageImageArrayNonUniformIndexingEXT = 5309, SpvCapabilityInputAttachmentArrayNonUniformIndexing = 5310, SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, SpvCapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, SpvCapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, SpvCapabilityRayTracingPositionFetchKHR = 5336, SpvCapabilityRayTracingNV = 5340, SpvCapabilityRayTracingMotionBlurNV = 5341, SpvCapabilityVulkanMemoryModel = 5345, SpvCapabilityVulkanMemoryModelKHR = 5345, SpvCapabilityVulkanMemoryModelDeviceScope = 5346, SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346, SpvCapabilityPhysicalStorageBufferAddresses = 5347, SpvCapabilityPhysicalStorageBufferAddressesEXT = 5347, SpvCapabilityComputeDerivativeGroupLinearNV = 5350, SpvCapabilityRayTracingProvisionalKHR = 5353, SpvCapabilityCooperativeMatrixNV = 5357, 
SpvCapabilityFragmentShaderSampleInterlockEXT = 5363, SpvCapabilityFragmentShaderShadingRateInterlockEXT = 5372, SpvCapabilityShaderSMBuiltinsNV = 5373, SpvCapabilityFragmentShaderPixelInterlockEXT = 5378, SpvCapabilityDemoteToHelperInvocation = 5379, SpvCapabilityDemoteToHelperInvocationEXT = 5379, SpvCapabilityRayTracingOpacityMicromapEXT = 5381, SpvCapabilityShaderInvocationReorderNV = 5383, SpvCapabilityBindlessTextureNV = 5390, SpvCapabilityRayQueryPositionFetchKHR = 5391, SpvCapabilitySubgroupShuffleINTEL = 5568, SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, SpvCapabilitySubgroupImageBlockIOINTEL = 5570, SpvCapabilitySubgroupImageMediaBlockIOINTEL = 5579, SpvCapabilityRoundToInfinityINTEL = 5582, SpvCapabilityFloatingPointModeINTEL = 5583, SpvCapabilityIntegerFunctions2INTEL = 5584, SpvCapabilityFunctionPointersINTEL = 5603, SpvCapabilityIndirectReferencesINTEL = 5604, SpvCapabilityAsmINTEL = 5606, SpvCapabilityAtomicFloat32MinMaxEXT = 5612, SpvCapabilityAtomicFloat64MinMaxEXT = 5613, SpvCapabilityAtomicFloat16MinMaxEXT = 5616, SpvCapabilityVectorComputeINTEL = 5617, SpvCapabilityVectorAnyINTEL = 5619, SpvCapabilityExpectAssumeKHR = 5629, SpvCapabilitySubgroupAvcMotionEstimationINTEL = 5696, SpvCapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, SpvCapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, SpvCapabilityVariableLengthArrayINTEL = 5817, SpvCapabilityFunctionFloatControlINTEL = 5821, SpvCapabilityFPGAMemoryAttributesINTEL = 5824, SpvCapabilityFPFastMathModeINTEL = 5837, SpvCapabilityArbitraryPrecisionIntegersINTEL = 5844, SpvCapabilityArbitraryPrecisionFloatingPointINTEL = 5845, SpvCapabilityUnstructuredLoopControlsINTEL = 5886, SpvCapabilityFPGALoopControlsINTEL = 5888, SpvCapabilityKernelAttributesINTEL = 5892, SpvCapabilityFPGAKernelAttributesINTEL = 5897, SpvCapabilityFPGAMemoryAccessesINTEL = 5898, SpvCapabilityFPGAClusterAttributesINTEL = 5904, SpvCapabilityLoopFuseINTEL = 5906, SpvCapabilityFPGADSPControlINTEL = 5908, 
SpvCapabilityMemoryAccessAliasingINTEL = 5910, SpvCapabilityFPGAInvocationPipeliningAttributesINTEL = 5916, SpvCapabilityFPGABufferLocationINTEL = 5920, SpvCapabilityArbitraryPrecisionFixedPointINTEL = 5922, SpvCapabilityUSMStorageClassesINTEL = 5935, SpvCapabilityRuntimeAlignedAttributeINTEL = 5939, SpvCapabilityIOPipesINTEL = 5943, SpvCapabilityBlockingPipesINTEL = 5945, SpvCapabilityFPGARegINTEL = 5948, SpvCapabilityDotProductInputAll = 6016, SpvCapabilityDotProductInputAllKHR = 6016, SpvCapabilityDotProductInput4x8Bit = 6017, SpvCapabilityDotProductInput4x8BitKHR = 6017, SpvCapabilityDotProductInput4x8BitPacked = 6018, SpvCapabilityDotProductInput4x8BitPackedKHR = 6018, SpvCapabilityDotProduct = 6019, SpvCapabilityDotProductKHR = 6019, SpvCapabilityRayCullMaskKHR = 6020, SpvCapabilityCooperativeMatrixKHR = 6022, SpvCapabilityBitInstructions = 6025, SpvCapabilityGroupNonUniformRotateKHR = 6026, SpvCapabilityAtomicFloat32AddEXT = 6033, SpvCapabilityAtomicFloat64AddEXT = 6034, SpvCapabilityLongConstantCompositeINTEL = 6089, SpvCapabilityOptNoneINTEL = 6094, SpvCapabilityAtomicFloat16AddEXT = 6095, SpvCapabilityDebugInfoModuleINTEL = 6114, SpvCapabilityBFloat16ConversionINTEL = 6115, SpvCapabilitySplitBarrierINTEL = 6141, SpvCapabilityFPGAKernelAttributesv2INTEL = 6161, SpvCapabilityFPGALatencyControlINTEL = 6171, SpvCapabilityFPGAArgumentInterfacesINTEL = 6174, SpvCapabilityGroupUniformArithmeticKHR = 6400, SpvCapabilityMax = 0x7fffffff, } SpvCapability;

/* NOTE(review): generated header — enumerator names and values mirror the Khronos
 * SPIR-V registry and must not be edited by hand; regenerate from the registry instead.
 * Duplicate values (e.g. a KHR alias next to a core name) are intentional aliases. */

/* Bit positions of the ray-flag bits consumed by ray tracing / ray query instructions. */
typedef enum SpvRayFlagsShift_ { SpvRayFlagsOpaqueKHRShift = 0, SpvRayFlagsNoOpaqueKHRShift = 1, SpvRayFlagsTerminateOnFirstHitKHRShift = 2, SpvRayFlagsSkipClosestHitShaderKHRShift = 3, SpvRayFlagsCullBackFacingTrianglesKHRShift = 4, SpvRayFlagsCullFrontFacingTrianglesKHRShift = 5, SpvRayFlagsCullOpaqueKHRShift = 6, SpvRayFlagsCullNoOpaqueKHRShift = 7, SpvRayFlagsSkipTrianglesKHRShift = 8, SpvRayFlagsSkipAABBsKHRShift = 9, SpvRayFlagsForceOpacityMicromap2StateEXTShift = 10, SpvRayFlagsMax = 0x7fffffff, } SpvRayFlagsShift;

/* Bitmask form of SpvRayFlagsShift: each mask value is (1 << corresponding shift). */
typedef enum SpvRayFlagsMask_ { SpvRayFlagsMaskNone = 0, SpvRayFlagsOpaqueKHRMask = 0x00000001, SpvRayFlagsNoOpaqueKHRMask = 0x00000002, SpvRayFlagsTerminateOnFirstHitKHRMask = 0x00000004, SpvRayFlagsSkipClosestHitShaderKHRMask = 0x00000008, SpvRayFlagsCullBackFacingTrianglesKHRMask = 0x00000010, SpvRayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020, SpvRayFlagsCullOpaqueKHRMask = 0x00000040, SpvRayFlagsCullNoOpaqueKHRMask = 0x00000080, SpvRayFlagsSkipTrianglesKHRMask = 0x00000100, SpvRayFlagsSkipAABBsKHRMask = 0x00000200, SpvRayFlagsForceOpacityMicromap2StateEXTMask = 0x00000400, } SpvRayFlagsMask;

/* Selects whether a ray-query read refers to the candidate or the committed intersection. */
typedef enum SpvRayQueryIntersection_ { SpvRayQueryIntersectionRayQueryCandidateIntersectionKHR = 0, SpvRayQueryIntersectionRayQueryCommittedIntersectionKHR = 1, SpvRayQueryIntersectionMax = 0x7fffffff, } SpvRayQueryIntersection;

/* Kind of a committed ray-query intersection (none / triangle / generated). */
typedef enum SpvRayQueryCommittedIntersectionType_ { SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0, SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR = 1, SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR = 2, SpvRayQueryCommittedIntersectionTypeMax = 0x7fffffff, } SpvRayQueryCommittedIntersectionType;

/* Kind of a candidate ray-query intersection (triangle / AABB). */
typedef enum SpvRayQueryCandidateIntersectionType_ { SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR = 0, SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1, SpvRayQueryCandidateIntersectionTypeMax = 0x7fffffff, } SpvRayQueryCandidateIntersectionType;

/* Bit positions of the fragment-shading-rate bits. */
typedef enum SpvFragmentShadingRateShift_ { SpvFragmentShadingRateVertical2PixelsShift = 0, SpvFragmentShadingRateVertical4PixelsShift = 1, SpvFragmentShadingRateHorizontal2PixelsShift = 2, SpvFragmentShadingRateHorizontal4PixelsShift = 3, SpvFragmentShadingRateMax = 0x7fffffff, } SpvFragmentShadingRateShift;

/* Bitmask form of SpvFragmentShadingRateShift (1 << shift). */
typedef enum SpvFragmentShadingRateMask_ { SpvFragmentShadingRateMaskNone = 0, SpvFragmentShadingRateVertical2PixelsMask = 0x00000001,
SpvFragmentShadingRateVertical4PixelsMask = 0x00000002, SpvFragmentShadingRateHorizontal2PixelsMask = 0x00000004, SpvFragmentShadingRateHorizontal4PixelsMask = 0x00000008, } SpvFragmentShadingRateMask;

/* NOTE(review): generated header — values mirror the Khronos SPIR-V registry;
 * do not edit by hand, regenerate from the registry instead. */

/* Denormal handling mode for floating-point execution modes. */
typedef enum SpvFPDenormMode_ { SpvFPDenormModePreserve = 0, SpvFPDenormModeFlushToZero = 1, SpvFPDenormModeMax = 0x7fffffff, } SpvFPDenormMode;

/* Floating-point operation mode (IEEE vs. alternate). */
typedef enum SpvFPOperationMode_ { SpvFPOperationModeIEEE = 0, SpvFPOperationModeALT = 1, SpvFPOperationModeMax = 0x7fffffff, } SpvFPOperationMode;

/* Fixed-point quantization (rounding) modes for the INTEL arbitrary-precision ops. */
typedef enum SpvQuantizationModes_ { SpvQuantizationModesTRN = 0, SpvQuantizationModesTRN_ZERO = 1, SpvQuantizationModesRND = 2, SpvQuantizationModesRND_ZERO = 3, SpvQuantizationModesRND_INF = 4, SpvQuantizationModesRND_MIN_INF = 5, SpvQuantizationModesRND_CONV = 6, SpvQuantizationModesRND_CONV_ODD = 7, SpvQuantizationModesMax = 0x7fffffff, } SpvQuantizationModes;

/* Fixed-point overflow modes (wrap / saturate variants). */
typedef enum SpvOverflowModes_ { SpvOverflowModesWRAP = 0, SpvOverflowModesSAT = 1, SpvOverflowModesSAT_ZERO = 2, SpvOverflowModesSAT_SYM = 3, SpvOverflowModesMax = 0x7fffffff, } SpvOverflowModes;

/* Packed vector layout used by the integer dot-product instructions. */
typedef enum SpvPackedVectorFormat_ { SpvPackedVectorFormatPackedVectorFormat4x8Bit = 0, SpvPackedVectorFormatPackedVectorFormat4x8BitKHR = 0, SpvPackedVectorFormatMax = 0x7fffffff, } SpvPackedVectorFormat;

/* Bit positions of the cooperative-matrix operand flags. */
typedef enum SpvCooperativeMatrixOperandsShift_ { SpvCooperativeMatrixOperandsMatrixASignedComponentsShift = 0, SpvCooperativeMatrixOperandsMatrixBSignedComponentsShift = 1, SpvCooperativeMatrixOperandsMatrixCSignedComponentsShift = 2, SpvCooperativeMatrixOperandsMatrixResultSignedComponentsShift = 3, SpvCooperativeMatrixOperandsSaturatingAccumulationShift = 4, SpvCooperativeMatrixOperandsMax = 0x7fffffff, } SpvCooperativeMatrixOperandsShift;

/* Bitmask form of SpvCooperativeMatrixOperandsShift (1 << shift). */
typedef enum SpvCooperativeMatrixOperandsMask_ { SpvCooperativeMatrixOperandsMaskNone = 0, SpvCooperativeMatrixOperandsMatrixASignedComponentsMask = 0x00000001, SpvCooperativeMatrixOperandsMatrixBSignedComponentsMask = 0x00000002, SpvCooperativeMatrixOperandsMatrixCSignedComponentsMask = 0x00000004, SpvCooperativeMatrixOperandsMatrixResultSignedComponentsMask = 0x00000008, SpvCooperativeMatrixOperandsSaturatingAccumulationMask = 0x00000010, } SpvCooperativeMatrixOperandsMask;

/* Memory layout of a cooperative matrix (row- or column-major). */
typedef enum SpvCooperativeMatrixLayout_ { SpvCooperativeMatrixLayoutRowMajorKHR = 0, SpvCooperativeMatrixLayoutColumnMajorKHR = 1, SpvCooperativeMatrixLayoutMax = 0x7fffffff, } SpvCooperativeMatrixLayout;

/* Role of a cooperative matrix in a multiply-add (A, B, or accumulator). */
typedef enum SpvCooperativeMatrixUse_ { SpvCooperativeMatrixUseMatrixAKHR = 0, SpvCooperativeMatrixUseMatrixBKHR = 1, SpvCooperativeMatrixUseMatrixAccumulatorKHR = 2, SpvCooperativeMatrixUseMax = 0x7fffffff, } SpvCooperativeMatrixUse;

/* SPIR-V instruction opcodes. Gaps in the numbering are values reserved or retired
 * by the registry; values >= 4160 belong to extensions and vendor instruction sets. */
typedef enum SpvOp_ { SpvOpNop = 0, SpvOpUndef = 1, SpvOpSourceContinued = 2, SpvOpSource = 3, SpvOpSourceExtension = 4, SpvOpName = 5, SpvOpMemberName = 6, SpvOpString = 7, SpvOpLine = 8, SpvOpExtension = 10, SpvOpExtInstImport = 11, SpvOpExtInst = 12, SpvOpMemoryModel = 14, SpvOpEntryPoint = 15, SpvOpExecutionMode = 16, SpvOpCapability = 17, SpvOpTypeVoid = 19, SpvOpTypeBool = 20, SpvOpTypeInt = 21, SpvOpTypeFloat = 22, SpvOpTypeVector = 23, SpvOpTypeMatrix = 24, SpvOpTypeImage = 25, SpvOpTypeSampler = 26, SpvOpTypeSampledImage = 27, SpvOpTypeArray = 28, SpvOpTypeRuntimeArray = 29, SpvOpTypeStruct = 30, SpvOpTypeOpaque = 31, SpvOpTypePointer = 32, SpvOpTypeFunction = 33, SpvOpTypeEvent = 34, SpvOpTypeDeviceEvent = 35, SpvOpTypeReserveId = 36, SpvOpTypeQueue = 37, SpvOpTypePipe = 38, SpvOpTypeForwardPointer = 39, SpvOpConstantTrue = 41, SpvOpConstantFalse = 42, SpvOpConstant = 43, SpvOpConstantComposite = 44, SpvOpConstantSampler = 45, SpvOpConstantNull = 46, SpvOpSpecConstantTrue = 48, SpvOpSpecConstantFalse = 49, SpvOpSpecConstant = 50, SpvOpSpecConstantComposite = 51, SpvOpSpecConstantOp = 52, SpvOpFunction = 54, SpvOpFunctionParameter = 55, SpvOpFunctionEnd = 56, SpvOpFunctionCall = 57, SpvOpVariable = 59, SpvOpImageTexelPointer = 60, SpvOpLoad = 61, SpvOpStore = 62, SpvOpCopyMemory = 63, SpvOpCopyMemorySized = 64, SpvOpAccessChain = 65,
/* SpvOp, continued: access-chain/decoration/composite ops, image sampling and query ops,
 * conversions, arithmetic/logic/bit ops, control flow, atomics, group and pipe ops,
 * then extension opcodes (KHR, QCOM, AMD, NV, INTEL). Generated — do not edit by hand. */
SpvOpInBoundsAccessChain = 66, SpvOpPtrAccessChain = 67, SpvOpArrayLength = 68, SpvOpGenericPtrMemSemantics = 69, SpvOpInBoundsPtrAccessChain = 70, SpvOpDecorate = 71, SpvOpMemberDecorate = 72, SpvOpDecorationGroup = 73, SpvOpGroupDecorate = 74, SpvOpGroupMemberDecorate = 75, SpvOpVectorExtractDynamic = 77, SpvOpVectorInsertDynamic = 78, SpvOpVectorShuffle = 79, SpvOpCompositeConstruct = 80, SpvOpCompositeExtract = 81, SpvOpCompositeInsert = 82, SpvOpCopyObject = 83, SpvOpTranspose = 84, SpvOpSampledImage = 86, SpvOpImageSampleImplicitLod = 87, SpvOpImageSampleExplicitLod = 88, SpvOpImageSampleDrefImplicitLod = 89, SpvOpImageSampleDrefExplicitLod = 90, SpvOpImageSampleProjImplicitLod = 91, SpvOpImageSampleProjExplicitLod = 92, SpvOpImageSampleProjDrefImplicitLod = 93, SpvOpImageSampleProjDrefExplicitLod = 94, SpvOpImageFetch = 95, SpvOpImageGather = 96, SpvOpImageDrefGather = 97, SpvOpImageRead = 98, SpvOpImageWrite = 99, SpvOpImage = 100, SpvOpImageQueryFormat = 101, SpvOpImageQueryOrder = 102, SpvOpImageQuerySizeLod = 103, SpvOpImageQuerySize = 104, SpvOpImageQueryLod = 105, SpvOpImageQueryLevels = 106, SpvOpImageQuerySamples = 107, SpvOpConvertFToU = 109, SpvOpConvertFToS = 110, SpvOpConvertSToF = 111, SpvOpConvertUToF = 112, SpvOpUConvert = 113, SpvOpSConvert = 114, SpvOpFConvert = 115, SpvOpQuantizeToF16 = 116, SpvOpConvertPtrToU = 117, SpvOpSatConvertSToU = 118, SpvOpSatConvertUToS = 119, SpvOpConvertUToPtr = 120, SpvOpPtrCastToGeneric = 121, SpvOpGenericCastToPtr = 122, SpvOpGenericCastToPtrExplicit = 123, SpvOpBitcast = 124, SpvOpSNegate = 126, SpvOpFNegate = 127, SpvOpIAdd = 128, SpvOpFAdd = 129, SpvOpISub = 130, SpvOpFSub = 131, SpvOpIMul = 132, SpvOpFMul = 133, SpvOpUDiv = 134, SpvOpSDiv = 135, SpvOpFDiv = 136, SpvOpUMod = 137, SpvOpSRem = 138, SpvOpSMod = 139, SpvOpFRem = 140, SpvOpFMod = 141, SpvOpVectorTimesScalar = 142, SpvOpMatrixTimesScalar = 143, SpvOpVectorTimesMatrix = 144, SpvOpMatrixTimesVector = 145, SpvOpMatrixTimesMatrix = 146, 
SpvOpOuterProduct = 147, SpvOpDot = 148, SpvOpIAddCarry = 149, SpvOpISubBorrow = 150, SpvOpUMulExtended = 151, SpvOpSMulExtended = 152, SpvOpAny = 154, SpvOpAll = 155, SpvOpIsNan = 156, SpvOpIsInf = 157, SpvOpIsFinite = 158, SpvOpIsNormal = 159, SpvOpSignBitSet = 160, SpvOpLessOrGreater = 161, SpvOpOrdered = 162, SpvOpUnordered = 163, SpvOpLogicalEqual = 164, SpvOpLogicalNotEqual = 165, SpvOpLogicalOr = 166, SpvOpLogicalAnd = 167, SpvOpLogicalNot = 168, SpvOpSelect = 169, SpvOpIEqual = 170, SpvOpINotEqual = 171, SpvOpUGreaterThan = 172, SpvOpSGreaterThan = 173, SpvOpUGreaterThanEqual = 174, SpvOpSGreaterThanEqual = 175, SpvOpULessThan = 176, SpvOpSLessThan = 177, SpvOpULessThanEqual = 178, SpvOpSLessThanEqual = 179, SpvOpFOrdEqual = 180, SpvOpFUnordEqual = 181, SpvOpFOrdNotEqual = 182, SpvOpFUnordNotEqual = 183, SpvOpFOrdLessThan = 184, SpvOpFUnordLessThan = 185, SpvOpFOrdGreaterThan = 186, SpvOpFUnordGreaterThan = 187, SpvOpFOrdLessThanEqual = 188, SpvOpFUnordLessThanEqual = 189, SpvOpFOrdGreaterThanEqual = 190, SpvOpFUnordGreaterThanEqual = 191, SpvOpShiftRightLogical = 194, SpvOpShiftRightArithmetic = 195, SpvOpShiftLeftLogical = 196, SpvOpBitwiseOr = 197, SpvOpBitwiseXor = 198, SpvOpBitwiseAnd = 199, SpvOpNot = 200, SpvOpBitFieldInsert = 201, SpvOpBitFieldSExtract = 202, SpvOpBitFieldUExtract = 203, SpvOpBitReverse = 204, SpvOpBitCount = 205, SpvOpDPdx = 207, SpvOpDPdy = 208, SpvOpFwidth = 209, SpvOpDPdxFine = 210, SpvOpDPdyFine = 211, SpvOpFwidthFine = 212, SpvOpDPdxCoarse = 213, SpvOpDPdyCoarse = 214, SpvOpFwidthCoarse = 215, SpvOpEmitVertex = 218, SpvOpEndPrimitive = 219, SpvOpEmitStreamVertex = 220, SpvOpEndStreamPrimitive = 221, SpvOpControlBarrier = 224, SpvOpMemoryBarrier = 225, SpvOpAtomicLoad = 227, SpvOpAtomicStore = 228, SpvOpAtomicExchange = 229, SpvOpAtomicCompareExchange = 230, SpvOpAtomicCompareExchangeWeak = 231, SpvOpAtomicIIncrement = 232, SpvOpAtomicIDecrement = 233, SpvOpAtomicIAdd = 234, SpvOpAtomicISub = 235, SpvOpAtomicSMin = 236, 
SpvOpAtomicUMin = 237, SpvOpAtomicSMax = 238, SpvOpAtomicUMax = 239, SpvOpAtomicAnd = 240, SpvOpAtomicOr = 241, SpvOpAtomicXor = 242, SpvOpPhi = 245, SpvOpLoopMerge = 246, SpvOpSelectionMerge = 247, SpvOpLabel = 248, SpvOpBranch = 249, SpvOpBranchConditional = 250, SpvOpSwitch = 251, SpvOpKill = 252, SpvOpReturn = 253, SpvOpReturnValue = 254, SpvOpUnreachable = 255, SpvOpLifetimeStart = 256, SpvOpLifetimeStop = 257, SpvOpGroupAsyncCopy = 259, SpvOpGroupWaitEvents = 260, SpvOpGroupAll = 261, SpvOpGroupAny = 262, SpvOpGroupBroadcast = 263, SpvOpGroupIAdd = 264, SpvOpGroupFAdd = 265, SpvOpGroupFMin = 266, SpvOpGroupUMin = 267, SpvOpGroupSMin = 268, SpvOpGroupFMax = 269, SpvOpGroupUMax = 270, SpvOpGroupSMax = 271, SpvOpReadPipe = 274, SpvOpWritePipe = 275, SpvOpReservedReadPipe = 276, SpvOpReservedWritePipe = 277, SpvOpReserveReadPipePackets = 278, SpvOpReserveWritePipePackets = 279, SpvOpCommitReadPipe = 280, SpvOpCommitWritePipe = 281, SpvOpIsValidReserveId = 282, SpvOpGetNumPipePackets = 283, SpvOpGetMaxPipePackets = 284, SpvOpGroupReserveReadPipePackets = 285, SpvOpGroupReserveWritePipePackets = 286, SpvOpGroupCommitReadPipe = 287, SpvOpGroupCommitWritePipe = 288, SpvOpEnqueueMarker = 291, SpvOpEnqueueKernel = 292, SpvOpGetKernelNDrangeSubGroupCount = 293, SpvOpGetKernelNDrangeMaxSubGroupSize = 294, SpvOpGetKernelWorkGroupSize = 295, SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296, SpvOpRetainEvent = 297, SpvOpReleaseEvent = 298, SpvOpCreateUserEvent = 299, SpvOpIsValidEvent = 300, SpvOpSetUserEventStatus = 301, SpvOpCaptureEventProfilingInfo = 302, SpvOpGetDefaultQueue = 303, SpvOpBuildNDRange = 304, SpvOpImageSparseSampleImplicitLod = 305, SpvOpImageSparseSampleExplicitLod = 306, SpvOpImageSparseSampleDrefImplicitLod = 307, SpvOpImageSparseSampleDrefExplicitLod = 308, SpvOpImageSparseSampleProjImplicitLod = 309, SpvOpImageSparseSampleProjExplicitLod = 310, SpvOpImageSparseSampleProjDrefImplicitLod = 311, SpvOpImageSparseSampleProjDrefExplicitLod = 312, 
SpvOpImageSparseFetch = 313, SpvOpImageSparseGather = 314, SpvOpImageSparseDrefGather = 315, SpvOpImageSparseTexelsResident = 316, SpvOpNoLine = 317, SpvOpAtomicFlagTestAndSet = 318, SpvOpAtomicFlagClear = 319, SpvOpImageSparseRead = 320, SpvOpSizeOf = 321, SpvOpTypePipeStorage = 322, SpvOpConstantPipeStorage = 323, SpvOpCreatePipeFromPipeStorage = 324, SpvOpGetKernelLocalSizeForSubgroupCount = 325, SpvOpGetKernelMaxNumSubgroups = 326, SpvOpTypeNamedBarrier = 327, SpvOpNamedBarrierInitialize = 328, SpvOpMemoryNamedBarrier = 329, SpvOpModuleProcessed = 330, SpvOpExecutionModeId = 331, SpvOpDecorateId = 332, SpvOpGroupNonUniformElect = 333, SpvOpGroupNonUniformAll = 334, SpvOpGroupNonUniformAny = 335, SpvOpGroupNonUniformAllEqual = 336, SpvOpGroupNonUniformBroadcast = 337, SpvOpGroupNonUniformBroadcastFirst = 338, SpvOpGroupNonUniformBallot = 339, SpvOpGroupNonUniformInverseBallot = 340, SpvOpGroupNonUniformBallotBitExtract = 341, SpvOpGroupNonUniformBallotBitCount = 342, SpvOpGroupNonUniformBallotFindLSB = 343, SpvOpGroupNonUniformBallotFindMSB = 344, SpvOpGroupNonUniformShuffle = 345, SpvOpGroupNonUniformShuffleXor = 346, SpvOpGroupNonUniformShuffleUp = 347, SpvOpGroupNonUniformShuffleDown = 348, SpvOpGroupNonUniformIAdd = 349, SpvOpGroupNonUniformFAdd = 350, SpvOpGroupNonUniformIMul = 351, SpvOpGroupNonUniformFMul = 352, SpvOpGroupNonUniformSMin = 353, SpvOpGroupNonUniformUMin = 354, SpvOpGroupNonUniformFMin = 355, SpvOpGroupNonUniformSMax = 356, SpvOpGroupNonUniformUMax = 357, SpvOpGroupNonUniformFMax = 358, SpvOpGroupNonUniformBitwiseAnd = 359, SpvOpGroupNonUniformBitwiseOr = 360, SpvOpGroupNonUniformBitwiseXor = 361, SpvOpGroupNonUniformLogicalAnd = 362, SpvOpGroupNonUniformLogicalOr = 363, SpvOpGroupNonUniformLogicalXor = 364, SpvOpGroupNonUniformQuadBroadcast = 365, SpvOpGroupNonUniformQuadSwap = 366, SpvOpCopyLogical = 400, SpvOpPtrEqual = 401, SpvOpPtrNotEqual = 402, SpvOpPtrDiff = 403, SpvOpColorAttachmentReadEXT = 4160, SpvOpDepthAttachmentReadEXT = 4161, 
SpvOpStencilAttachmentReadEXT = 4162, SpvOpTerminateInvocation = 4416, SpvOpSubgroupBallotKHR = 4421, SpvOpSubgroupFirstInvocationKHR = 4422, SpvOpSubgroupAllKHR = 4428, SpvOpSubgroupAnyKHR = 4429, SpvOpSubgroupAllEqualKHR = 4430, SpvOpGroupNonUniformRotateKHR = 4431, SpvOpSubgroupReadInvocationKHR = 4432, SpvOpTraceRayKHR = 4445, SpvOpExecuteCallableKHR = 4446, SpvOpConvertUToAccelerationStructureKHR = 4447, SpvOpIgnoreIntersectionKHR = 4448, SpvOpTerminateRayKHR = 4449, SpvOpSDot = 4450, SpvOpSDotKHR = 4450, SpvOpUDot = 4451, SpvOpUDotKHR = 4451, SpvOpSUDot = 4452, SpvOpSUDotKHR = 4452, SpvOpSDotAccSat = 4453, SpvOpSDotAccSatKHR = 4453, SpvOpUDotAccSat = 4454, SpvOpUDotAccSatKHR = 4454, SpvOpSUDotAccSat = 4455, SpvOpSUDotAccSatKHR = 4455, SpvOpTypeCooperativeMatrixKHR = 4456, SpvOpCooperativeMatrixLoadKHR = 4457, SpvOpCooperativeMatrixStoreKHR = 4458, SpvOpCooperativeMatrixMulAddKHR = 4459, SpvOpCooperativeMatrixLengthKHR = 4460, SpvOpTypeRayQueryKHR = 4472, SpvOpRayQueryInitializeKHR = 4473, SpvOpRayQueryTerminateKHR = 4474, SpvOpRayQueryGenerateIntersectionKHR = 4475, SpvOpRayQueryConfirmIntersectionKHR = 4476, SpvOpRayQueryProceedKHR = 4477, SpvOpRayQueryGetIntersectionTypeKHR = 4479, SpvOpImageSampleWeightedQCOM = 4480, SpvOpImageBoxFilterQCOM = 4481, SpvOpImageBlockMatchSSDQCOM = 4482, SpvOpImageBlockMatchSADQCOM = 4483, SpvOpGroupIAddNonUniformAMD = 5000, SpvOpGroupFAddNonUniformAMD = 5001, SpvOpGroupFMinNonUniformAMD = 5002, SpvOpGroupUMinNonUniformAMD = 5003, SpvOpGroupSMinNonUniformAMD = 5004, SpvOpGroupFMaxNonUniformAMD = 5005, SpvOpGroupUMaxNonUniformAMD = 5006, SpvOpGroupSMaxNonUniformAMD = 5007, SpvOpFragmentMaskFetchAMD = 5011, SpvOpFragmentFetchAMD = 5012, SpvOpReadClockKHR = 5056, SpvOpHitObjectRecordHitMotionNV = 5249, SpvOpHitObjectRecordHitWithIndexMotionNV = 5250, SpvOpHitObjectRecordMissMotionNV = 5251, SpvOpHitObjectGetWorldToObjectNV = 5252, SpvOpHitObjectGetObjectToWorldNV = 5253, SpvOpHitObjectGetObjectRayDirectionNV = 5254, 
SpvOpHitObjectGetObjectRayOriginNV = 5255, SpvOpHitObjectTraceRayMotionNV = 5256, SpvOpHitObjectGetShaderRecordBufferHandleNV = 5257, SpvOpHitObjectGetShaderBindingTableRecordIndexNV = 5258, SpvOpHitObjectRecordEmptyNV = 5259, SpvOpHitObjectTraceRayNV = 5260, SpvOpHitObjectRecordHitNV = 5261, SpvOpHitObjectRecordHitWithIndexNV = 5262, SpvOpHitObjectRecordMissNV = 5263, SpvOpHitObjectExecuteShaderNV = 5264, SpvOpHitObjectGetCurrentTimeNV = 5265, SpvOpHitObjectGetAttributesNV = 5266, SpvOpHitObjectGetHitKindNV = 5267, SpvOpHitObjectGetPrimitiveIndexNV = 5268, SpvOpHitObjectGetGeometryIndexNV = 5269, SpvOpHitObjectGetInstanceIdNV = 5270, SpvOpHitObjectGetInstanceCustomIndexNV = 5271, SpvOpHitObjectGetWorldRayDirectionNV = 5272, SpvOpHitObjectGetWorldRayOriginNV = 5273, SpvOpHitObjectGetRayTMaxNV = 5274, SpvOpHitObjectGetRayTMinNV = 5275, SpvOpHitObjectIsEmptyNV = 5276, SpvOpHitObjectIsHitNV = 5277, SpvOpHitObjectIsMissNV = 5278, SpvOpReorderThreadWithHitObjectNV = 5279, SpvOpReorderThreadWithHintNV = 5280, SpvOpTypeHitObjectNV = 5281, SpvOpImageSampleFootprintNV = 5283, SpvOpEmitMeshTasksEXT = 5294, SpvOpSetMeshOutputsEXT = 5295, SpvOpGroupNonUniformPartitionNV = 5296, SpvOpWritePackedPrimitiveIndices4x8NV = 5299, SpvOpReportIntersectionKHR = 5334, SpvOpReportIntersectionNV = 5334, SpvOpIgnoreIntersectionNV = 5335, SpvOpTerminateRayNV = 5336, SpvOpTraceNV = 5337, SpvOpTraceMotionNV = 5338, SpvOpTraceRayMotionNV = 5339, SpvOpRayQueryGetIntersectionTriangleVertexPositionsKHR = 5340, SpvOpTypeAccelerationStructureKHR = 5341, SpvOpTypeAccelerationStructureNV = 5341, SpvOpExecuteCallableNV = 5344, SpvOpTypeCooperativeMatrixNV = 5358, SpvOpCooperativeMatrixLoadNV = 5359, SpvOpCooperativeMatrixStoreNV = 5360, SpvOpCooperativeMatrixMulAddNV = 5361, SpvOpCooperativeMatrixLengthNV = 5362, SpvOpBeginInvocationInterlockEXT = 5364, SpvOpEndInvocationInterlockEXT = 5365, SpvOpDemoteToHelperInvocation = 5380, SpvOpDemoteToHelperInvocationEXT = 5380, SpvOpIsHelperInvocationEXT = 
5381, SpvOpConvertUToImageNV = 5391, SpvOpConvertUToSamplerNV = 5392, SpvOpConvertImageToUNV = 5393, SpvOpConvertSamplerToUNV = 5394, SpvOpConvertUToSampledImageNV = 5395, SpvOpConvertSampledImageToUNV = 5396, SpvOpSamplerImageAddressingModeNV = 5397, SpvOpSubgroupShuffleINTEL = 5571, SpvOpSubgroupShuffleDownINTEL = 5572, SpvOpSubgroupShuffleUpINTEL = 5573, SpvOpSubgroupShuffleXorINTEL = 5574, SpvOpSubgroupBlockReadINTEL = 5575, SpvOpSubgroupBlockWriteINTEL = 5576, SpvOpSubgroupImageBlockReadINTEL = 5577, SpvOpSubgroupImageBlockWriteINTEL = 5578, SpvOpSubgroupImageMediaBlockReadINTEL = 5580, SpvOpSubgroupImageMediaBlockWriteINTEL = 5581, SpvOpUCountLeadingZerosINTEL = 5585, SpvOpUCountTrailingZerosINTEL = 5586, SpvOpAbsISubINTEL = 5587, SpvOpAbsUSubINTEL = 5588, SpvOpIAddSatINTEL = 5589, SpvOpUAddSatINTEL = 5590, SpvOpIAverageINTEL = 5591, SpvOpUAverageINTEL = 5592, SpvOpIAverageRoundedINTEL = 5593, SpvOpUAverageRoundedINTEL = 5594, SpvOpISubSatINTEL = 5595, SpvOpUSubSatINTEL = 5596, SpvOpIMul32x16INTEL = 5597, SpvOpUMul32x16INTEL = 5598, SpvOpConstantFunctionPointerINTEL = 5600, SpvOpFunctionPointerCallINTEL = 5601, SpvOpAsmTargetINTEL = 5609, SpvOpAsmINTEL = 5610, SpvOpAsmCallINTEL = 5611, SpvOpAtomicFMinEXT = 5614, SpvOpAtomicFMaxEXT = 5615, SpvOpAssumeTrueKHR = 5630, SpvOpExpectKHR = 5631, SpvOpDecorateString = 5632, SpvOpDecorateStringGOOGLE = 5632, SpvOpMemberDecorateString = 5633, SpvOpMemberDecorateStringGOOGLE = 5633, SpvOpVmeImageINTEL = 5699, SpvOpTypeVmeImageINTEL = 5700, SpvOpTypeAvcImePayloadINTEL = 5701, SpvOpTypeAvcRefPayloadINTEL = 5702, SpvOpTypeAvcSicPayloadINTEL = 5703, SpvOpTypeAvcMcePayloadINTEL = 5704, SpvOpTypeAvcMceResultINTEL = 5705, SpvOpTypeAvcImeResultINTEL = 5706, SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, SpvOpTypeAvcImeSingleReferenceStreaminINTEL = 5709, SpvOpTypeAvcImeDualReferenceStreaminINTEL = 5710, SpvOpTypeAvcRefResultINTEL = 5711, 
SpvOpTypeAvcSicResultINTEL = 5712, SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, SpvOpSubgroupAvcMceConvertToImePayloadINTEL = 5732, SpvOpSubgroupAvcMceConvertToImeResultINTEL = 5733, SpvOpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, SpvOpSubgroupAvcMceConvertToRefResultINTEL = 5735, SpvOpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, SpvOpSubgroupAvcMceConvertToSicResultINTEL = 5737, SpvOpSubgroupAvcMceGetMotionVectorsINTEL = 5738, SpvOpSubgroupAvcMceGetInterDistortionsINTEL = 5739, SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, SpvOpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, SpvOpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, SpvOpSubgroupAvcMceGetInterDirectionsINTEL = 5743, SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, 
SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, SpvOpSubgroupAvcImeInitializeINTEL = 5747, SpvOpSubgroupAvcImeSetSingleReferenceINTEL = 5748, SpvOpSubgroupAvcImeSetDualReferenceINTEL = 5749, SpvOpSubgroupAvcImeRefWindowSizeINTEL = 5750, SpvOpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, SpvOpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, SpvOpSubgroupAvcImeSetWeightedSadINTEL = 5756, SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, SpvOpSubgroupAvcImeConvertToMceResultINTEL = 5765, SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, SpvOpSubgroupAvcImeGetBorderReachedINTEL = 5776, SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL 
= 5777, SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, SpvOpSubgroupAvcFmeInitializeINTEL = 5781, SpvOpSubgroupAvcBmeInitializeINTEL = 5782, SpvOpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, SpvOpSubgroupAvcRefConvertToMceResultINTEL = 5790, SpvOpSubgroupAvcSicInitializeINTEL = 5791, SpvOpSubgroupAvcSicConfigureSkcINTEL = 5792, SpvOpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, SpvOpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, SpvOpSubgroupAvcSicEvaluateIpeINTEL = 5803, SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, SpvOpSubgroupAvcSicConvertToMceResultINTEL = 5808, SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, 
SpvOpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, SpvOpSubgroupAvcSicGetInterRawSadsINTEL = 5816, SpvOpVariableLengthArrayINTEL = 5818, SpvOpSaveMemoryINTEL = 5819, SpvOpRestoreMemoryINTEL = 5820, SpvOpArbitraryFloatSinCosPiINTEL = 5840, SpvOpArbitraryFloatCastINTEL = 5841, SpvOpArbitraryFloatCastFromIntINTEL = 5842, SpvOpArbitraryFloatCastToIntINTEL = 5843, SpvOpArbitraryFloatAddINTEL = 5846, SpvOpArbitraryFloatSubINTEL = 5847, SpvOpArbitraryFloatMulINTEL = 5848, SpvOpArbitraryFloatDivINTEL = 5849, SpvOpArbitraryFloatGTINTEL = 5850, SpvOpArbitraryFloatGEINTEL = 5851, SpvOpArbitraryFloatLTINTEL = 5852, SpvOpArbitraryFloatLEINTEL = 5853, SpvOpArbitraryFloatEQINTEL = 5854, SpvOpArbitraryFloatRecipINTEL = 5855, SpvOpArbitraryFloatRSqrtINTEL = 5856, SpvOpArbitraryFloatCbrtINTEL = 5857, SpvOpArbitraryFloatHypotINTEL = 5858, SpvOpArbitraryFloatSqrtINTEL = 5859, SpvOpArbitraryFloatLogINTEL = 5860, SpvOpArbitraryFloatLog2INTEL = 5861, SpvOpArbitraryFloatLog10INTEL = 5862, SpvOpArbitraryFloatLog1pINTEL = 5863, SpvOpArbitraryFloatExpINTEL = 5864, SpvOpArbitraryFloatExp2INTEL = 5865, SpvOpArbitraryFloatExp10INTEL = 5866, SpvOpArbitraryFloatExpm1INTEL = 5867, SpvOpArbitraryFloatSinINTEL = 5868, SpvOpArbitraryFloatCosINTEL = 5869, SpvOpArbitraryFloatSinCosINTEL = 5870, SpvOpArbitraryFloatSinPiINTEL = 5871, SpvOpArbitraryFloatCosPiINTEL = 5872, SpvOpArbitraryFloatASinINTEL = 5873, SpvOpArbitraryFloatASinPiINTEL = 5874, SpvOpArbitraryFloatACosINTEL = 5875, SpvOpArbitraryFloatACosPiINTEL = 5876, SpvOpArbitraryFloatATanINTEL = 5877, SpvOpArbitraryFloatATanPiINTEL = 5878, SpvOpArbitraryFloatATan2INTEL = 5879, SpvOpArbitraryFloatPowINTEL = 5880, SpvOpArbitraryFloatPowRINTEL = 5881, SpvOpArbitraryFloatPowNINTEL = 5882, SpvOpLoopControlINTEL = 5887, SpvOpAliasDomainDeclINTEL = 5911, SpvOpAliasScopeDeclINTEL = 5912, SpvOpAliasScopeListDeclINTEL = 5913, 
SpvOpFixedSqrtINTEL = 5923, SpvOpFixedRecipINTEL = 5924, SpvOpFixedRsqrtINTEL = 5925, SpvOpFixedSinINTEL = 5926, SpvOpFixedCosINTEL = 5927, SpvOpFixedSinCosINTEL = 5928, SpvOpFixedSinPiINTEL = 5929, SpvOpFixedCosPiINTEL = 5930, SpvOpFixedSinCosPiINTEL = 5931, SpvOpFixedLogINTEL = 5932, SpvOpFixedExpINTEL = 5933, SpvOpPtrCastToCrossWorkgroupINTEL = 5934, SpvOpCrossWorkgroupCastToPtrINTEL = 5938, SpvOpReadPipeBlockingINTEL = 5946, SpvOpWritePipeBlockingINTEL = 5947, SpvOpFPGARegINTEL = 5949, SpvOpRayQueryGetRayTMinKHR = 6016, SpvOpRayQueryGetRayFlagsKHR = 6017, SpvOpRayQueryGetIntersectionTKHR = 6018, SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, SpvOpRayQueryGetIntersectionInstanceIdKHR = 6020, SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, SpvOpRayQueryGetIntersectionGeometryIndexKHR = 6022, SpvOpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, SpvOpRayQueryGetIntersectionBarycentricsKHR = 6024, SpvOpRayQueryGetIntersectionFrontFaceKHR = 6025, SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, SpvOpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, SpvOpRayQueryGetIntersectionObjectRayOriginKHR = 6028, SpvOpRayQueryGetWorldRayDirectionKHR = 6029, SpvOpRayQueryGetWorldRayOriginKHR = 6030, SpvOpRayQueryGetIntersectionObjectToWorldKHR = 6031, SpvOpRayQueryGetIntersectionWorldToObjectKHR = 6032, SpvOpAtomicFAddEXT = 6035, SpvOpTypeBufferSurfaceINTEL = 6086, SpvOpTypeStructContinuedINTEL = 6090, SpvOpConstantCompositeContinuedINTEL = 6091, SpvOpSpecConstantCompositeContinuedINTEL = 6092, SpvOpConvertFToBF16INTEL = 6116, SpvOpConvertBF16ToFINTEL = 6117, SpvOpControlBarrierArriveINTEL = 6142, SpvOpControlBarrierWaitINTEL = 6143, SpvOpGroupIMulKHR = 6401, SpvOpGroupFMulKHR = 6402, SpvOpGroupBitwiseAndKHR = 6403, SpvOpGroupBitwiseOrKHR = 6404, SpvOpGroupBitwiseXorKHR = 6405, SpvOpGroupLogicalAndKHR = 6406, SpvOpGroupLogicalOrKHR = 6407, SpvOpGroupLogicalXorKHR = 6408, SpvOpMax = 0x7fffffff, } SpvOp; #ifdef
SPV_ENABLE_UTILITY_CODE #ifndef __cplusplus #include #endif inline void SpvHasResultAndType(SpvOp opcode, bool* hasResult, bool* hasResultType) { *hasResult = *hasResultType = false; switch (opcode) { default: /* unknown opcode */ break; case SpvOpNop: *hasResult = false; *hasResultType = false; break; case SpvOpUndef: *hasResult = true; *hasResultType = true; break; case SpvOpSourceContinued: *hasResult = false; *hasResultType = false; break; case SpvOpSource: *hasResult = false; *hasResultType = false; break; case SpvOpSourceExtension: *hasResult = false; *hasResultType = false; break; case SpvOpName: *hasResult = false; *hasResultType = false; break; case SpvOpMemberName: *hasResult = false; *hasResultType = false; break; case SpvOpString: *hasResult = true; *hasResultType = false; break; case SpvOpLine: *hasResult = false; *hasResultType = false; break; case SpvOpExtension: *hasResult = false; *hasResultType = false; break; case SpvOpExtInstImport: *hasResult = true; *hasResultType = false; break; case SpvOpExtInst: *hasResult = true; *hasResultType = true; break; case SpvOpMemoryModel: *hasResult = false; *hasResultType = false; break; case SpvOpEntryPoint: *hasResult = false; *hasResultType = false; break; case SpvOpExecutionMode: *hasResult = false; *hasResultType = false; break; case SpvOpCapability: *hasResult = false; *hasResultType = false; break; case SpvOpTypeVoid: *hasResult = true; *hasResultType = false; break; case SpvOpTypeBool: *hasResult = true; *hasResultType = false; break; case SpvOpTypeInt: *hasResult = true; *hasResultType = false; break; case SpvOpTypeFloat: *hasResult = true; *hasResultType = false; break; case SpvOpTypeVector: *hasResult = true; *hasResultType = false; break; case SpvOpTypeMatrix: *hasResult = true; *hasResultType = false; break; case SpvOpTypeImage: *hasResult = true; *hasResultType = false; break; case SpvOpTypeSampler: *hasResult = true; *hasResultType = false; break; case SpvOpTypeSampledImage: *hasResult = true; 
*hasResultType = false; break; case SpvOpTypeArray: *hasResult = true; *hasResultType = false; break; case SpvOpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; case SpvOpTypeStruct: *hasResult = true; *hasResultType = false; break; case SpvOpTypeOpaque: *hasResult = true; *hasResultType = false; break; case SpvOpTypePointer: *hasResult = true; *hasResultType = false; break; case SpvOpTypeFunction: *hasResult = true; *hasResultType = false; break; case SpvOpTypeEvent: *hasResult = true; *hasResultType = false; break; case SpvOpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; case SpvOpTypeReserveId: *hasResult = true; *hasResultType = false; break; case SpvOpTypeQueue: *hasResult = true; *hasResultType = false; break; case SpvOpTypePipe: *hasResult = true; *hasResultType = false; break; case SpvOpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; case SpvOpConstantTrue: *hasResult = true; *hasResultType = true; break; case SpvOpConstantFalse: *hasResult = true; *hasResultType = true; break; case SpvOpConstant: *hasResult = true; *hasResultType = true; break; case SpvOpConstantComposite: *hasResult = true; *hasResultType = true; break; case SpvOpConstantSampler: *hasResult = true; *hasResultType = true; break; case SpvOpConstantNull: *hasResult = true; *hasResultType = true; break; case SpvOpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; case SpvOpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; case SpvOpSpecConstant: *hasResult = true; *hasResultType = true; break; case SpvOpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; case SpvOpSpecConstantOp: *hasResult = true; *hasResultType = true; break; case SpvOpFunction: *hasResult = true; *hasResultType = true; break; case SpvOpFunctionParameter: *hasResult = true; *hasResultType = true; break; case SpvOpFunctionEnd: *hasResult = false; *hasResultType = false; break; case SpvOpFunctionCall: *hasResult 
= true; *hasResultType = true; break; case SpvOpVariable: *hasResult = true; *hasResultType = true; break; case SpvOpImageTexelPointer: *hasResult = true; *hasResultType = true; break; case SpvOpLoad: *hasResult = true; *hasResultType = true; break; case SpvOpStore: *hasResult = false; *hasResultType = false; break; case SpvOpCopyMemory: *hasResult = false; *hasResultType = false; break; case SpvOpCopyMemorySized: *hasResult = false; *hasResultType = false; break; case SpvOpAccessChain: *hasResult = true; *hasResultType = true; break; case SpvOpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; case SpvOpPtrAccessChain: *hasResult = true; *hasResultType = true; break; case SpvOpArrayLength: *hasResult = true; *hasResultType = true; break; case SpvOpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; case SpvOpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; case SpvOpDecorate: *hasResult = false; *hasResultType = false; break; case SpvOpMemberDecorate: *hasResult = false; *hasResultType = false; break; case SpvOpDecorationGroup: *hasResult = true; *hasResultType = false; break; case SpvOpGroupDecorate: *hasResult = false; *hasResultType = false; break; case SpvOpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; case SpvOpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; case SpvOpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; case SpvOpVectorShuffle: *hasResult = true; *hasResultType = true; break; case SpvOpCompositeConstruct: *hasResult = true; *hasResultType = true; break; case SpvOpCompositeExtract: *hasResult = true; *hasResultType = true; break; case SpvOpCompositeInsert: *hasResult = true; *hasResultType = true; break; case SpvOpCopyObject: *hasResult = true; *hasResultType = true; break; case SpvOpTranspose: *hasResult = true; *hasResultType = true; break; case SpvOpSampledImage: *hasResult = true; *hasResultType = true; break; 
case SpvOpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageFetch: *hasResult = true; *hasResultType = true; break; case SpvOpImageGather: *hasResult = true; *hasResultType = true; break; case SpvOpImageDrefGather: *hasResult = true; *hasResultType = true; break; case SpvOpImageRead: *hasResult = true; *hasResultType = true; break; case SpvOpImageWrite: *hasResult = false; *hasResultType = false; break; case SpvOpImage: *hasResult = true; *hasResultType = true; break; case SpvOpImageQueryFormat: *hasResult = true; *hasResultType = true; break; case SpvOpImageQueryOrder: *hasResult = true; *hasResultType = true; break; case SpvOpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageQuerySize: *hasResult = true; *hasResultType = true; break; case SpvOpImageQueryLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageQueryLevels: *hasResult = true; *hasResultType = true; break; case SpvOpImageQuerySamples: *hasResult = true; *hasResultType = true; break; case SpvOpConvertFToU: *hasResult = true; *hasResultType = true; break; case SpvOpConvertFToS: *hasResult = true; *hasResultType = true; break; case SpvOpConvertSToF: *hasResult = true; *hasResultType = true; break; case SpvOpConvertUToF: *hasResult = true; *hasResultType = true; break; case SpvOpUConvert: *hasResult = true; 
*hasResultType = true; break; case SpvOpSConvert: *hasResult = true; *hasResultType = true; break; case SpvOpFConvert: *hasResult = true; *hasResultType = true; break; case SpvOpQuantizeToF16: *hasResult = true; *hasResultType = true; break; case SpvOpConvertPtrToU: *hasResult = true; *hasResultType = true; break; case SpvOpSatConvertSToU: *hasResult = true; *hasResultType = true; break; case SpvOpSatConvertUToS: *hasResult = true; *hasResultType = true; break; case SpvOpConvertUToPtr: *hasResult = true; *hasResultType = true; break; case SpvOpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; case SpvOpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; case SpvOpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; case SpvOpBitcast: *hasResult = true; *hasResultType = true; break; case SpvOpSNegate: *hasResult = true; *hasResultType = true; break; case SpvOpFNegate: *hasResult = true; *hasResultType = true; break; case SpvOpIAdd: *hasResult = true; *hasResultType = true; break; case SpvOpFAdd: *hasResult = true; *hasResultType = true; break; case SpvOpISub: *hasResult = true; *hasResultType = true; break; case SpvOpFSub: *hasResult = true; *hasResultType = true; break; case SpvOpIMul: *hasResult = true; *hasResultType = true; break; case SpvOpFMul: *hasResult = true; *hasResultType = true; break; case SpvOpUDiv: *hasResult = true; *hasResultType = true; break; case SpvOpSDiv: *hasResult = true; *hasResultType = true; break; case SpvOpFDiv: *hasResult = true; *hasResultType = true; break; case SpvOpUMod: *hasResult = true; *hasResultType = true; break; case SpvOpSRem: *hasResult = true; *hasResultType = true; break; case SpvOpSMod: *hasResult = true; *hasResultType = true; break; case SpvOpFRem: *hasResult = true; *hasResultType = true; break; case SpvOpFMod: *hasResult = true; *hasResultType = true; break; case SpvOpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; case SpvOpMatrixTimesScalar: 
*hasResult = true; *hasResultType = true; break; case SpvOpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; case SpvOpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; case SpvOpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; case SpvOpOuterProduct: *hasResult = true; *hasResultType = true; break; case SpvOpDot: *hasResult = true; *hasResultType = true; break; case SpvOpIAddCarry: *hasResult = true; *hasResultType = true; break; case SpvOpISubBorrow: *hasResult = true; *hasResultType = true; break; case SpvOpUMulExtended: *hasResult = true; *hasResultType = true; break; case SpvOpSMulExtended: *hasResult = true; *hasResultType = true; break; case SpvOpAny: *hasResult = true; *hasResultType = true; break; case SpvOpAll: *hasResult = true; *hasResultType = true; break; case SpvOpIsNan: *hasResult = true; *hasResultType = true; break; case SpvOpIsInf: *hasResult = true; *hasResultType = true; break; case SpvOpIsFinite: *hasResult = true; *hasResultType = true; break; case SpvOpIsNormal: *hasResult = true; *hasResultType = true; break; case SpvOpSignBitSet: *hasResult = true; *hasResultType = true; break; case SpvOpLessOrGreater: *hasResult = true; *hasResultType = true; break; case SpvOpOrdered: *hasResult = true; *hasResultType = true; break; case SpvOpUnordered: *hasResult = true; *hasResultType = true; break; case SpvOpLogicalEqual: *hasResult = true; *hasResultType = true; break; case SpvOpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; case SpvOpLogicalOr: *hasResult = true; *hasResultType = true; break; case SpvOpLogicalAnd: *hasResult = true; *hasResultType = true; break; case SpvOpLogicalNot: *hasResult = true; *hasResultType = true; break; case SpvOpSelect: *hasResult = true; *hasResultType = true; break; case SpvOpIEqual: *hasResult = true; *hasResultType = true; break; case SpvOpINotEqual: *hasResult = true; *hasResultType = true; break; case SpvOpUGreaterThan: *hasResult = true; 
*hasResultType = true; break; case SpvOpSGreaterThan: *hasResult = true; *hasResultType = true; break; case SpvOpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpULessThan: *hasResult = true; *hasResultType = true; break; case SpvOpSLessThan: *hasResult = true; *hasResultType = true; break; case SpvOpULessThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpSLessThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdLessThan: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordLessThan: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break; case SpvOpShiftRightLogical: *hasResult = true; *hasResultType = true; break; case SpvOpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break; case SpvOpShiftLeftLogical: *hasResult = true; *hasResultType = true; break; case SpvOpBitwiseOr: *hasResult = true; *hasResultType = true; break; case SpvOpBitwiseXor: *hasResult = true; *hasResultType = true; break; case SpvOpBitwiseAnd: *hasResult = true; *hasResultType = true; break; case SpvOpNot: *hasResult = true; *hasResultType = true; break; case 
SpvOpBitFieldInsert: *hasResult = true; *hasResultType = true; break; case SpvOpBitFieldSExtract: *hasResult = true; *hasResultType = true; break; case SpvOpBitFieldUExtract: *hasResult = true; *hasResultType = true; break; case SpvOpBitReverse: *hasResult = true; *hasResultType = true; break; case SpvOpBitCount: *hasResult = true; *hasResultType = true; break; case SpvOpDPdx: *hasResult = true; *hasResultType = true; break; case SpvOpDPdy: *hasResult = true; *hasResultType = true; break; case SpvOpFwidth: *hasResult = true; *hasResultType = true; break; case SpvOpDPdxFine: *hasResult = true; *hasResultType = true; break; case SpvOpDPdyFine: *hasResult = true; *hasResultType = true; break; case SpvOpFwidthFine: *hasResult = true; *hasResultType = true; break; case SpvOpDPdxCoarse: *hasResult = true; *hasResultType = true; break; case SpvOpDPdyCoarse: *hasResult = true; *hasResultType = true; break; case SpvOpFwidthCoarse: *hasResult = true; *hasResultType = true; break; case SpvOpEmitVertex: *hasResult = false; *hasResultType = false; break; case SpvOpEndPrimitive: *hasResult = false; *hasResultType = false; break; case SpvOpEmitStreamVertex: *hasResult = false; *hasResultType = false; break; case SpvOpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break; case SpvOpControlBarrier: *hasResult = false; *hasResultType = false; break; case SpvOpMemoryBarrier: *hasResult = false; *hasResultType = false; break; case SpvOpAtomicLoad: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicStore: *hasResult = false; *hasResultType = false; break; case SpvOpAtomicExchange: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicIIncrement: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicIDecrement: *hasResult = true; *hasResultType = true; break; case 
SpvOpAtomicIAdd: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicISub: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicSMin: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicUMin: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicSMax: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicUMax: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicAnd: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicOr: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicXor: *hasResult = true; *hasResultType = true; break; case SpvOpPhi: *hasResult = true; *hasResultType = true; break; case SpvOpLoopMerge: *hasResult = false; *hasResultType = false; break; case SpvOpSelectionMerge: *hasResult = false; *hasResultType = false; break; case SpvOpLabel: *hasResult = true; *hasResultType = false; break; case SpvOpBranch: *hasResult = false; *hasResultType = false; break; case SpvOpBranchConditional: *hasResult = false; *hasResultType = false; break; case SpvOpSwitch: *hasResult = false; *hasResultType = false; break; case SpvOpKill: *hasResult = false; *hasResultType = false; break; case SpvOpReturn: *hasResult = false; *hasResultType = false; break; case SpvOpReturnValue: *hasResult = false; *hasResultType = false; break; case SpvOpUnreachable: *hasResult = false; *hasResultType = false; break; case SpvOpLifetimeStart: *hasResult = false; *hasResultType = false; break; case SpvOpLifetimeStop: *hasResult = false; *hasResultType = false; break; case SpvOpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break; case SpvOpGroupWaitEvents: *hasResult = false; *hasResultType = false; break; case SpvOpGroupAll: *hasResult = true; *hasResultType = true; break; case SpvOpGroupAny: *hasResult = true; *hasResultType = true; break; case SpvOpGroupBroadcast: *hasResult = true; *hasResultType = true; break; case SpvOpGroupIAdd: *hasResult = true; *hasResultType = true; break; 
case SpvOpGroupFAdd: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupUMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupSMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFMax: *hasResult = true; *hasResultType = true; break; case SpvOpGroupUMax: *hasResult = true; *hasResultType = true; break; case SpvOpGroupSMax: *hasResult = true; *hasResultType = true; break; case SpvOpReadPipe: *hasResult = true; *hasResultType = true; break; case SpvOpWritePipe: *hasResult = true; *hasResultType = true; break; case SpvOpReservedReadPipe: *hasResult = true; *hasResultType = true; break; case SpvOpReservedWritePipe: *hasResult = true; *hasResultType = true; break; case SpvOpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpCommitReadPipe: *hasResult = false; *hasResultType = false; break; case SpvOpCommitWritePipe: *hasResult = false; *hasResultType = false; break; case SpvOpIsValidReserveId: *hasResult = true; *hasResultType = true; break; case SpvOpGetNumPipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; case SpvOpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break; case SpvOpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break; case SpvOpEnqueueMarker: *hasResult = true; *hasResultType = true; break; case SpvOpEnqueueKernel: *hasResult = true; *hasResultType = true; break; case SpvOpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break; case SpvOpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = 
true; break; case SpvOpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break; case SpvOpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break; case SpvOpRetainEvent: *hasResult = false; *hasResultType = false; break; case SpvOpReleaseEvent: *hasResult = false; *hasResultType = false; break; case SpvOpCreateUserEvent: *hasResult = true; *hasResultType = true; break; case SpvOpIsValidEvent: *hasResult = true; *hasResultType = true; break; case SpvOpSetUserEventStatus: *hasResult = false; *hasResultType = false; break; case SpvOpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break; case SpvOpGetDefaultQueue: *hasResult = true; *hasResultType = true; break; case SpvOpBuildNDRange: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseFetch: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseGather: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break; case SpvOpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break; case SpvOpNoLine: *hasResult = false; *hasResultType = false; break; case SpvOpAtomicFlagTestAndSet: *hasResult = true; 
*hasResultType = true; break; case SpvOpAtomicFlagClear: *hasResult = false; *hasResultType = false; break; case SpvOpImageSparseRead: *hasResult = true; *hasResultType = true; break; case SpvOpSizeOf: *hasResult = true; *hasResultType = true; break; case SpvOpTypePipeStorage: *hasResult = true; *hasResultType = false; break; case SpvOpConstantPipeStorage: *hasResult = true; *hasResultType = true; break; case SpvOpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break; case SpvOpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break; case SpvOpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break; case SpvOpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break; case SpvOpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break; case SpvOpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break; case SpvOpModuleProcessed: *hasResult = false; *hasResultType = false; break; case SpvOpExecutionModeId: *hasResult = false; *hasResultType = false; break; case SpvOpDecorateId: *hasResult = false; *hasResultType = false; break; case SpvOpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break; 
case SpvOpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; 
break; case SpvOpCopyLogical: *hasResult = true; *hasResultType = true; break; case SpvOpPtrEqual: *hasResult = true; *hasResultType = true; break; case SpvOpPtrNotEqual: *hasResult = true; *hasResultType = true; break; case SpvOpPtrDiff: *hasResult = true; *hasResultType = true; break; case SpvOpColorAttachmentReadEXT: *hasResult = true; *hasResultType = true; break; case SpvOpDepthAttachmentReadEXT: *hasResult = true; *hasResultType = true; break; case SpvOpStencilAttachmentReadEXT: *hasResult = true; *hasResultType = true; break; case SpvOpTerminateInvocation: *hasResult = false; *hasResultType = false; break; case SpvOpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupNonUniformRotateKHR: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; case SpvOpTraceRayKHR: *hasResult = false; *hasResultType = false; break; case SpvOpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break; case SpvOpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break; case SpvOpIgnoreIntersectionKHR: *hasResult = false; *hasResultType = false; break; case SpvOpTerminateRayKHR: *hasResult = false; *hasResultType = false; break; case SpvOpSDot: *hasResult = true; *hasResultType = true; break; case SpvOpUDot: *hasResult = true; *hasResultType = true; break; case SpvOpSUDot: *hasResult = true; *hasResultType = true; break; case SpvOpSDotAccSat: *hasResult = true; *hasResultType = true; break; case SpvOpUDotAccSat: *hasResult = true; *hasResultType = true; break; case SpvOpSUDotAccSat: *hasResult = true; *hasResultType = 
true; break; case SpvOpTypeCooperativeMatrixKHR: *hasResult = true; *hasResultType = false; break; case SpvOpCooperativeMatrixLoadKHR: *hasResult = true; *hasResultType = true; break; case SpvOpCooperativeMatrixStoreKHR: *hasResult = false; *hasResultType = false; break; case SpvOpCooperativeMatrixMulAddKHR: *hasResult = true; *hasResultType = true; break; case SpvOpCooperativeMatrixLengthKHR: *hasResult = true; *hasResultType = true; break; case SpvOpTypeRayQueryKHR: *hasResult = true; *hasResultType = false; break; case SpvOpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break; case SpvOpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break; case SpvOpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break; case SpvOpRayQueryConfirmIntersectionKHR: *hasResult = false; *hasResultType = false; break; case SpvOpRayQueryProceedKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionTypeKHR: *hasResult = true; *hasResultType = true; break; case SpvOpImageSampleWeightedQCOM: *hasResult = true; *hasResultType = true; break; case SpvOpImageBoxFilterQCOM: *hasResult = true; *hasResultType = true; break; case SpvOpImageBlockMatchSSDQCOM: *hasResult = true; *hasResultType = true; break; case SpvOpImageBlockMatchSADQCOM: *hasResult = true; *hasResultType = true; break; case SpvOpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; case SpvOpGroupSMaxNonUniformAMD: 
*hasResult = true; *hasResultType = true; break; case SpvOpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break; case SpvOpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break; case SpvOpReadClockKHR: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectRecordHitMotionNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectRecordHitWithIndexMotionNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectRecordMissMotionNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectGetWorldToObjectNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetObjectToWorldNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetObjectRayDirectionNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetObjectRayOriginNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectTraceRayMotionNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectGetShaderRecordBufferHandleNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetShaderBindingTableRecordIndexNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectRecordEmptyNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectTraceRayNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectRecordHitNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectRecordHitWithIndexNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectRecordMissNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectExecuteShaderNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectGetCurrentTimeNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetAttributesNV: *hasResult = false; *hasResultType = false; break; case SpvOpHitObjectGetHitKindNV: *hasResult = true; *hasResultType = true; break; case 
SpvOpHitObjectGetPrimitiveIndexNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetGeometryIndexNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetInstanceIdNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetInstanceCustomIndexNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetWorldRayDirectionNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetWorldRayOriginNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetRayTMaxNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectGetRayTMinNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectIsEmptyNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectIsHitNV: *hasResult = true; *hasResultType = true; break; case SpvOpHitObjectIsMissNV: *hasResult = true; *hasResultType = true; break; case SpvOpReorderThreadWithHitObjectNV: *hasResult = false; *hasResultType = false; break; case SpvOpReorderThreadWithHintNV: *hasResult = false; *hasResultType = false; break; case SpvOpTypeHitObjectNV: *hasResult = true; *hasResultType = false; break; case SpvOpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break; case SpvOpEmitMeshTasksEXT: *hasResult = false; *hasResultType = false; break; case SpvOpSetMeshOutputsEXT: *hasResult = false; *hasResultType = false; break; case SpvOpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break; case SpvOpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break; case SpvOpReportIntersectionNV: *hasResult = true; *hasResultType = true; break; case SpvOpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break; case SpvOpTerminateRayNV: *hasResult = false; *hasResultType = false; break; case SpvOpTraceNV: *hasResult = false; *hasResultType = false; break; case SpvOpTraceMotionNV: *hasResult = false; *hasResultType = false; break; case 
SpvOpTraceRayMotionNV: *hasResult = false; *hasResultType = false; break; case SpvOpRayQueryGetIntersectionTriangleVertexPositionsKHR: *hasResult = true; *hasResultType = true; break; case SpvOpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break; case SpvOpExecuteCallableNV: *hasResult = false; *hasResultType = false; break; case SpvOpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break; case SpvOpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break; case SpvOpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break; case SpvOpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break; case SpvOpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break; case SpvOpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; case SpvOpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; case SpvOpDemoteToHelperInvocation: *hasResult = false; *hasResultType = false; break; case SpvOpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break; case SpvOpConvertUToImageNV: *hasResult = true; *hasResultType = true; break; case SpvOpConvertUToSamplerNV: *hasResult = true; *hasResultType = true; break; case SpvOpConvertImageToUNV: *hasResult = true; *hasResultType = true; break; case SpvOpConvertSamplerToUNV: *hasResult = true; *hasResultType = true; break; case SpvOpConvertUToSampledImageNV: *hasResult = true; *hasResultType = true; break; case SpvOpConvertSampledImageToUNV: *hasResult = true; *hasResultType = true; break; case SpvOpSamplerImageAddressingModeNV: *hasResult = false; *hasResultType = false; break; case SpvOpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupShuffleXorINTEL: *hasResult 
= true; *hasResultType = true; break; case SpvOpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpAbsISubINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpIAddSatINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUAddSatINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpIAverageINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUAverageINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpISubSatINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUSubSatINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpConstantFunctionPointerINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFunctionPointerCallINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpAsmTargetINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpAsmINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpAsmCallINTEL: *hasResult = 
true; *hasResultType = true; break; case SpvOpAtomicFMinEXT: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicFMaxEXT: *hasResult = true; *hasResultType = true; break; case SpvOpAssumeTrueKHR: *hasResult = false; *hasResultType = false; break; case SpvOpExpectKHR: *hasResult = true; *hasResultType = true; break; case SpvOpDecorateString: *hasResult = false; *hasResultType = false; break; case SpvOpMemberDecorateString: *hasResult = false; *hasResultType = false; break; case SpvOpVmeImageINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case 
SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break; case 
SpvOpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; case 
SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; case 
SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; 
case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; case 
SpvOpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpVariableLengthArrayINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpSaveMemoryINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpRestoreMemoryINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpArbitraryFloatSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCastINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCastFromIntINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCastToIntINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatAddINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatSubINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatMulINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatDivINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatGTINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatGEINTEL: *hasResult = true; *hasResultType = true; break; 
case SpvOpArbitraryFloatLTINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatLEINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatEQINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatRecipINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatRSqrtINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCbrtINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatHypotINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatSqrtINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatLogINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatLog2INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatLog10INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatLog1pINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatExpINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatExp2INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatExp10INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatExpm1INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatSinINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatSinCosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatSinPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatCosPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatASinINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatASinPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatACosINTEL: *hasResult = true; 
*hasResultType = true; break; case SpvOpArbitraryFloatACosPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatATanINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatATanPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatATan2INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatPowINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatPowRINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpArbitraryFloatPowNINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpLoopControlINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpAliasDomainDeclINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpAliasScopeDeclINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpAliasScopeListDeclINTEL: *hasResult = true; *hasResultType = false; break; case SpvOpFixedSqrtINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedRecipINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedRsqrtINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedSinINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedCosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedSinCosINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedSinPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedCosPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedLogINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFixedExpINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpPtrCastToCrossWorkgroupINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpCrossWorkgroupCastToPtrINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpReadPipeBlockingINTEL: 
*hasResult = true; *hasResultType = true; break; case SpvOpWritePipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpFPGARegINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetRayTMinKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetRayFlagsKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionTKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionInstanceIdKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionGeometryIndexKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionPrimitiveIndexKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionBarycentricsKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionFrontFaceKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionObjectRayDirectionKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionObjectRayOriginKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetWorldRayDirectionKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetWorldRayOriginKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionObjectToWorldKHR: *hasResult = true; *hasResultType = true; break; case SpvOpRayQueryGetIntersectionWorldToObjectKHR: *hasResult = true; *hasResultType = true; break; case SpvOpAtomicFAddEXT: *hasResult = true; *hasResultType = true; break; case SpvOpTypeBufferSurfaceINTEL: *hasResult = 
true; *hasResultType = false; break; case SpvOpTypeStructContinuedINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpSpecConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpConvertFToBF16INTEL: *hasResult = true; *hasResultType = true; break; case SpvOpConvertBF16ToFINTEL: *hasResult = true; *hasResultType = true; break; case SpvOpControlBarrierArriveINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpControlBarrierWaitINTEL: *hasResult = false; *hasResultType = false; break; case SpvOpGroupIMulKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupFMulKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupBitwiseAndKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupBitwiseOrKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupBitwiseXorKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupLogicalAndKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupLogicalOrKHR: *hasResult = true; *hasResultType = true; break; case SpvOpGroupLogicalXorKHR: *hasResult = true; *hasResultType = true; break; } } #endif /* SPV_ENABLE_UTILITY_CODE */ #endif ================================================ FILE: deps/SPIRV-reflect/spirv_reflect.c ================================================ /* Copyright 2017-2022 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ #include "spirv_reflect.h" #include #include #include #if defined(WIN32) #define _CRTDBG_MAP_ALLOC #include #include #else #include #endif #if defined(__clang__) || defined(__GNUC__) || defined(__APPLE_CC__) #define FALLTHROUGH __attribute__((fallthrough)) #else #define FALLTHROUGH #endif #if defined(SPIRV_REFLECT_ENABLE_ASSERTS) #define SPV_REFLECT_ASSERT(COND) assert(COND); #else #define SPV_REFLECT_ASSERT(COND) #endif // clang-format off enum { SPIRV_STARTING_WORD_INDEX = 5, SPIRV_WORD_SIZE = sizeof(uint32_t), SPIRV_BYTE_WIDTH = 8, SPIRV_MINIMUM_FILE_SIZE = SPIRV_STARTING_WORD_INDEX * SPIRV_WORD_SIZE, SPIRV_DATA_ALIGNMENT = 4 * SPIRV_WORD_SIZE, // 16 SPIRV_ACCESS_CHAIN_INDEX_OFFSET = 4, }; enum { INVALID_VALUE = 0xFFFFFFFF, }; enum { MAX_NODE_NAME_LENGTH = 1024, // Number of unique PhysicalStorageBuffer structs tracked to detect recursion MAX_RECURSIVE_PHYSICAL_POINTER_CHECK = 128, }; enum { IMAGE_SAMPLED = 1, IMAGE_STORAGE = 2, }; typedef struct SpvReflectPrvArrayTraits { uint32_t element_type_id; uint32_t length_id; } SpvReflectPrvArrayTraits; typedef struct SpvReflectPrvImageTraits { uint32_t sampled_type_id; SpvDim dim; uint32_t depth; uint32_t arrayed; uint32_t ms; uint32_t sampled; SpvImageFormat image_format; } SpvReflectPrvImageTraits; typedef struct SpvReflectPrvNumberDecoration { uint32_t word_offset; uint32_t value; } SpvReflectPrvNumberDecoration; typedef struct SpvReflectPrvStringDecoration { uint32_t word_offset; const char* value; } SpvReflectPrvStringDecoration; typedef struct SpvReflectPrvDecorations { bool is_relaxed_precision; bool is_block; bool is_buffer_block; bool is_row_major; bool is_column_major; bool is_built_in; bool is_noperspective; bool is_flat; bool is_non_writable; bool is_non_readable; bool is_patch; bool is_per_vertex; bool is_per_task; bool is_weight_texture; bool is_block_match_texture; SpvReflectUserType user_type; 
SpvReflectPrvNumberDecoration set;
SpvReflectPrvNumberDecoration binding;
SpvReflectPrvNumberDecoration input_attachment_index;
SpvReflectPrvNumberDecoration location;
SpvReflectPrvNumberDecoration component;
SpvReflectPrvNumberDecoration offset;
SpvReflectPrvNumberDecoration uav_counter_buffer;
SpvReflectPrvStringDecoration semantic;
uint32_t array_stride;
uint32_t matrix_stride;
uint32_t spec_id;
SpvBuiltIn built_in;
} SpvReflectPrvDecorations;

// NOTE(review): appears to hold one parsed instruction plus the
// reflection state (name, decorations, type traits) gathered for its
// result id — confirm against the parser's population code.
typedef struct SpvReflectPrvNode {
  uint32_t result_id;
  SpvOp op;
  uint32_t result_type_id;
  uint32_t type_id;
  SpvCapability capability;
  SpvStorageClass storage_class;
  uint32_t word_offset;
  uint32_t word_count;
  bool is_type;
  SpvReflectPrvArrayTraits array_traits;
  SpvReflectPrvImageTraits image_traits;
  uint32_t image_type_id;
  const char* name;
  SpvReflectPrvDecorations decorations;
  uint32_t member_count;
  const char** member_names;
  SpvReflectPrvDecorations* member_decorations;
} SpvReflectPrvNode;

// Associates a result id with its string payload.
typedef struct SpvReflectPrvString {
  uint32_t result_id;
  const char* string;
} SpvReflectPrvString;

// There are a limit set of instructions that can touch an OpVariable,
// these are represented here with how it was accessed
// Examples:
//   OpImageRead -> OpLoad -> OpVariable
//   OpImageWrite -> OpLoad -> OpVariable
//   OpStore -> OpAccessChain -> OpAccessChain -> OpVariable
//   OpAtomicIAdd -> OpAccessChain -> OpVariable
//   OpAtomicLoad -> OpImageTexelPointer -> OpVariable
typedef struct SpvReflectPrvAccessedVariable {
  SpvReflectPrvNode* p_node;
  uint32_t result_id;
  uint32_t variable_ptr;
} SpvReflectPrvAccessedVariable;

// Per-function record: which functions it calls and which variables
// its body touches (used for entry-point usage analysis).
typedef struct SpvReflectPrvFunction {
  uint32_t id;
  uint32_t callee_count;
  uint32_t* callees;
  struct SpvReflectPrvFunction** callee_ptrs;
  uint32_t accessed_variable_count;
  SpvReflectPrvAccessedVariable* accessed_variables;
} SpvReflectPrvFunction;

typedef struct SpvReflectPrvAccessChain {
  uint32_t result_id;
  uint32_t result_type_id;
  //
  // Pointing to the base of a composite object.
  // Generally the id of descriptor block variable
  uint32_t base_id;
  //
  // From spec:
  //   The first index in Indexes will select the
  //   top-level member/element/component/element
  //   of the base composite
  uint32_t index_count;
  uint32_t* indexes;
  //
  // Block variable ac is pointing to (for block references)
  SpvReflectBlockVariable* block_var;
} SpvReflectPrvAccessChain;

// To prevent infinite recursion, we never walk down a
// PhysicalStorageBuffer struct twice, but incase a 2nd variable
// needs to use that struct, save a copy
typedef struct SpvReflectPrvPhysicalPointerStruct {
  uint32_t struct_id;
  // first variable to see the PhysicalStorageBuffer struct
  SpvReflectBlockVariable* p_var;
} SpvReflectPrvPhysicalPointerStruct;

// Top-level parse state for one SPIR-V module: raw words plus every
// table accumulated while walking the instruction stream.
typedef struct SpvReflectPrvParser {
  size_t spirv_word_count;
  uint32_t* spirv_code;
  uint32_t string_count;
  SpvReflectPrvString* strings;
  SpvSourceLanguage source_language;
  uint32_t source_language_version;
  uint32_t source_file_id;
  const char* source_embedded;
  size_t node_count;
  SpvReflectPrvNode* nodes;
  uint32_t entry_point_count;
  uint32_t capability_count;
  uint32_t function_count;
  SpvReflectPrvFunction* functions;
  uint32_t access_chain_count;
  SpvReflectPrvAccessChain* access_chains;
  uint32_t type_count;
  uint32_t descriptor_count;
  uint32_t push_constant_count;
  // Ring of PhysicalStorageBuffer struct types already visited,
  // bounded by MAX_RECURSIVE_PHYSICAL_POINTER_CHECK (see comment above).
  SpvReflectTypeDescription* physical_pointer_check[MAX_RECURSIVE_PHYSICAL_POINTER_CHECK];
  uint32_t physical_pointer_count;
  SpvReflectPrvPhysicalPointerStruct* physical_pointer_structs;
  uint32_t physical_pointer_struct_count;
} SpvReflectPrvParser;
// clang-format on

// Returns the larger of two unsigned values.
static uint32_t Max(uint32_t a, uint32_t b) { return a > b ? a : b; }

// Returns the smaller of two unsigned values.
static uint32_t Min(uint32_t a, uint32_t b) { return a < b ?
a : b; } static uint32_t RoundUp(uint32_t value, uint32_t multiple) { assert(multiple && ((multiple & (multiple - 1)) == 0)); return (value + multiple - 1) & ~(multiple - 1); } #define IsNull(ptr) (ptr == NULL) #define IsNotNull(ptr) (ptr != NULL) #define SafeFree(ptr) \ { \ free((void*)ptr); \ ptr = NULL; \ } static int SortCompareUint32(const void* a, const void* b) { const uint32_t* p_a = (const uint32_t*)a; const uint32_t* p_b = (const uint32_t*)b; return (int)*p_a - (int)*p_b; } static int SortCompareAccessedVariable(const void* a, const void* b) { const SpvReflectPrvAccessedVariable* p_a = (const SpvReflectPrvAccessedVariable*)a; const SpvReflectPrvAccessedVariable* p_b = (const SpvReflectPrvAccessedVariable*)b; return (int)p_a->variable_ptr - (int)p_b->variable_ptr; } // // De-duplicates a sorted array and returns the new size. // // Note: The array doesn't actually need to be sorted, just // arranged into "runs" so that all the entries with one // value are adjacent. // static size_t DedupSortedUint32(uint32_t* arr, size_t size) { if (size == 0) { return 0; } size_t dedup_idx = 0; for (size_t i = 0; i < size; ++i) { if (arr[dedup_idx] != arr[i]) { ++dedup_idx; arr[dedup_idx] = arr[i]; } } return dedup_idx + 1; } static bool SearchSortedUint32(const uint32_t* arr, size_t size, uint32_t target) { size_t lo = 0; size_t hi = size; while (lo < hi) { size_t mid = (hi - lo) / 2 + lo; if (arr[mid] == target) { return true; } else if (arr[mid] < target) { lo = mid + 1; } else { hi = mid; } } return false; } static SpvReflectResult IntersectSortedAccessedVariable(const SpvReflectPrvAccessedVariable* p_arr0, size_t arr0_size, const uint32_t* p_arr1, size_t arr1_size, uint32_t** pp_res, size_t* res_size) { *pp_res = NULL; *res_size = 0; if (IsNull(p_arr0) || IsNull(p_arr1)) { return SPV_REFLECT_RESULT_SUCCESS; } const SpvReflectPrvAccessedVariable* p_arr0_end = p_arr0 + arr0_size; const uint32_t* p_arr1_end = p_arr1 + arr1_size; const SpvReflectPrvAccessedVariable* 
p_idx0 = p_arr0;  // (completes the declaration begun on the previous line)
  const uint32_t* p_idx1 = p_arr1;
  // First pass: walk both sorted sequences in lockstep and count matches.
  while (p_idx0 != p_arr0_end && p_idx1 != p_arr1_end) {
    if (p_idx0->variable_ptr < *p_idx1) {
      ++p_idx0;
    } else if (p_idx0->variable_ptr > *p_idx1) {
      ++p_idx1;
    } else {
      ++*res_size;
      ++p_idx0;
      ++p_idx1;
    }
  }

  if (*res_size > 0) {
    *pp_res = (uint32_t*)calloc(*res_size, sizeof(**pp_res));
    if (IsNull(*pp_res)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }
    uint32_t* p_idxr = *pp_res;
    // Second pass: identical walk, this time writing the matching ids out.
    p_idx0 = p_arr0;
    p_idx1 = p_arr1;
    while (p_idx0 != p_arr0_end && p_idx1 != p_arr1_end) {
      if (p_idx0->variable_ptr < *p_idx1) {
        ++p_idx0;
      } else if (p_idx0->variable_ptr > *p_idx1) {
        ++p_idx1;
      } else {
        *(p_idxr++) = p_idx0->variable_ptr;
        ++p_idx0;
        ++p_idx1;
      }
    }
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Returns true when 'index' is a valid word index into the parser's
// SPIR-V blob.
static bool InRange(const SpvReflectPrvParser* p_parser, uint32_t index) {
  bool in_range = false;
  if (IsNotNull(p_parser)) {
    in_range = (index < p_parser->spirv_word_count);
  }
  return in_range;
}

// Reads the 32-bit word at 'word_offset' into *p_value.
// Returns SPIRV_UNEXPECTED_EOF when the offset is out of range (also
// asserted in debug builds).
static SpvReflectResult ReadU32(SpvReflectPrvParser* p_parser, uint32_t word_offset, uint32_t* p_value) {
  assert(IsNotNull(p_parser));
  assert(IsNotNull(p_parser->spirv_code));
  assert(InRange(p_parser, word_offset));
  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;
  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, word_offset)) {
    *p_value = *(p_parser->spirv_code + word_offset);
    result = SPV_REFLECT_RESULT_SUCCESS;
  }
  return result;
}

// Read a word, deliberately ignoring failure (caller guarantees validity).
#define UNCHECKED_READU32(parser, word_offset, value) \
  { (void)ReadU32(parser, word_offset, (uint32_t*)&(value)); }

// Read a word; on failure, early-return the error from the calling function.
#define CHECKED_READU32(parser, word_offset, value)                                              \
  {                                                                                              \
    SpvReflectResult checked_readu32_result = ReadU32(parser, word_offset, (uint32_t*)&(value)); \
    if (checked_readu32_result != SPV_REFLECT_RESULT_SUCCESS) {                                  \
      return checked_readu32_result;                                                             \
    }                                                                                            \
  }

// Like CHECKED_READU32, but casts the word to 'cast_to_type' on success.
#define CHECKED_READU32_CAST(parser, word_offset, cast_to_type, value)       \
  {                                                                          \
    uint32_t checked_readu32_cast_u32 = UINT32_MAX;                          \
    SpvReflectResult checked_readu32_cast_result = ReadU32(parser, word_offset,
(uint32_t*)&(checked_readu32_cast_u32));                                 \
    if (checked_readu32_cast_result != SPV_REFLECT_RESULT_SUCCESS) {         \
      return checked_readu32_cast_result;                                    \
    }                                                                        \
    value = (cast_to_type)checked_readu32_cast_u32;                          \
  }

// Read a word only if 'result' is still SUCCESS; accumulate failure into it.
#define IF_READU32(result, parser, word_offset, value)      \
  if ((result) == SPV_REFLECT_RESULT_SUCCESS) {             \
    result = ReadU32(parser, word_offset, (uint32_t*)&(value)); \
  }

// Conditional read with a cast, mirroring CHECKED_READU32_CAST but
// accumulating the error into 'result' instead of returning.
#define IF_READU32_CAST(result, parser, word_offset, cast_to_type, value) \
  if ((result) == SPV_REFLECT_RESULT_SUCCESS) {                           \
    uint32_t if_readu32_cast_u32 = UINT32_MAX;                            \
    result = ReadU32(parser, word_offset, &if_readu32_cast_u32);          \
    if ((result) == SPV_REFLECT_RESULT_SUCCESS) {                         \
      value = (cast_to_type)if_readu32_cast_u32;                          \
    }                                                                     \
  }

// Reads a NUL-terminated string embedded in the SPIR-V word stream starting
// at word (word_offset + word_index), scanning at most word_count words.
// Two calling modes:
//   - p_buf == NULL: store the required size (incl. terminator) in *p_buf_size.
//   - p_buf != NULL: copy the string into p_buf if *p_buf_size is large
//     enough, else return RANGE_EXCEEDED.
static SpvReflectResult ReadStr(SpvReflectPrvParser* p_parser, uint32_t word_offset, uint32_t word_index,
                                uint32_t word_count, uint32_t* p_buf_size, char* p_buf) {
  uint32_t limit = (word_offset + word_count);
  assert(IsNotNull(p_parser));
  assert(IsNotNull(p_parser->spirv_code));
  assert(InRange(p_parser, limit));
  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;
  if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, limit)) {
    const char* c_str = (const char*)(p_parser->spirv_code + word_offset + word_index);
    uint32_t n = word_count * SPIRV_WORD_SIZE;
    uint32_t length_with_terminator = 0;
    // Find the terminating NUL within the word range; if none is found,
    // length_with_terminator stays 0 and the EOF error is returned.
    for (uint32_t i = 0; i < n; ++i) {
      char c = *(c_str + i);
      if (c == 0) {
        length_with_terminator = i + 1;
        break;
      }
    }
    if (length_with_terminator > 0) {
      result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
      if (IsNotNull(p_buf_size) && IsNotNull(p_buf)) {
        result = SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;
        if (length_with_terminator <= *p_buf_size) {
          memset(p_buf, 0, *p_buf_size);
          memcpy(p_buf, c_str, length_with_terminator);
          result = SPV_REFLECT_RESULT_SUCCESS;
        }
      } else {
        // Size-query mode: report how many bytes the caller must provide.
        if (IsNotNull(p_buf_size)) {
          *p_buf_size = length_with_terminator;
          result = SPV_REFLECT_RESULT_SUCCESS;
        }
      }
    }
  }
  return result;
}

// Converts the parser's per-node decoration booleans into the public
// SpvReflectDecorationFlags bitmask.
static SpvReflectDecorationFlags ApplyDecorations(const SpvReflectPrvDecorations* p_decoration_fields) {
  SpvReflectDecorationFlags decorations = SPV_REFLECT_DECORATION_NONE;
  if (p_decoration_fields->is_relaxed_precision) {
    decorations |= SPV_REFLECT_DECORATION_RELAXED_PRECISION;
  }
  if (p_decoration_fields->is_block) {
    decorations |= SPV_REFLECT_DECORATION_BLOCK;
  }
  if (p_decoration_fields->is_buffer_block) {
    decorations |= SPV_REFLECT_DECORATION_BUFFER_BLOCK;
  }
  if (p_decoration_fields->is_row_major) {
    decorations |= SPV_REFLECT_DECORATION_ROW_MAJOR;
  }
  if (p_decoration_fields->is_column_major) {
    decorations |= SPV_REFLECT_DECORATION_COLUMN_MAJOR;
  }
  if (p_decoration_fields->is_built_in) {
    decorations |= SPV_REFLECT_DECORATION_BUILT_IN;
  }
  if (p_decoration_fields->is_noperspective) {
    decorations |= SPV_REFLECT_DECORATION_NOPERSPECTIVE;
  }
  if (p_decoration_fields->is_flat) {
    decorations |= SPV_REFLECT_DECORATION_FLAT;
  }
  if (p_decoration_fields->is_non_writable) {
    decorations |= SPV_REFLECT_DECORATION_NON_WRITABLE;
  }
  if (p_decoration_fields->is_non_readable) {
    decorations |= SPV_REFLECT_DECORATION_NON_READABLE;
  }
  if (p_decoration_fields->is_patch) {
    decorations |= SPV_REFLECT_DECORATION_PATCH;
  }
  if (p_decoration_fields->is_per_vertex) {
    decorations |= SPV_REFLECT_DECORATION_PER_VERTEX;
  }
  if (p_decoration_fields->is_per_task) {
    decorations |= SPV_REFLECT_DECORATION_PER_TASK;
  }
  if (p_decoration_fields->is_weight_texture) {
    decorations |= SPV_REFLECT_DECORATION_WEIGHT_TEXTURE;
  }
  if (p_decoration_fields->is_block_match_texture) {
    decorations |= SPV_REFLECT_DECORATION_BLOCK_MATCH_TEXTURE;
  }
  return decorations;
}

// Copies a type's numeric traits (scalar/vector/matrix layout) into the
// public traits struct.
static void ApplyNumericTraits(const SpvReflectTypeDescription* p_type, SpvReflectNumericTraits* p_numeric_traits) {
  memcpy(p_numeric_traits, &p_type->traits.numeric, sizeof(p_type->traits.numeric));
}

// Copies a type's array traits (dims, strides) into the public traits struct.
static void ApplyArrayTraits(const SpvReflectTypeDescription* p_type, SpvReflectArrayTraits* p_array_traits) {
  memcpy(p_array_traits, &p_type->traits.array, sizeof(p_type->traits.array));
}

// Returns true when the node is any OpSpecConstant* variant.
static bool IsSpecConstant(const
SpvReflectPrvNode* p_node) {
  return (p_node->op == SpvOpSpecConstant || p_node->op == SpvOpSpecConstantOp || p_node->op == SpvOpSpecConstantTrue ||
          p_node->op == SpvOpSpecConstantFalse);
}

// Linear search for the node with the given result id; NULL when absent.
static SpvReflectPrvNode* FindNode(SpvReflectPrvParser* p_parser, uint32_t result_id) {
  SpvReflectPrvNode* p_node = NULL;
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_elem = &(p_parser->nodes[i]);
    if (p_elem->result_id == result_id) {
      p_node = p_elem;
      break;
    }
  }
  return p_node;
}

// Linear search of the module's type descriptions by type id; NULL when
// absent.
static SpvReflectTypeDescription* FindType(SpvReflectShaderModule* p_module, uint32_t type_id) {
  SpvReflectTypeDescription* p_type = NULL;
  for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) {
    SpvReflectTypeDescription* p_elem = &(p_module->_internal->type_descriptions[i]);
    if (p_elem->id == type_id) {
      p_type = p_elem;
      break;
    }
  }
  return p_type;
}

// Linear search of the parsed access chains by result id; returns 0 (NULL)
// when absent.
static SpvReflectPrvAccessChain* FindAccessChain(SpvReflectPrvParser* p_parser, uint32_t id) {
  uint32_t ac_cnt = p_parser->access_chain_count;
  for (uint32_t i = 0; i < ac_cnt; i++) {
    if (p_parser->access_chains[i].result_id == id) {
      return &p_parser->access_chains[i];
    }
  }
  return 0;
}

// Follows an access chain's base back through OpLoad / OpFunctionParameter
// links until the underlying OpVariable is found; returns its id, or 0 when
// the chain cannot be resolved.
static uint32_t FindBaseId(SpvReflectPrvParser* p_parser, SpvReflectPrvAccessChain* ac) {
  uint32_t base_id = ac->base_id;
  SpvReflectPrvNode* base_node = FindNode(p_parser, base_id);
  // TODO - This is just a band-aid to fix crashes.
// Need to understand why here and hopefully remove // https://github.com/KhronosGroup/SPIRV-Reflect/pull/206 if (IsNull(base_node)) { return 0; } while (base_node->op != SpvOpVariable) { switch (base_node->op) { case SpvOpLoad: { UNCHECKED_READU32(p_parser, base_node->word_offset + 3, base_id); } break; case SpvOpFunctionParameter: { UNCHECKED_READU32(p_parser, base_node->word_offset + 2, base_id); } break; default: { assert(false); } break; } SpvReflectPrvAccessChain* base_ac = FindAccessChain(p_parser, base_id); if (base_ac == 0) { return 0; } base_id = base_ac->base_id; base_node = FindNode(p_parser, base_id); if (IsNull(base_node)) { return 0; } } return base_id; } static SpvReflectBlockVariable* GetRefBlkVar(SpvReflectPrvParser* p_parser, SpvReflectPrvAccessChain* ac) { uint32_t base_id = ac->base_id; SpvReflectPrvNode* base_node = FindNode(p_parser, base_id); assert(base_node->op == SpvOpLoad); UNCHECKED_READU32(p_parser, base_node->word_offset + 3, base_id); SpvReflectPrvAccessChain* base_ac = FindAccessChain(p_parser, base_id); assert(base_ac != 0); SpvReflectBlockVariable* base_var = base_ac->block_var; assert(base_var != 0); return base_var; } bool IsPointerToPointer(SpvReflectPrvParser* p_parser, uint32_t type_id) { SpvReflectPrvNode* ptr_node = FindNode(p_parser, type_id); if (IsNull(ptr_node) || (ptr_node->op != SpvOpTypePointer)) { return false; } uint32_t pte_id = 0; UNCHECKED_READU32(p_parser, ptr_node->word_offset + 3, pte_id); SpvReflectPrvNode* pte_node = FindNode(p_parser, pte_id); if (IsNull(pte_node)) { return false; } return pte_node->op == SpvOpTypePointer; } static SpvReflectResult CreateParser(size_t size, void* p_code, SpvReflectPrvParser* p_parser) { if (p_code == NULL) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (size < SPIRV_MINIMUM_FILE_SIZE) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE; } if ((size % 4) != 0) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE; } p_parser->spirv_word_count = size / 
SPIRV_WORD_SIZE;  // (completes the assignment begun on the previous line)
  p_parser->spirv_code = (uint32_t*)p_code;
  if (p_parser->spirv_code[0] != SpvMagicNumber) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER;
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Frees every allocation owned by the parser. Safe to call after a partially
// failed parse; SafeFree resets each pointer to NULL as it goes.
static void DestroyParser(SpvReflectPrvParser* p_parser) {
  if (!IsNull(p_parser->nodes)) {
    // Free nodes
    for (size_t i = 0; i < p_parser->node_count; ++i) {
      SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
      if (IsNotNull(p_node->member_names)) {
        SafeFree(p_node->member_names);
      }
      if (IsNotNull(p_node->member_decorations)) {
        SafeFree(p_node->member_decorations);
      }
    }

    // Free functions
    for (size_t i = 0; i < p_parser->function_count; ++i) {
      SafeFree(p_parser->functions[i].callees);
      SafeFree(p_parser->functions[i].callee_ptrs);
      SafeFree(p_parser->functions[i].accessed_variables);
    }

    // Free access chains
    for (uint32_t i = 0; i < p_parser->access_chain_count; ++i) {
      SafeFree(p_parser->access_chains[i].indexes);
    }

    SafeFree(p_parser->nodes);
    SafeFree(p_parser->strings);
    SafeFree(p_parser->source_embedded);
    SafeFree(p_parser->functions);
    SafeFree(p_parser->access_chains);
    if (IsNotNull(p_parser->physical_pointer_structs)) {
      SafeFree(p_parser->physical_pointer_structs);
    }
    p_parser->node_count = 0;
  }
}

// Two passes over the instruction stream: first count instructions (and
// OpAccessChains) to size the node and access-chain arrays, then decode each
// instruction into a SpvReflectPrvNode.
static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser) {
  assert(IsNotNull(p_parser));
  assert(IsNotNull(p_parser->spirv_code));

  uint32_t* p_spirv = p_parser->spirv_code;
  // Instruction stream begins after the 5-word SPIR-V module header.
  uint32_t spirv_word_index = SPIRV_STARTING_WORD_INDEX;

  // Count nodes
  uint32_t node_count = 0;
  while (spirv_word_index < p_parser->spirv_word_count) {
    uint32_t word = p_spirv[spirv_word_index];
    // Low 16 bits of an instruction's first word are the opcode; the high
    // 16 bits are the instruction's total word count.
    SpvOp op = (SpvOp)(word & 0xFFFF);
    uint32_t node_word_count = (word >> 16) & 0xFFFF;
    if (node_word_count == 0) {
      // A zero word count would loop forever; reject the module.
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION;
    }
    if (op == SpvOpAccessChain) {
      ++(p_parser->access_chain_count);
    }
    spirv_word_index += node_word_count;
    ++node_count;
  }

  if (node_count == 0) {
    return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF;
  }

  // Allocate nodes
  p_parser->node_count
= node_count; p_parser->nodes = (SpvReflectPrvNode*)calloc(p_parser->node_count, sizeof(*(p_parser->nodes))); if (IsNull(p_parser->nodes)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Mark all nodes with an invalid state for (uint32_t i = 0; i < node_count; ++i) { p_parser->nodes[i].op = (SpvOp)INVALID_VALUE; p_parser->nodes[i].storage_class = (SpvStorageClass)INVALID_VALUE; p_parser->nodes[i].decorations.set.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.binding.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.location.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.component.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.offset.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.uav_counter_buffer.value = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.spec_id = (uint32_t)INVALID_VALUE; p_parser->nodes[i].decorations.built_in = (SpvBuiltIn)INVALID_VALUE; } // Mark source file id node p_parser->source_file_id = (uint32_t)INVALID_VALUE; p_parser->source_embedded = NULL; // Function node uint32_t function_node = (uint32_t)INVALID_VALUE; // Allocate access chain if (p_parser->access_chain_count > 0) { p_parser->access_chains = (SpvReflectPrvAccessChain*)calloc(p_parser->access_chain_count, sizeof(*(p_parser->access_chains))); if (IsNull(p_parser->access_chains)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } // Parse nodes uint32_t node_index = 0; uint32_t access_chain_index = 0; spirv_word_index = SPIRV_STARTING_WORD_INDEX; while (spirv_word_index < p_parser->spirv_word_count) { uint32_t word = p_spirv[spirv_word_index]; SpvOp op = (SpvOp)(word & 0xFFFF); uint32_t node_word_count = (word >> 16) & 0xFFFF; SpvReflectPrvNode* p_node = &(p_parser->nodes[node_index]); p_node->op = op; p_node->word_offset = spirv_word_index; p_node->word_count = node_word_count; switch (p_node->op) { default: break; case SpvOpString: { ++(p_parser->string_count); } break; case SpvOpSource: { 
CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvSourceLanguage, p_parser->source_language); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_parser->source_language_version); if (p_node->word_count >= 4) { CHECKED_READU32(p_parser, p_node->word_offset + 3, p_parser->source_file_id); } if (p_node->word_count >= 5) { const char* p_source = (const char*)(p_parser->spirv_code + p_node->word_offset + 4); const size_t source_len = strlen(p_source); char* p_source_temp = (char*)calloc(source_len + 1, sizeof(char)); if (IsNull(p_source_temp)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } #ifdef _WIN32 strcpy_s(p_source_temp, source_len + 1, p_source); #else strcpy(p_source_temp, p_source); #endif SafeFree(p_parser->source_embedded); p_parser->source_embedded = p_source_temp; } } break; case SpvOpSourceContinued: { const char* p_source = (const char*)(p_parser->spirv_code + p_node->word_offset + 1); const size_t source_len = strlen(p_source); const size_t embedded_source_len = strlen(p_parser->source_embedded); char* p_continued_source = (char*)calloc(source_len + embedded_source_len + 1, sizeof(char)); if (IsNull(p_continued_source)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } #ifdef _WIN32 strcpy_s(p_continued_source, embedded_source_len + 1, p_parser->source_embedded); strcat_s(p_continued_source, embedded_source_len + source_len + 1, p_source); #else strcpy(p_continued_source, p_parser->source_embedded); strcat(p_continued_source, p_source); #endif SafeFree(p_parser->source_embedded); p_parser->source_embedded = p_continued_source; } break; case SpvOpEntryPoint: { ++(p_parser->entry_point_count); } break; case SpvOpCapability: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->capability); ++(p_parser->capability_count); } break; case SpvOpName: case SpvOpMemberName: { uint32_t member_offset = (p_node->op == SpvOpMemberName) ? 
1 : 0; uint32_t name_start = p_node->word_offset + member_offset + 2; p_node->name = (const char*)(p_parser->spirv_code + name_start); } break; case SpvOpTypeStruct: { p_node->member_count = p_node->word_count - 2; FALLTHROUGH; } // Fall through // This is all the rest of OpType* that need to be tracked // Possible new extensions might expose new type, will need to be added // here case SpvOpTypeVoid: case SpvOpTypeBool: case SpvOpTypeInt: case SpvOpTypeFloat: case SpvOpTypeVector: case SpvOpTypeMatrix: case SpvOpTypeSampler: case SpvOpTypeOpaque: case SpvOpTypeFunction: case SpvOpTypeEvent: case SpvOpTypeDeviceEvent: case SpvOpTypeReserveId: case SpvOpTypeQueue: case SpvOpTypePipe: case SpvOpTypeAccelerationStructureKHR: case SpvOpTypeRayQueryKHR: case SpvOpTypeHitObjectNV: case SpvOpTypeCooperativeMatrixNV: case SpvOpTypeCooperativeMatrixKHR: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); p_node->is_type = true; } break; case SpvOpTypeImage: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_traits.sampled_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->image_traits.dim); CHECKED_READU32(p_parser, p_node->word_offset + 4, p_node->image_traits.depth); CHECKED_READU32(p_parser, p_node->word_offset + 5, p_node->image_traits.arrayed); CHECKED_READU32(p_parser, p_node->word_offset + 6, p_node->image_traits.ms); CHECKED_READU32(p_parser, p_node->word_offset + 7, p_node->image_traits.sampled); CHECKED_READU32(p_parser, p_node->word_offset + 8, p_node->image_traits.image_format); p_node->is_type = true; } break; case SpvOpTypeSampledImage: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_type_id); p_node->is_type = true; } break; case SpvOpTypeArray: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); CHECKED_READU32(p_parser, 
p_node->word_offset + 2, p_node->array_traits.element_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->array_traits.length_id); p_node->is_type = true; } break; case SpvOpTypeRuntimeArray: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->array_traits.element_type_id); p_node->is_type = true; } break; case SpvOpTypePointer: { uint32_t result_id; CHECKED_READU32(p_parser, p_node->word_offset + 1, result_id); // Look for forward pointer. Clear result id if found SpvReflectPrvNode* p_fwd_node = FindNode(p_parser, result_id); if (p_fwd_node) { p_fwd_node->result_id = 0; } // Register pointer type p_node->result_id = result_id; CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->type_id); p_node->is_type = true; } break; case SpvOpTypeForwardPointer: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class); p_node->is_type = true; } break; case SpvOpConstantTrue: case SpvOpConstantFalse: case SpvOpConstant: case SpvOpConstantComposite: case SpvOpConstantSampler: case SpvOpConstantNull: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); } break; case SpvOpSpecConstantTrue: case SpvOpSpecConstantFalse: case SpvOpSpecConstant: case SpvOpSpecConstantComposite: case SpvOpSpecConstantOp: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); } break; case SpvOpVariable: { CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->type_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->storage_class); } break; case SpvOpLoad: { // Only 
load enough so OpDecorate can reference the node, skip the remaining operands. CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); } break; case SpvOpAccessChain: { SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]); CHECKED_READU32(p_parser, p_node->word_offset + 1, p_access_chain->result_type_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_access_chain->result_id); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_access_chain->base_id); // // SPIRV_ACCESS_CHAIN_INDEX_OFFSET (4) is the number of words up until the first index: // [Node, Result Type Id, Result Id, Base Id, ] // p_access_chain->index_count = (node_word_count - SPIRV_ACCESS_CHAIN_INDEX_OFFSET); if (p_access_chain->index_count > 0) { p_access_chain->indexes = (uint32_t*)calloc(p_access_chain->index_count, sizeof(*(p_access_chain->indexes))); if (IsNull(p_access_chain->indexes)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Parse any index values for access chain for (uint32_t index_index = 0; index_index < p_access_chain->index_count; ++index_index) { // Read index id uint32_t index_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + SPIRV_ACCESS_CHAIN_INDEX_OFFSET + index_index, index_id); // Find OpConstant node that contains index value SpvReflectPrvNode* p_index_value_node = FindNode(p_parser, index_id); if ((p_index_value_node != NULL) && (p_index_value_node->op == SpvOpConstant || p_index_value_node->op == SpvOpSpecConstant)) { // Read index value uint32_t index_value = UINT32_MAX; CHECKED_READU32(p_parser, p_index_value_node->word_offset + 3, index_value); assert(index_value != UINT32_MAX); // Write index value to array p_access_chain->indexes[index_index] = index_value; } } } ++access_chain_index; } break; case SpvOpFunction: { CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); // Count function definitions, not 
function declarations. To determine // the difference, set an in-function variable, and then if an OpLabel // is reached before the end of the function increment the function // count. function_node = node_index; } break; case SpvOpLabel: { if (function_node != (uint32_t)INVALID_VALUE) { SpvReflectPrvNode* p_func_node = &(p_parser->nodes[function_node]); CHECKED_READU32(p_parser, p_func_node->word_offset + 2, p_func_node->result_id); ++(p_parser->function_count); } FALLTHROUGH; } // Fall through case SpvOpFunctionEnd: { function_node = (uint32_t)INVALID_VALUE; } break; case SpvOpFunctionParameter: { CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); } break; case SpvOpBitcast: case SpvOpShiftRightLogical: case SpvOpIAdd: case SpvOpISub: case SpvOpIMul: case SpvOpUDiv: case SpvOpSDiv: { CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); } break; } if (p_node->is_type) { ++(p_parser->type_count); } spirv_word_index += node_word_count; ++node_index; } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseStrings(SpvReflectPrvParser* p_parser) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->spirv_code)); assert(IsNotNull(p_parser->nodes)); // Early out if (p_parser->string_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { // Allocate string storage p_parser->strings = (SpvReflectPrvString*)calloc(p_parser->string_count, sizeof(*(p_parser->strings))); uint32_t string_index = 0; for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->op != SpvOpString) { continue; } // Paranoid check against string count assert(string_index < p_parser->string_count); if (string_index >= p_parser->string_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } // Result id SpvReflectPrvString* p_string = &(p_parser->strings[string_index]); CHECKED_READU32(p_parser, 
p_node->word_offset + 1, p_string->result_id); // String uint32_t string_start = p_node->word_offset + 2; p_string->string = (const char*)(p_parser->spirv_code + string_start); // Increment string index ++string_index; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseSource(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->spirv_code)); if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code)) { // Source file if (IsNotNull(p_parser->strings)) { for (uint32_t i = 0; i < p_parser->string_count; ++i) { SpvReflectPrvString* p_string = &(p_parser->strings[i]); if (p_string->result_id == p_parser->source_file_id) { p_module->source_file = p_string->string; break; } } } // Source code if (IsNotNull(p_parser->source_embedded)) { const size_t source_len = strlen(p_parser->source_embedded); char* p_source = (char*)calloc(source_len + 1, sizeof(char)); if (IsNull(p_source)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } #ifdef _WIN32 strcpy_s(p_source, source_len + 1, p_parser->source_embedded); #else strcpy(p_source, p_parser->source_embedded); #endif p_module->source_source = p_source; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseFunction(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_func_node, SpvReflectPrvFunction* p_func, size_t first_label_index) { p_func->id = p_func_node->result_id; p_func->callee_count = 0; p_func->accessed_variable_count = 0; // First get count to know how much to allocate for (size_t i = first_label_index; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->op == SpvOpFunctionEnd) { break; } switch (p_node->op) { case SpvOpFunctionCall: { ++(p_func->callee_count); } break; case SpvOpLoad: case SpvOpAccessChain: case SpvOpInBoundsAccessChain: case SpvOpPtrAccessChain: case SpvOpArrayLength: case SpvOpGenericPtrMemSemantics: case SpvOpInBoundsPtrAccessChain: case SpvOpStore: 
case SpvOpImageTexelPointer: { ++(p_func->accessed_variable_count); } break; case SpvOpCopyMemory: case SpvOpCopyMemorySized: { p_func->accessed_variable_count += 2; } break; default: break; } } if (p_func->callee_count > 0) { p_func->callees = (uint32_t*)calloc(p_func->callee_count, sizeof(*(p_func->callees))); if (IsNull(p_func->callees)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } if (p_func->accessed_variable_count > 0) { p_func->accessed_variables = (SpvReflectPrvAccessedVariable*)calloc(p_func->accessed_variable_count, sizeof(*(p_func->accessed_variables))); if (IsNull(p_func->accessed_variables)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } p_func->callee_count = 0; p_func->accessed_variable_count = 0; // Now have allocation, fill in values for (size_t i = first_label_index; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->op == SpvOpFunctionEnd) { break; } switch (p_node->op) { case SpvOpFunctionCall: { CHECKED_READU32(p_parser, p_node->word_offset + 3, p_func->callees[p_func->callee_count]); (++p_func->callee_count); } break; case SpvOpLoad: case SpvOpAccessChain: case SpvOpInBoundsAccessChain: case SpvOpPtrAccessChain: case SpvOpArrayLength: case SpvOpGenericPtrMemSemantics: case SpvOpInBoundsPtrAccessChain: case SpvOpImageTexelPointer: { const uint32_t result_index = p_node->word_offset + 2; const uint32_t ptr_index = p_node->word_offset + 3; SpvReflectPrvAccessedVariable* access_ptr = &p_func->accessed_variables[p_func->accessed_variable_count]; access_ptr->p_node = p_node; // Need to track Result ID as not sure there has been any memory access through here yet CHECKED_READU32(p_parser, result_index, access_ptr->result_id); CHECKED_READU32(p_parser, ptr_index, access_ptr->variable_ptr); (++p_func->accessed_variable_count); } break; case SpvOpStore: { const uint32_t result_index = p_node->word_offset + 2; CHECKED_READU32(p_parser, result_index, 
p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr); p_func->accessed_variables[p_func->accessed_variable_count].p_node = p_node; (++p_func->accessed_variable_count); } break; case SpvOpCopyMemory: case SpvOpCopyMemorySized: { // There is no result_id or node, being zero is same as being invalid CHECKED_READU32(p_parser, p_node->word_offset + 1, p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr); (++p_func->accessed_variable_count); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_func->accessed_variables[p_func->accessed_variable_count].variable_ptr); (++p_func->accessed_variable_count); } break; default: break; } } if (p_func->callee_count > 0) { qsort(p_func->callees, p_func->callee_count, sizeof(*(p_func->callees)), SortCompareUint32); } p_func->callee_count = (uint32_t)DedupSortedUint32(p_func->callees, p_func->callee_count); if (p_func->accessed_variable_count > 0) { qsort(p_func->accessed_variables, p_func->accessed_variable_count, sizeof(*(p_func->accessed_variables)), SortCompareAccessedVariable); } return SPV_REFLECT_RESULT_SUCCESS; } static int SortCompareFunctions(const void* a, const void* b) { const SpvReflectPrvFunction* af = (const SpvReflectPrvFunction*)a; const SpvReflectPrvFunction* bf = (const SpvReflectPrvFunction*)b; return (int)af->id - (int)bf->id; } static SpvReflectResult ParseFunctions(SpvReflectPrvParser* p_parser) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->spirv_code)); assert(IsNotNull(p_parser->nodes)); if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { if (p_parser->function_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } p_parser->functions = (SpvReflectPrvFunction*)calloc(p_parser->function_count, sizeof(*(p_parser->functions))); if (IsNull(p_parser->functions)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } size_t function_index = 0; for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = 
&(p_parser->nodes[i]); if (p_node->op != SpvOpFunction) { continue; } // Skip over function declarations that aren't definitions bool func_definition = false; // Intentionally reuse i to avoid iterating over these nodes more than // once for (; i < p_parser->node_count; ++i) { if (p_parser->nodes[i].op == SpvOpLabel) { func_definition = true; break; } if (p_parser->nodes[i].op == SpvOpFunctionEnd) { break; } } if (!func_definition) { continue; } SpvReflectPrvFunction* p_function = &(p_parser->functions[function_index]); SpvReflectResult result = ParseFunction(p_parser, p_node, p_function, i); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } ++function_index; } qsort(p_parser->functions, p_parser->function_count, sizeof(*(p_parser->functions)), SortCompareFunctions); // Once they're sorted, link the functions with pointers to improve graph // traversal efficiency for (size_t i = 0; i < p_parser->function_count; ++i) { SpvReflectPrvFunction* p_func = &(p_parser->functions[i]); if (p_func->callee_count == 0) { continue; } p_func->callee_ptrs = (SpvReflectPrvFunction**)calloc(p_func->callee_count, sizeof(*(p_func->callee_ptrs))); for (size_t j = 0, k = 0; j < p_func->callee_count; ++j) { while (p_parser->functions[k].id != p_func->callees[j]) { ++k; if (k >= p_parser->function_count) { // Invalid called function ID somewhere return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } } p_func->callee_ptrs[j] = &(p_parser->functions[k]); } } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseMemberCounts(SpvReflectPrvParser* p_parser) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->spirv_code)); assert(IsNotNull(p_parser->nodes)); if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if ((p_node->op != SpvOpMemberName) && (p_node->op != SpvOpMemberDecorate)) { continue; } uint32_t target_id 
= 0; uint32_t member_index = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id); // Not all nodes get parsed, so FindNode returning NULL is expected. if (IsNull(p_target_node)) { continue; } if (member_index == INVALID_VALUE) { return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; } p_target_node->member_count = Max(p_target_node->member_count, member_index + 1); } for (uint32_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->member_count == 0) { continue; } p_node->member_names = (const char**)calloc(p_node->member_count, sizeof(*(p_node->member_names))); if (IsNull(p_node->member_names)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } p_node->member_decorations = (SpvReflectPrvDecorations*)calloc(p_node->member_count, sizeof(*(p_node->member_decorations))); if (IsNull(p_node->member_decorations)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseNames(SpvReflectPrvParser* p_parser) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->spirv_code)); assert(IsNotNull(p_parser->nodes)); if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if ((p_node->op != SpvOpName) && (p_node->op != SpvOpMemberName)) { continue; } uint32_t target_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id); // Not all nodes get parsed, so FindNode returning NULL is expected. 
if (IsNull(p_target_node)) { continue; } const char** pp_target_name = &(p_target_node->name); if (p_node->op == SpvOpMemberName) { uint32_t member_index = UINT32_MAX; CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); pp_target_name = &(p_target_node->member_names[member_index]); } *pp_target_name = p_node->name; } } return SPV_REFLECT_RESULT_SUCCESS; } // Returns true if user_type matches pattern or if user_type begins with pattern and the next character is ':' // For example, UserTypeMatches("rwbuffer", "rwbuffer") will be true, UserTypeMatches("rwbuffer", "rwbuffer:") will be true, and // UserTypeMatches("rwbuffer", "rwbufferfoo") will be false. static bool UserTypeMatches(const char* user_type, const char* pattern) { const size_t pattern_length = strlen(pattern); if (strncmp(user_type, pattern, pattern_length) == 0) { if (user_type[pattern_length] == ':' || user_type[pattern_length] == '\0') { return true; } } return false; } static SpvReflectResult ParseDecorations(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { uint32_t spec_constant_count = 0; for (uint32_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if ((p_node->op != SpvOpDecorate) && (p_node->op != SpvOpMemberDecorate) && (p_node->op != SpvOpDecorateId) && (p_node->op != SpvOpDecorateString) && (p_node->op != SpvOpMemberDecorateString)) { continue; } // Need to adjust the read offset if this is a member decoration uint32_t member_offset = 0; if (p_node->op == SpvOpMemberDecorate) { member_offset = 1; } // Get decoration uint32_t decoration = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + member_offset + 2, decoration); // Filter out the decoration that do not affect reflection, otherwise // there will be random crashes because the nodes aren't found. 
bool skip = false; switch (decoration) { default: { skip = true; } break; case SpvDecorationRelaxedPrecision: case SpvDecorationBlock: case SpvDecorationBufferBlock: case SpvDecorationColMajor: case SpvDecorationRowMajor: case SpvDecorationArrayStride: case SpvDecorationMatrixStride: case SpvDecorationBuiltIn: case SpvDecorationNoPerspective: case SpvDecorationFlat: case SpvDecorationNonWritable: case SpvDecorationNonReadable: case SpvDecorationPatch: case SpvDecorationPerVertexKHR: case SpvDecorationPerTaskNV: case SpvDecorationLocation: case SpvDecorationComponent: case SpvDecorationBinding: case SpvDecorationDescriptorSet: case SpvDecorationOffset: case SpvDecorationInputAttachmentIndex: case SpvDecorationSpecId: case SpvDecorationWeightTextureQCOM: case SpvDecorationBlockMatchTextureQCOM: case SpvDecorationUserTypeGOOGLE: case SpvDecorationHlslCounterBufferGOOGLE: case SpvDecorationHlslSemanticGOOGLE: { skip = false; } break; } if (skip) { continue; } // Find target node uint32_t target_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id); if (IsNull(p_target_node)) { if ((p_node->op == (uint32_t)SpvOpDecorate) && (decoration == SpvDecorationRelaxedPrecision)) { // Many OPs can be decorated that we don't care about. Ignore those. 
// See https://github.com/KhronosGroup/SPIRV-Reflect/issues/134 continue; } return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } // Get decorations SpvReflectPrvDecorations* p_target_decorations = &(p_target_node->decorations); // Update pointer if this is a member decoration if (p_node->op == SpvOpMemberDecorate) { uint32_t member_index = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); p_target_decorations = &(p_target_node->member_decorations[member_index]); } switch (decoration) { default: break; case SpvDecorationRelaxedPrecision: { p_target_decorations->is_relaxed_precision = true; } break; case SpvDecorationBlock: { p_target_decorations->is_block = true; } break; case SpvDecorationBufferBlock: { p_target_decorations->is_buffer_block = true; } break; case SpvDecorationColMajor: { p_target_decorations->is_column_major = true; } break; case SpvDecorationRowMajor: { p_target_decorations->is_row_major = true; } break; case SpvDecorationArrayStride: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->array_stride); } break; case SpvDecorationMatrixStride: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->matrix_stride); } break; case SpvDecorationBuiltIn: { p_target_decorations->is_built_in = true; uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32_CAST(p_parser, word_offset, SpvBuiltIn, p_target_decorations->built_in); } break; case SpvDecorationNoPerspective: { p_target_decorations->is_noperspective = true; } break; case SpvDecorationFlat: { p_target_decorations->is_flat = true; } break; case SpvDecorationNonWritable: { p_target_decorations->is_non_writable = true; } break; case SpvDecorationNonReadable: { p_target_decorations->is_non_readable = true; } break; case SpvDecorationPatch: { p_target_decorations->is_patch = true; } break; 
case SpvDecorationPerVertexKHR: { p_target_decorations->is_per_vertex = true; } break; case SpvDecorationPerTaskNV: { p_target_decorations->is_per_task = true; } break; case SpvDecorationLocation: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->location.value); p_target_decorations->location.word_offset = word_offset; } break; case SpvDecorationComponent: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->component.value); p_target_decorations->component.word_offset = word_offset; } break; case SpvDecorationBinding: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->binding.value); p_target_decorations->binding.word_offset = word_offset; } break; case SpvDecorationDescriptorSet: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->set.value); p_target_decorations->set.word_offset = word_offset; } break; case SpvDecorationOffset: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->offset.value); p_target_decorations->offset.word_offset = word_offset; } break; case SpvDecorationInputAttachmentIndex: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->input_attachment_index.value); p_target_decorations->input_attachment_index.word_offset = word_offset; } break; case SpvDecorationSpecId: { spec_constant_count++; } break; case SpvDecorationHlslCounterBufferGOOGLE: { uint32_t word_offset = p_node->word_offset + member_offset + 3; CHECKED_READU32(p_parser, word_offset, p_target_decorations->uav_counter_buffer.value); p_target_decorations->uav_counter_buffer.word_offset = word_offset; } break; case 
SpvDecorationHlslSemanticGOOGLE: { uint32_t word_offset = p_node->word_offset + member_offset + 3; p_target_decorations->semantic.value = (const char*)(p_parser->spirv_code + word_offset); p_target_decorations->semantic.word_offset = word_offset; } break; case SpvDecorationWeightTextureQCOM: { p_target_decorations->is_weight_texture = true; } break; case SpvDecorationBlockMatchTextureQCOM: { p_target_decorations->is_block_match_texture = true; } break; } if (p_node->op == SpvOpDecorateString && decoration == SpvDecorationUserTypeGOOGLE) { uint32_t terminator = 0; SpvReflectResult result = ReadStr(p_parser, p_node->word_offset + 3, 0, p_node->word_count, &terminator, NULL); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } const char* name = (const char*)(p_parser->spirv_code + p_node->word_offset + 3); if (UserTypeMatches(name, "cbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CBUFFER; } else if (UserTypeMatches(name, "tbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TBUFFER; } else if (UserTypeMatches(name, "appendstructuredbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_APPEND_STRUCTURED_BUFFER; } else if (UserTypeMatches(name, "buffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_BUFFER; } else if (UserTypeMatches(name, "byteaddressbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER; } else if (UserTypeMatches(name, "constantbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CONSTANT_BUFFER; } else if (UserTypeMatches(name, "consumestructuredbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_CONSUME_STRUCTURED_BUFFER; } else if (UserTypeMatches(name, "inputpatch")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_INPUT_PATCH; } else if (UserTypeMatches(name, "outputpatch")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_OUTPUT_PATCH; } else if (UserTypeMatches(name, 
"rasterizerorderedbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BUFFER; } else if (UserTypeMatches(name, "rasterizerorderedbyteaddressbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BYTE_ADDRESS_BUFFER; } else if (UserTypeMatches(name, "rasterizerorderedstructuredbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_STRUCTURED_BUFFER; } else if (UserTypeMatches(name, "rasterizerorderedtexture1d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D; } else if (UserTypeMatches(name, "rasterizerorderedtexture1darray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D_ARRAY; } else if (UserTypeMatches(name, "rasterizerorderedtexture2d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D; } else if (UserTypeMatches(name, "rasterizerorderedtexture2darray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D_ARRAY; } else if (UserTypeMatches(name, "rasterizerorderedtexture3d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_3D; } else if (UserTypeMatches(name, "raytracingaccelerationstructure")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RAYTRACING_ACCELERATION_STRUCTURE; } else if (UserTypeMatches(name, "rwbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_BUFFER; } else if (UserTypeMatches(name, "rwbyteaddressbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER; } else if (UserTypeMatches(name, "rwstructuredbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_STRUCTURED_BUFFER; } else if (UserTypeMatches(name, "rwtexture1d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D; } else if (UserTypeMatches(name, "rwtexture1darray")) { p_target_decorations->user_type 
= SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D_ARRAY; } else if (UserTypeMatches(name, "rwtexture2d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D; } else if (UserTypeMatches(name, "rwtexture2darray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D_ARRAY; } else if (UserTypeMatches(name, "rwtexture3d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_RW_TEXTURE_3D; } else if (UserTypeMatches(name, "structuredbuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_STRUCTURED_BUFFER; } else if (UserTypeMatches(name, "subpassinput")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_SUBPASS_INPUT; } else if (UserTypeMatches(name, "subpassinputms")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_SUBPASS_INPUT_MS; } else if (UserTypeMatches(name, "texture1d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_1D; } else if (UserTypeMatches(name, "texture1darray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_1D_ARRAY; } else if (UserTypeMatches(name, "texture2d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2D; } else if (UserTypeMatches(name, "texture2darray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2D_ARRAY; } else if (UserTypeMatches(name, "texture2dms")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2DMS; } else if (UserTypeMatches(name, "texture2dmsarray")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_2DMS_ARRAY; } else if (UserTypeMatches(name, "texture3d")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_3D; } else if (UserTypeMatches(name, "texturebuffer")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_BUFFER; } else if (UserTypeMatches(name, "texturecube")) { p_target_decorations->user_type = SPV_REFLECT_USER_TYPE_TEXTURE_CUBE; } else if (UserTypeMatches(name, "texturecubearray")) { p_target_decorations->user_type = 
SPV_REFLECT_USER_TYPE_TEXTURE_CUBE_ARRAY; } } } if (spec_constant_count > 0) { p_module->spec_constants = (SpvReflectSpecializationConstant*)calloc(spec_constant_count, sizeof(*p_module->spec_constants)); if (IsNull(p_module->spec_constants)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } for (uint32_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->op == SpvOpDecorate) { uint32_t decoration = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + 2, decoration); if (decoration == SpvDecorationSpecId) { const uint32_t count = p_module->spec_constant_count; CHECKED_READU32(p_parser, p_node->word_offset + 1, p_module->spec_constants[count].spirv_id); CHECKED_READU32(p_parser, p_node->word_offset + 3, p_module->spec_constants[count].constant_id); // If being used for a OpSpecConstantComposite (ex. LocalSizeId), there won't be a name SpvReflectPrvNode* target_node = FindNode(p_parser, p_module->spec_constants[count].spirv_id); if (IsNotNull(target_node)) { p_module->spec_constants[count].name = target_node->name; } p_module->spec_constant_count++; } } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult EnumerateAllUniforms(SpvReflectShaderModule* p_module, size_t* p_uniform_count, uint32_t** pp_uniforms) { *p_uniform_count = p_module->descriptor_binding_count; if (*p_uniform_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } *pp_uniforms = (uint32_t*)calloc(*p_uniform_count, sizeof(**pp_uniforms)); if (IsNull(*pp_uniforms)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (size_t i = 0; i < *p_uniform_count; ++i) { (*pp_uniforms)[i] = p_module->descriptor_bindings[i].spirv_id; } qsort(*pp_uniforms, *p_uniform_count, sizeof(**pp_uniforms), SortCompareUint32); return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseType(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_node, SpvReflectPrvDecorations* p_struct_member_decorations, SpvReflectShaderModule* p_module, 
SpvReflectTypeDescription* p_type) { SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS; if (p_node->member_count > 0) { p_type->struct_type_description = FindType(p_module, p_node->result_id); p_type->member_count = p_node->member_count; p_type->members = (SpvReflectTypeDescription*)calloc(p_type->member_count, sizeof(*(p_type->members))); if (IsNotNull(p_type->members)) { // Mark all members types with an invalid state for (size_t i = 0; i < p_type->members->member_count; ++i) { SpvReflectTypeDescription* p_member_type = &(p_type->members[i]); p_member_type->id = (uint32_t)INVALID_VALUE; p_member_type->op = (SpvOp)INVALID_VALUE; p_member_type->storage_class = (SpvStorageClass)INVALID_VALUE; } } else { result = SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } if (result == SPV_REFLECT_RESULT_SUCCESS) { // Since the parse descends on type information, these will get overwritten // if not guarded against assignment. Only assign if the id is invalid. if (p_type->id == INVALID_VALUE) { p_type->id = p_node->result_id; p_type->op = p_node->op; p_type->decoration_flags = 0; } // Top level types need to pick up decorations from all types below it. 
// Issue and fix here: https://github.com/chaoticbob/SPIRV-Reflect/issues/64 p_type->decoration_flags = ApplyDecorations(&p_node->decorations); switch (p_node->op) { default: break; case SpvOpTypeVoid: p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VOID; break; case SpvOpTypeBool: p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_BOOL; break; case SpvOpTypeInt: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_INT; IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width); IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.scalar.signedness); } break; case SpvOpTypeFloat: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_FLOAT; IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width); } break; case SpvOpTypeVector: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VECTOR; uint32_t component_type_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, component_type_id); IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.vector.component_count); // Parse component type SpvReflectPrvNode* p_next_node = FindNode(p_parser, component_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } } break; case SpvOpTypeMatrix: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_MATRIX; uint32_t column_type_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, column_type_id); IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.matrix.column_count); SpvReflectPrvNode* p_next_node = FindNode(p_parser, column_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } p_type->traits.numeric.matrix.row_count = 
p_type->traits.numeric.vector.component_count; p_type->traits.numeric.matrix.stride = p_node->decorations.matrix_stride; // NOTE: Matrix stride is decorated using OpMemberDecoreate - not OpDecoreate. if (IsNotNull(p_struct_member_decorations)) { p_type->traits.numeric.matrix.stride = p_struct_member_decorations->matrix_stride; } } break; case SpvOpTypeImage: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE; uint32_t sampled_type_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, sampled_type_id); SpvReflectPrvNode* p_next_node = FindNode(p_parser, sampled_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } IF_READU32_CAST(result, p_parser, p_node->word_offset + 3, SpvDim, p_type->traits.image.dim); IF_READU32(result, p_parser, p_node->word_offset + 4, p_type->traits.image.depth); IF_READU32(result, p_parser, p_node->word_offset + 5, p_type->traits.image.arrayed); IF_READU32(result, p_parser, p_node->word_offset + 6, p_type->traits.image.ms); IF_READU32(result, p_parser, p_node->word_offset + 7, p_type->traits.image.sampled); IF_READU32_CAST(result, p_parser, p_node->word_offset + 8, SpvImageFormat, p_type->traits.image.image_format); } break; case SpvOpTypeSampler: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER; } break; case SpvOpTypeSampledImage: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE; uint32_t image_type_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, image_type_id); SpvReflectPrvNode* p_next_node = FindNode(p_parser, image_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } } break; case SpvOpTypeArray: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY; if 
(result == SPV_REFLECT_RESULT_SUCCESS) { uint32_t element_type_id = (uint32_t)INVALID_VALUE; uint32_t length_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id); IF_READU32(result, p_parser, p_node->word_offset + 3, length_id); // NOTE: Array stride is decorated using OpDecorate instead of // OpMemberDecorate, even if the array is apart of a struct. p_type->traits.array.stride = p_node->decorations.array_stride; // Get length for current dimension SpvReflectPrvNode* p_length_node = FindNode(p_parser, length_id); if (IsNotNull(p_length_node)) { uint32_t dim_index = p_type->traits.array.dims_count; uint32_t length = 0; IF_READU32(result, p_parser, p_length_node->word_offset + 3, length); if (result == SPV_REFLECT_RESULT_SUCCESS) { p_type->traits.array.dims[dim_index] = length; p_type->traits.array.dims_count += 1; p_type->traits.array.spec_constant_op_ids[dim_index] = IsSpecConstant(p_length_node) ? p_length_node->decorations.spec_id : (uint32_t)INVALID_VALUE; } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } // Parse next dimension or element type SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } } } break; case SpvOpTypeRuntimeArray: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY; uint32_t element_type_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id); p_type->traits.array.stride = p_node->decorations.array_stride; uint32_t dim_index = p_type->traits.array.dims_count; p_type->traits.array.dims[dim_index] = (uint32_t)SPV_REFLECT_ARRAY_DIM_RUNTIME; p_type->traits.array.spec_constant_op_ids[dim_index] = (uint32_t)INVALID_VALUE; p_type->traits.array.dims_count += 1; // Parse next dimension or element 
type SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id); if (IsNotNull(p_next_node)) { result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } else { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } } break; case SpvOpTypeStruct: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_STRUCT; p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK; uint32_t word_index = 2; uint32_t member_index = 0; for (; word_index < p_node->word_count; ++word_index, ++member_index) { uint32_t member_id = (uint32_t)INVALID_VALUE; IF_READU32(result, p_parser, p_node->word_offset + word_index, member_id); // Find member node SpvReflectPrvNode* p_member_node = FindNode(p_parser, member_id); if (IsNull(p_member_node)) { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); break; } // Member decorations SpvReflectPrvDecorations* p_member_decorations = &p_node->member_decorations[member_index]; assert(member_index < p_type->member_count); // Parse member type SpvReflectTypeDescription* p_member_type = &(p_type->members[member_index]); p_member_type->id = member_id; p_member_type->op = p_member_node->op; result = ParseType(p_parser, p_member_node, p_member_decorations, p_module, p_member_type); if (result != SPV_REFLECT_RESULT_SUCCESS) { break; } // This looks wrong // p_member_type->type_name = p_member_node->name; p_member_type->struct_member_name = p_node->member_names[member_index]; } } break; case SpvOpTypeOpaque: break; case SpvOpTypePointer: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_REF; IF_READU32_CAST(result, p_parser, p_node->word_offset + 2, SpvStorageClass, p_type->storage_class); bool found_recursion = false; if (p_type->storage_class == SpvStorageClassPhysicalStorageBuffer) { // Need to make sure we haven't started an infinite recursive loop for (uint32_t i = 0; i < p_parser->physical_pointer_count; i++) { if (p_type->id == p_parser->physical_pointer_check[i]->id) { 
found_recursion = true; memcpy(p_type, p_parser->physical_pointer_check[i], sizeof(SpvReflectTypeDescription)); p_type->copied = 1; return SPV_REFLECT_RESULT_SUCCESS; } } if (!found_recursion) { p_parser->physical_pointer_struct_count++; p_parser->physical_pointer_check[p_parser->physical_pointer_count] = p_type; p_parser->physical_pointer_count++; if (p_parser->physical_pointer_count >= MAX_RECURSIVE_PHYSICAL_POINTER_CHECK) { return SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED; } } } // Parse type SpvReflectPrvNode* p_next_node = FindNode(p_parser, p_node->type_id); if (IsNull(p_next_node)) { result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; SPV_REFLECT_ASSERT(false); } else if (!found_recursion) { if (p_next_node->op == SpvOpTypeStruct) { p_type->struct_type_description = FindType(p_module, p_next_node->result_id); } result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); } } break; case SpvOpTypeAccelerationStructureKHR: { p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE; } break; } if (result == SPV_REFLECT_RESULT_SUCCESS) { // Names get assigned on the way down. Guard against names // get overwritten on the way up. 
if (IsNull(p_type->type_name)) { p_type->type_name = p_node->name; } } } return result; } static SpvReflectResult ParseTypes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { if (p_parser->type_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } p_module->_internal->type_description_count = p_parser->type_count; p_module->_internal->type_descriptions = (SpvReflectTypeDescription*)calloc(p_module->_internal->type_description_count, sizeof(*(p_module->_internal->type_descriptions))); if (IsNull(p_module->_internal->type_descriptions)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Mark all types with an invalid state for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) { SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[i]); p_type->id = (uint32_t)INVALID_VALUE; p_type->op = (SpvOp)INVALID_VALUE; p_type->storage_class = (SpvStorageClass)INVALID_VALUE; } size_t type_index = 0; for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (!p_node->is_type) { continue; } SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[type_index]); p_parser->physical_pointer_count = 0; SpvReflectResult result = ParseType(p_parser, p_node, NULL, p_module, p_type); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } ++type_index; } // allocate now and fill in when parsing struct variable later if (p_parser->physical_pointer_struct_count > 0) { p_parser->physical_pointer_structs = (SpvReflectPrvPhysicalPointerStruct*)calloc(p_parser->physical_pointer_struct_count, sizeof(*(p_parser->physical_pointer_structs))); if (IsNull(p_parser->physical_pointer_structs)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseCapabilities(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { if (p_parser->capability_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } 
p_module->capability_count = p_parser->capability_count; p_module->capabilities = (SpvReflectCapability*)calloc(p_module->capability_count, sizeof(*(p_module->capabilities))); if (IsNull(p_module->capabilities)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Mark all types with an invalid state for (size_t i = 0; i < p_module->capability_count; ++i) { SpvReflectCapability* p_cap = &(p_module->capabilities[i]); p_cap->value = SpvCapabilityMax; p_cap->word_offset = (uint32_t)INVALID_VALUE; } size_t capability_index = 0; for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (SpvOpCapability != p_node->op) { continue; } SpvReflectCapability* p_cap = &(p_module->capabilities[capability_index]); p_cap->value = p_node->capability; p_cap->word_offset = p_node->word_offset + 1; ++capability_index; } return SPV_REFLECT_RESULT_SUCCESS; } static int SortCompareDescriptorBinding(const void* a, const void* b) { const SpvReflectDescriptorBinding* p_elem_a = (const SpvReflectDescriptorBinding*)a; const SpvReflectDescriptorBinding* p_elem_b = (const SpvReflectDescriptorBinding*)b; int value = (int)(p_elem_a->binding) - (int)(p_elem_b->binding); if (value == 0) { // use spirv-id as a tiebreaker to ensure a stable ordering, as they're guaranteed // unique. 
assert(p_elem_a->spirv_id != p_elem_b->spirv_id);
    value = (int)(p_elem_a->spirv_id) - (int)(p_elem_b->spirv_id);
  }
  return value;
}

// Collects every OpVariable in the Uniform, StorageBuffer, or UniformConstant
// storage class that has both DescriptorSet and Binding decorations into the
// module's descriptor_bindings array, then sorts the array by (binding, spirv_id).
// The node list is walked twice: once to count, once to fill.
static SpvReflectResult ParseDescriptorBindings(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {
  p_module->descriptor_binding_count = 0;
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
    if ((p_node->op != SpvOpVariable) ||
        ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != SpvStorageClassStorageBuffer) &&
         (p_node->storage_class != SpvStorageClassUniformConstant))) {
      continue;
    }
    if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) {
      continue;
    }
    p_module->descriptor_binding_count += 1;
  }

  if (p_module->descriptor_binding_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_module->descriptor_bindings =
      (SpvReflectDescriptorBinding*)calloc(p_module->descriptor_binding_count, sizeof(*(p_module->descriptor_bindings)));
  if (IsNull(p_module->descriptor_bindings)) {
    return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
  }

  // Mark all types with an invalid state
  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);
    p_descriptor->binding = (uint32_t)INVALID_VALUE;
    p_descriptor->input_attachment_index = (uint32_t)INVALID_VALUE;
    p_descriptor->set = (uint32_t)INVALID_VALUE;
    p_descriptor->descriptor_type = (SpvReflectDescriptorType)INVALID_VALUE;
    p_descriptor->uav_counter_id = (uint32_t)INVALID_VALUE;
  }

  size_t descriptor_index = 0;
  for (size_t i = 0; i < p_parser->node_count; ++i) {
    SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
    // Same filter as the counting pass above.
    if ((p_node->op != SpvOpVariable) ||
        ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != SpvStorageClassStorageBuffer) &&
         (p_node->storage_class != SpvStorageClassUniformConstant))) {
      continue;
    }
    if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) {
      continue;
    }

    SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id);
    if (IsNull(p_type)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }
    // If the type is a pointer, resolve it. We need to retain the storage class
    // from the pointer so that we can use it to deduce descriptor types.
    SpvStorageClass pointer_storage_class = SpvStorageClassMax;
    if (p_type->op == SpvOpTypePointer) {
      pointer_storage_class = p_type->storage_class;
      // Find the type's node
      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Should be the resolved type
      p_type = FindType(p_module, p_type_node->type_id);
      if (IsNull(p_type)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
    }

    SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[descriptor_index];
    p_descriptor->spirv_id = p_node->result_id;
    p_descriptor->name = p_node->name;
    p_descriptor->binding = p_node->decorations.binding.value;
    p_descriptor->input_attachment_index = p_node->decorations.input_attachment_index.value;
    p_descriptor->set = p_node->decorations.set.value;
    p_descriptor->count = 1;
    p_descriptor->uav_counter_id = p_node->decorations.uav_counter_buffer.value;
    p_descriptor->type_description = p_type;
    p_descriptor->decoration_flags = ApplyDecorations(&p_node->decorations);
    p_descriptor->user_type = p_node->decorations.user_type;

    // Flags like non-writable and non-readable are found as member decorations only.
    // If all members have one of those decorations set, promote the decoration up
    // to the whole descriptor.
    const SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
    if (IsNotNull(p_type_node) && p_type_node->member_count) {
      // AND each flag across all members; a flag survives only if every member has it.
      SpvReflectPrvDecorations common_flags = p_type_node->member_decorations[0];
      for (uint32_t m = 1; m < p_type_node->member_count; ++m) {
        common_flags.is_relaxed_precision &= p_type_node->member_decorations[m].is_relaxed_precision;
        common_flags.is_block &= p_type_node->member_decorations[m].is_block;
        common_flags.is_buffer_block &= p_type_node->member_decorations[m].is_buffer_block;
        common_flags.is_row_major &= p_type_node->member_decorations[m].is_row_major;
        common_flags.is_column_major &= p_type_node->member_decorations[m].is_column_major;
        common_flags.is_built_in &= p_type_node->member_decorations[m].is_built_in;
        common_flags.is_noperspective &= p_type_node->member_decorations[m].is_noperspective;
        common_flags.is_flat &= p_type_node->member_decorations[m].is_flat;
        common_flags.is_non_writable &= p_type_node->member_decorations[m].is_non_writable;
        common_flags.is_non_readable &= p_type_node->member_decorations[m].is_non_readable;
        common_flags.is_patch &= p_type_node->member_decorations[m].is_patch;
        common_flags.is_per_vertex &= p_type_node->member_decorations[m].is_per_vertex;
        common_flags.is_per_task &= p_type_node->member_decorations[m].is_per_task;
        common_flags.is_weight_texture &= p_type_node->member_decorations[m].is_weight_texture;
        common_flags.is_block_match_texture &= p_type_node->member_decorations[m].is_block_match_texture;
      }
      p_descriptor->decoration_flags |= ApplyDecorations(&common_flags);
    }

    // If this is in the StorageBuffer storage class, it's for sure a storage
    // buffer descriptor. We need to handle this case earlier because in SPIR-V
    // there are two ways to indicate a storage buffer:
    // 1) Uniform storage class + BufferBlock decoration, or
    // 2) StorageBuffer storage class + Buffer decoration.
    // The 1) way is deprecated since SPIR-V v1.3. But the Buffer decoration is
    // also used together with Uniform storage class to mean uniform buffer..
    // We'll handle the pre-v1.3 cases in ParseDescriptorType().
    if (pointer_storage_class == SpvStorageClassStorageBuffer) {
      p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    }

    // Copy image traits
    if ((p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) == SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE) {
      memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image));
    }

    // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096
    {
      const uint32_t resource_mask = SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE;
      if ((p_type->type_flags & resource_mask) == resource_mask) {
        memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image));
      }
    }

    // Copy array traits
    if (p_type->traits.array.dims_count > 0) {
      p_descriptor->array.dims_count = p_type->traits.array.dims_count;
      for (uint32_t dim_index = 0; dim_index < p_type->traits.array.dims_count; ++dim_index) {
        uint32_t dim_value = p_type->traits.array.dims[dim_index];
        p_descriptor->array.dims[dim_index] = dim_value;
        // count becomes the product of all array dimensions.
        p_descriptor->count *= dim_value;
      }
    }

    // Count

    p_descriptor->word_offset.binding = p_node->decorations.binding.word_offset;
    p_descriptor->word_offset.set = p_node->decorations.set.word_offset;

    ++descriptor_index;
  }

  if (p_module->descriptor_binding_count > 0) {
    qsort(p_module->descriptor_bindings, p_module->descriptor_binding_count, sizeof(*(p_module->descriptor_bindings)),
          SortCompareDescriptorBinding);
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Deduces the SpvReflectDescriptorType for every binding whose type was not
// already fixed by ParseDescriptorBindings() (i.e. still INVALID_VALUE), based
// on the type's external flags, image dim/sampled traits, and Block vs.
// BufferBlock decorations. Then derives the HLSL-style resource_type flags
// (CBV/SRV/UAV/sampler) from the final descriptor type.
static SpvReflectResult ParseDescriptorType(SpvReflectShaderModule* p_module) {
  if (p_module->descriptor_binding_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
    SpvReflectDescriptorBinding* p_descriptor =
&(p_module->descriptor_bindings[descriptor_index]);
    SpvReflectTypeDescription* p_type = p_descriptor->type_description;

    if ((int)p_descriptor->descriptor_type == (int)INVALID_VALUE) {
      switch (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) {
        default:
          assert(false && "unknown type flag");
          break;

        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE: {
          // Buffer-dim images are texel buffers; SubpassData is an input
          // attachment; everything else is a sampled or storage image.
          if (p_descriptor->image.dim == SpvDimBuffer) {
            switch (p_descriptor->image.sampled) {
              default:
                assert(false && "unknown texel buffer sampled value");
                break;
              case IMAGE_SAMPLED:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
                break;
              case IMAGE_STORAGE:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
                break;
            }
          } else if (p_descriptor->image.dim == SpvDimSubpassData) {
            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
          } else {
            switch (p_descriptor->image.sampled) {
              default:
                assert(false && "unknown image sampled value");
                break;
              case IMAGE_SAMPLED:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
                break;
              case IMAGE_STORAGE:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE;
                break;
            }
          }
        } break;

        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER: {
          p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER;
        } break;

        case (SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE): {
          // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096
          if (p_descriptor->image.dim == SpvDimBuffer) {
            switch (p_descriptor->image.sampled) {
              default:
                assert(false && "unknown texel buffer sampled value");
                break;
              case IMAGE_SAMPLED:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
                break;
              case IMAGE_STORAGE:
                p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
                break;
            }
          } else {
            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
          }
        } break;

        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK: {
          // Pre-SPIR-V-1.3 convention: Block => uniform buffer,
          // BufferBlock => storage buffer.
          if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BLOCK) {
            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
          } else if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BUFFER_BLOCK) {
            p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER;
          } else {
            assert(false && "unknown struct");
          }
        } break;

        case SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE: {
          p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
        } break;
      }
    }

    // Map the descriptor type to HLSL-style resource flags.
    switch (p_descriptor->descriptor_type) {
      case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SAMPLER;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        p_descriptor->resource_type = (SpvReflectResourceType)(SPV_REFLECT_RESOURCE_FLAG_SAMPLER | SPV_REFLECT_RESOURCE_FLAG_SRV);
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV;
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        break;
      case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
        p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV;
        break;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Associates each storage-buffer descriptor with its UAV counter buffer, using
// either the explicit counter-buffer id decoration (uav_counter_id) or, when
// that is absent, the legacy "<name>@count" naming convention. On a match,
// uav_counter_binding is pointed at the counter descriptor.
static SpvReflectResult ParseUAVCounterBindings(SpvReflectShaderModule* p_module) {
  char name[MAX_NODE_NAME_LENGTH];
  const char* k_count_tag = "@count";

  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);

    if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
      continue;
    }

    SpvReflectDescriptorBinding* p_counter_descriptor = NULL;
    // Use UAV counter buffer id if present...
    if (p_descriptor->uav_counter_id != UINT32_MAX) {
      for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count;
           ++counter_descriptor_index) {
        SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]);
        if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
          continue;
        }
        if (p_descriptor->uav_counter_id == p_test_counter_descriptor->spirv_id) {
          p_counter_descriptor = p_test_counter_descriptor;
          break;
        }
      }
    }
    // ...otherwise use old @count convention.
    else {
      const size_t descriptor_name_length = p_descriptor->name ?
strlen(p_descriptor->name) : 0;
      // Build "<descriptor name>@count" into the fixed-size scratch buffer.
      memset(name, 0, MAX_NODE_NAME_LENGTH);
      memcpy(name, p_descriptor->name, descriptor_name_length);
#if defined(_WIN32)
      strcat_s(name, MAX_NODE_NAME_LENGTH, k_count_tag);
#else
      strcat(name, k_count_tag);
#endif

      for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count;
           ++counter_descriptor_index) {
        SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]);
        if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
          continue;
        }
        if (p_test_counter_descriptor->name && strcmp(name, p_test_counter_descriptor->name) == 0) {
          p_counter_descriptor = p_test_counter_descriptor;
          break;
        }
      }
    }

    if (p_counter_descriptor != NULL) {
      p_descriptor->uav_counter_binding = p_counter_descriptor;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Recursively populates a block variable (and its member tree) from a struct
// type description: allocates the members array, resolves array/runtime-array
// element types, follows PhysicalStorageBuffer references with a recursion
// guard (physical_pointer_check / physical_pointer_structs on the parser), and
// copies per-member name/offset/decoration data from the type's node. All
// members start flagged UNUSED; usage parsing clears the flag later.
static SpvReflectResult ParseDescriptorBlockVariable(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,
                                                     SpvReflectTypeDescription* p_type, SpvReflectBlockVariable* p_var) {
  bool has_non_writable = false;

  if (IsNotNull(p_type->members) && (p_type->member_count > 0)) {
    p_var->member_count = p_type->member_count;
    p_var->members = (SpvReflectBlockVariable*)calloc(p_var->member_count, sizeof(*p_var->members));
    if (IsNull(p_var->members)) {
      return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
    }

    SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
    if (IsNull(p_type_node)) {
      return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
    }
    // Resolve to element type if current type is array or run time array
    while (p_type_node->op == SpvOpTypeArray || p_type_node->op == SpvOpTypeRuntimeArray) {
      if (p_type_node->op == SpvOpTypeArray) {
        p_type_node = FindNode(p_parser, p_type_node->array_traits.element_type_id);
      } else {
        // Element type description
        SpvReflectTypeDescription* p_type_temp = FindType(p_module, p_type_node->array_traits.element_type_id);
        if (IsNull(p_type_temp)) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        // Element type node
        p_type_node = FindNode(p_parser, p_type_temp->id);
      }
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
    }

    // Parse members
    for (uint32_t member_index = 0; member_index < p_type->member_count; ++member_index) {
      SpvReflectTypeDescription* p_member_type = &p_type->members[member_index];
      SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];

      // If pointer type, treat like reference and resolve to pointee type
      SpvReflectTypeDescription* p_member_ptr_type = 0;
      bool found_recursion = false;

      if ((p_member_type->storage_class == SpvStorageClassPhysicalStorageBuffer) &&
          (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_REF)) {
        // Remember the original type
        p_member_ptr_type = p_member_type;

        // strip array
        if (p_member_type->op == SpvOpTypeArray) {
          SpvReflectPrvNode* p_node = FindNode(p_parser, p_member_type->id);
          if (p_node == NULL) {
            return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
          }
          uint32_t element_type_id = p_node->array_traits.element_type_id;
          p_member_type = FindType(p_module, element_type_id);
          if (p_member_type == NULL) {
            return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
          }
        }

        // Need to make sure we haven't started an infinite recursive loop
        for (uint32_t i = 0; i < p_parser->physical_pointer_count; i++) {
          if (p_member_type->id == p_parser->physical_pointer_check[i]->id) {
            found_recursion = true;
            break;  // still need to fill in p_member_type values
          }
        }
        if (!found_recursion) {
          // Record this struct so recursive references to it can later share
          // its member data instead of re-parsing (see the copy branch below).
          uint32_t struct_id = FindType(p_module, p_member_type->id)->struct_type_description->id;
          p_parser->physical_pointer_structs[p_parser->physical_pointer_struct_count].struct_id = struct_id;
          p_parser->physical_pointer_structs[p_parser->physical_pointer_struct_count].p_var = p_member_var;
          p_parser->physical_pointer_struct_count++;

          p_parser->physical_pointer_check[p_parser->physical_pointer_count] = p_member_type;
          p_parser->physical_pointer_count++;
          if (p_parser->physical_pointer_count >= MAX_RECURSIVE_PHYSICAL_POINTER_CHECK) {
            return SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED;
          }
        }

        SpvReflectPrvNode* p_member_type_node = FindNode(p_parser, p_member_type->id);
        if (IsNull(p_member_type_node)) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        // Should be the pointee type
        p_member_type = FindType(p_module, p_member_type_node->type_id);
        if (IsNull(p_member_type)) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
      }

      bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;
      if (is_struct) {
        if (!found_recursion) {
          SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_member_type, p_member_var);
          if (result != SPV_REFLECT_RESULT_SUCCESS) {
            return result;
          }
        } else {
          // if 2 member of structs are same PhysicalPointer type, copy the
          // members values that aren't found skipping the recursion call
          for (uint32_t i = 0; i < p_parser->physical_pointer_struct_count; i++) {
            if (p_parser->physical_pointer_structs[i].struct_id == p_member_type->id) {
              p_member_var->members = p_parser->physical_pointer_structs[i].p_var->members;
              p_member_var->member_count = p_parser->physical_pointer_structs[i].p_var->member_count;
              // Set here as it is the first time we need to walk down structs
              p_member_var->flags |= SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY;
            }
          }
        }
      }

      if (p_type_node->storage_class == SpvStorageClassPhysicalStorageBuffer && !p_type_node->member_names) {
        // TODO 212 - If a buffer ref has an array of itself, all members are null
        continue;
      }

      p_member_var->name = p_type_node->member_names[member_index];
      p_member_var->offset = p_type_node->member_decorations[member_index].offset.value;
      p_member_var->decoration_flags = ApplyDecorations(&p_type_node->member_decorations[member_index]);
      p_member_var->flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED;
      if (!has_non_writable && (p_member_var->decoration_flags &
SPV_REFLECT_DECORATION_NON_WRITABLE)) {
        has_non_writable = true;
      }
      ApplyNumericTraits(p_member_type, &p_member_var->numeric);
      if (p_member_type->op == SpvOpTypeArray) {
        ApplyArrayTraits(p_member_type, &p_member_var->array);
      }
      p_member_var->word_offset.offset = p_type_node->member_decorations[member_index].offset.word_offset;
      // For buffer references, expose the original pointer type, not the pointee.
      p_member_var->type_description = p_member_ptr_type ? p_member_ptr_type : p_member_type;
    }
  }

  p_var->name = p_type->type_name;
  p_var->type_description = p_type;
  if (has_non_writable) {
    p_var->decoration_flags |= SPV_REFLECT_DECORATION_NON_WRITABLE;
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

// Returns the cached size of the physical-pointer struct with the given
// SPIR-V id, or 0 if it was never recorded by ParseDescriptorBlockVariable().
static uint32_t GetPhysicalPointerStructSize(SpvReflectPrvParser* p_parser, uint32_t id) {
  for (uint32_t i = 0; i < p_parser->physical_pointer_struct_count; i++) {
    if (p_parser->physical_pointer_structs[i].struct_id == id) {
      return p_parser->physical_pointer_structs[i].p_var->size;
    }
  }
  return 0;
}

// Recursively computes absolute_offset, size, and padded_size for every member
// of a block variable. is_parent_root marks the top-level block; is_parent_aos
// marks recursion into an array-of-structs element; is_parent_rta marks
// recursion below a runtime array (where padded_size collapses to size).
// A variable whose type is a pointer (buffer reference) always has the size of
// a 64-bit device address.
static SpvReflectResult ParseDescriptorBlockVariableSizes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module,
                                                          bool is_parent_root, bool is_parent_aos, bool is_parent_rta,
                                                          SpvReflectBlockVariable* p_var) {
  if (p_var->member_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  bool is_parent_ref = p_var->type_description->op == SpvOpTypePointer;

  // Absolute offsets
  for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) {
    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];
    if (is_parent_root) {
      p_member_var->absolute_offset = p_member_var->offset;
    } else {
      p_member_var->absolute_offset =
          is_parent_aos ? 0 : (is_parent_ref ? p_member_var->offset : p_member_var->offset + p_var->absolute_offset);
    }
  }

  // Size
  for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) {
    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];
    SpvReflectTypeDescription* p_member_type = p_member_var->type_description;
    if (!p_member_type) {
      // TODO 212 - If a buffer ref has an array of itself, all members are null
      continue;
    }

    switch (p_member_type->op) {
      case SpvOpTypeBool: {
        p_member_var->size = SPIRV_WORD_SIZE;
      } break;

      case SpvOpTypeInt:
      case SpvOpTypeFloat: {
        p_member_var->size = p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH;
      } break;

      case SpvOpTypeVector: {
        uint32_t size =
            p_member_type->traits.numeric.vector.component_count * (p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH);
        p_member_var->size = size;
      } break;

      case SpvOpTypeMatrix: {
        // Matrix size comes from the decorated stride times the major-axis count.
        if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_COLUMN_MAJOR) {
          p_member_var->size = p_member_var->numeric.matrix.column_count * p_member_var->numeric.matrix.stride;
        } else if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_ROW_MAJOR) {
          p_member_var->size = p_member_var->numeric.matrix.row_count * p_member_var->numeric.matrix.stride;
        }
      } break;

      case SpvOpTypeArray: {
        // If array of structs, parse members first...
        bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;
        if (is_struct) {
          if (p_member_var->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) {
            // Shared-member copy: reuse the size computed for the original struct.
            p_member_var->size = GetPhysicalPointerStructSize(p_parser, p_member_type->id);
          } else {
            SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, is_parent_rta, p_member_var);
            if (result != SPV_REFLECT_RESULT_SUCCESS) {
              return result;
            }
          }
        }
        // ...then array
        uint32_t element_count = (p_member_var->array.dims_count > 0 ? 1 : 0);
        for (uint32_t i = 0; i < p_member_var->array.dims_count; ++i) {
          element_count *= p_member_var->array.dims[i];
        }
        p_member_var->size = element_count * p_member_var->array.stride;
      } break;

      case SpvOpTypeRuntimeArray: {
        bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT;
        if (is_struct) {
          SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, true, p_member_var);
          if (result != SPV_REFLECT_RESULT_SUCCESS) {
            return result;
          }
        }
      } break;

      case SpvOpTypePointer: {
        // Reference. Get to underlying struct type.
        SpvReflectPrvNode* p_member_type_node = FindNode(p_parser, p_member_type->id);
        if (IsNull(p_member_type_node)) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        // Get the pointee type
        p_member_type = FindType(p_module, p_member_type_node->type_id);
        if (IsNull(p_member_type)) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        assert(p_member_type->op == SpvOpTypeStruct);
        FALLTHROUGH;
      }

      case SpvOpTypeStruct: {
        if (p_member_var->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) {
          p_member_var->size = GetPhysicalPointerStructSize(p_parser, p_member_type->id);
        } else {
          SpvReflectResult result =
              ParseDescriptorBlockVariableSizes(p_parser, p_module, false, is_parent_aos, is_parent_rta, p_member_var);
          if (result != SPV_REFLECT_RESULT_SUCCESS) {
            return result;
          }
        }
      } break;

      default:
        break;
    }
  }

  // Parse padded size using offset difference for all member except for the last entry...
  for (uint32_t member_index = 0; member_index < (p_var->member_count - 1); ++member_index) {
    SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];
    SpvReflectBlockVariable* p_next_member_var = &p_var->members[member_index + 1];
    p_member_var->padded_size = p_next_member_var->offset - p_member_var->offset;
    if (p_member_var->size > p_member_var->padded_size) {
      p_member_var->size = p_member_var->padded_size;
    }
    if (is_parent_rta) {
      p_member_var->padded_size = p_member_var->size;
    }
  }
  // ...last entry just gets rounded up to near multiple of SPIRV_DATA_ALIGNMENT, which is 16 and
  // subtract the offset.
  if (p_var->member_count > 0) {
    SpvReflectBlockVariable* p_member_var = &p_var->members[p_var->member_count - 1];
    p_member_var->padded_size = RoundUp(p_member_var->offset + p_member_var->size, SPIRV_DATA_ALIGNMENT) - p_member_var->offset;
    if (p_member_var->size > p_member_var->padded_size) {
      p_member_var->size = p_member_var->padded_size;
    }
    if (is_parent_rta) {
      p_member_var->padded_size = p_member_var->size;
    }
  }

  // If buffer ref, sizes are same as uint64_t
  if (is_parent_ref) {
    p_var->size = p_var->padded_size = 8;
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  // @TODO validate this with assertion
  p_var->size = p_var->members[p_var->member_count - 1].offset + p_var->members[p_var->member_count - 1].padded_size;
  p_var->padded_size = p_var->size;

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Recursively clears the UNUSED flag on a block variable and, for struct
// variables, on every member below it.
static void MarkSelfAndAllMemberVarsAsUsed(SpvReflectBlockVariable* p_var) {
  // Clear the current variable's UNUSED flag
  p_var->flags &= ~SPV_REFLECT_VARIABLE_FLAGS_UNUSED;

  SpvOp op_type = p_var->type_description->op;
  switch (op_type) {
    default:
      break;

    case SpvOpTypeArray: {
    } break;

    case SpvOpTypeStruct: {
      for (uint32_t i = 0; i < p_var->member_count; ++i) {
        SpvReflectBlockVariable* p_member_var = &p_var->members[i];
        MarkSelfAndAllMemberVarsAsUsed(p_member_var);
      }
    } break;
  }
}

// Walks one OpAccessChain against a block variable tree, clearing the UNUSED
// flag on every variable the chain touches. When the chain ends mid-tree, the
// entire remaining subtree is marked used.
static SpvReflectResult ParseDescriptorBlockVariableUsage(SpvReflectPrvParser* p_parser, SpvReflectShaderModule*
p_module, SpvReflectPrvAccessChain* p_access_chain, uint32_t index_index,
                                                          SpvOp override_op_type, SpvReflectBlockVariable* p_var) {
  // Clear the current variable's UNUSED flag
  p_var->flags &= ~SPV_REFLECT_VARIABLE_FLAGS_UNUSED;

  // Parsing arrays requires overriding the op type
  // for the lowest dim's element type.
  SpvReflectTypeDescription* p_type = p_var->type_description;
  SpvOp op_type = p_type->op;
  if (override_op_type != (SpvOp)INVALID_VALUE) {
    op_type = override_op_type;
  }

  switch (op_type) {
    default:
      break;

    case SpvOpTypeArray: {
      // Parse through array's type hierarchy to find the actual/non-array element type
      while ((p_type->op == SpvOpTypeArray) && (index_index < p_access_chain->index_count)) {
        // Find the array element type id
        SpvReflectPrvNode* p_node = FindNode(p_parser, p_type->id);
        if (p_node == NULL) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        uint32_t element_type_id = p_node->array_traits.element_type_id;
        // Get the array element type
        p_type = FindType(p_module, element_type_id);
        if (p_type == NULL) {
          return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
        }
        // Next access chain index
        index_index += 1;
      }

      // Only continue parsing if there's remaining indices in the access
      // chain. If the end of the access chain has been reached then all
      // remaining variables (including those in struct hierarchies)
      // are considered USED.
      //
      // See: https://github.com/KhronosGroup/SPIRV-Reflect/issues/78
      //
      if (index_index < p_access_chain->index_count) {
        // Parse current var again with a type override and advanced index index
        SpvReflectResult result = ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, index_index, p_type->op, p_var);
        if (result != SPV_REFLECT_RESULT_SUCCESS) {
          return result;
        }
      } else {
        // Clear UNUSED flag for remaining variables
        MarkSelfAndAllMemberVarsAsUsed(p_var);
      }
    } break;

    case SpvOpTypePointer: {
      // Reference. Get to underlying struct type.
      SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
      if (IsNull(p_type_node)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      // Get the pointee type
      p_type = FindType(p_module, p_type_node->type_id);
      if (IsNull(p_type)) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
      }
      if (p_type->op != SpvOpTypeStruct) {
        break;
      }
      FALLTHROUGH;
    }

    case SpvOpTypeStruct: {
      assert(p_var->member_count > 0);
      if (p_var->member_count == 0) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA;
      }

      // The access chain can have zero indexes, if used for a runtime array
      if (p_access_chain->index_count == 0) {
        return SPV_REFLECT_RESULT_SUCCESS;
      }

      // Get member variable at the access's chain current index
      uint32_t index = p_access_chain->indexes[index_index];
      if (index >= p_var->member_count) {
        return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE;
      }
      SpvReflectBlockVariable* p_member_var = &p_var->members[index];

      bool is_pointer_to_pointer = IsPointerToPointer(p_parser, p_access_chain->result_type_id);
      if (is_pointer_to_pointer) {
        // Remember block var for this access chain for downstream dereference
        p_access_chain->block_var = p_member_var;
      }

      // Next access chain index
      index_index += 1;

      // Only continue parsing if there's remaining indices in the access
      // chain. If the end of the access chain has been reached then all
      // remaining variables (including those in struct hierarchies)
      // are considered USED.
      //
      // See: https://github.com/KhronosGroup/SPIRV-Reflect/issues/78
      //
      if (index_index < p_access_chain->index_count) {
        SpvReflectResult result =
            ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, index_index, (SpvOp)INVALID_VALUE, p_member_var);
        if (result != SPV_REFLECT_RESULT_SUCCESS) {
          return result;
        }
      } else if (!is_pointer_to_pointer) {
        // Clear UNUSED flag for remaining variables
        MarkSelfAndAllMemberVarsAsUsed(p_member_var);
      }
    } break;
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// For every uniform/storage-buffer descriptor: parses its block variable tree,
// applies every access chain rooted at the descriptor to compute per-member
// usage, then computes member sizes/offsets. For storage buffers (potentially
// ending in a runtime array), the top-level block size is reported as 0.
static SpvReflectResult ParseDescriptorBlocks(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) {
  if (p_module->descriptor_binding_count == 0) {
    return SPV_REFLECT_RESULT_SUCCESS;
  }

  p_parser->physical_pointer_struct_count = 0;
  for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
    SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);
    SpvReflectTypeDescription* p_type = p_descriptor->type_description;
    if ((p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER) &&
        (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER)) {
      continue;
    }

    // Mark UNUSED
    p_descriptor->block.flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED;
    p_parser->physical_pointer_count = 0;
    // Parse descriptor block
    SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, &p_descriptor->block);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; ++access_chain_index) {
      SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
      // Skip any access chains that aren't touching this descriptor block
      if (p_descriptor->spirv_id != p_access_chain->base_id) {
        continue;
      }
      result = ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, 0, (SpvOp)INVALID_VALUE, &p_descriptor->block);
      if (result != SPV_REFLECT_RESULT_SUCCESS) {
        return result;
      }
    }

    p_descriptor->block.name = p_descriptor->name;

    bool is_parent_rta = (p_descriptor->descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, is_parent_rta, &p_descriptor->block);
    if (result != SPV_REFLECT_RESULT_SUCCESS) {
      return result;
    }

    if (is_parent_rta) {
      p_descriptor->block.size = 0;
      p_descriptor->block.padded_size = 0;
    }
  }

  return SPV_REFLECT_RESULT_SUCCESS;
}

// Maps a numeric scalar/vector type description to the matching
// SpvReflectFormat (VkFormat-equivalent) based on base type, bit width,
// component count, and signedness. (Continues past this chunk.)
static SpvReflectResult ParseFormat(const SpvReflectTypeDescription* p_type, SpvReflectFormat* p_format) {
  SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR;
  bool signedness = (p_type->traits.numeric.scalar.signedness != 0);
  uint32_t bit_width = p_type->traits.numeric.scalar.width;
  if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_VECTOR) {
    uint32_t component_count = p_type->traits.numeric.vector.component_count;
    if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) {
      switch (bit_width) {
        case 16: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R16G16_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R16G16B16_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R16G16B16A16_SFLOAT;
              break;
          }
        } break;
        case 32: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R32G32_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R32G32B32_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT;
              break;
          }
        } break;
        case 64: {
          switch (component_count) {
            case 2:
              *p_format = SPV_REFLECT_FORMAT_R64G64_SFLOAT;
              break;
            case 3:
              *p_format = SPV_REFLECT_FORMAT_R64G64B64_SFLOAT;
              break;
            case 4:
              *p_format = SPV_REFLECT_FORMAT_R64G64B64A64_SFLOAT;
              break;
          }
        }
      }
      result = SPV_REFLECT_RESULT_SUCCESS;
    } else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) {
      switch (bit_width) {
        case 16: {
          switch (component_count) {
            case 2:
              *p_format = signedness ?
SPV_REFLECT_FORMAT_R16G16_SINT : SPV_REFLECT_FORMAT_R16G16_UINT; break; case 3: *p_format = signedness ? SPV_REFLECT_FORMAT_R16G16B16_SINT : SPV_REFLECT_FORMAT_R16G16B16_UINT; break; case 4: *p_format = signedness ? SPV_REFLECT_FORMAT_R16G16B16A16_SINT : SPV_REFLECT_FORMAT_R16G16B16A16_UINT; break; } } break; case 32: { switch (component_count) { case 2: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32_SINT : SPV_REFLECT_FORMAT_R32G32_UINT; break; case 3: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32_SINT : SPV_REFLECT_FORMAT_R32G32B32_UINT; break; case 4: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32A32_SINT : SPV_REFLECT_FORMAT_R32G32B32A32_UINT; break; } } break; case 64: { switch (component_count) { case 2: *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64_SINT : SPV_REFLECT_FORMAT_R64G64_UINT; break; case 3: *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64B64_SINT : SPV_REFLECT_FORMAT_R64G64B64_UINT; break; case 4: *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64B64A64_SINT : SPV_REFLECT_FORMAT_R64G64B64A64_UINT; break; } } } result = SPV_REFLECT_RESULT_SUCCESS; } } else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) { switch (bit_width) { case 16: *p_format = SPV_REFLECT_FORMAT_R16_SFLOAT; break; case 32: *p_format = SPV_REFLECT_FORMAT_R32_SFLOAT; break; case 64: *p_format = SPV_REFLECT_FORMAT_R64_SFLOAT; break; } result = SPV_REFLECT_RESULT_SUCCESS; } else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) { switch (bit_width) { case 16: *p_format = signedness ? SPV_REFLECT_FORMAT_R16_SINT : SPV_REFLECT_FORMAT_R16_UINT; break; break; case 32: *p_format = signedness ? SPV_REFLECT_FORMAT_R32_SINT : SPV_REFLECT_FORMAT_R32_UINT; break; break; case 64: *p_format = signedness ? 
SPV_REFLECT_FORMAT_R64_SINT : SPV_REFLECT_FORMAT_R64_UINT; break; } result = SPV_REFLECT_RESULT_SUCCESS; } else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) { *p_format = SPV_REFLECT_FORMAT_UNDEFINED; result = SPV_REFLECT_RESULT_SUCCESS; } return result; } static SpvReflectResult ParseInterfaceVariable(SpvReflectPrvParser* p_parser, const SpvReflectPrvDecorations* p_var_node_decorations, const SpvReflectPrvDecorations* p_type_node_decorations, SpvReflectShaderModule* p_module, SpvReflectTypeDescription* p_type, SpvReflectInterfaceVariable* p_var, bool* p_has_built_in) { SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id); if (IsNull(p_type_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } if (p_type->member_count > 0) { p_var->member_count = p_type->member_count; p_var->members = (SpvReflectInterfaceVariable*)calloc(p_var->member_count, sizeof(*p_var->members)); if (IsNull(p_var->members)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (uint32_t member_index = 0; member_index < p_type_node->member_count; ++member_index) { SpvReflectPrvDecorations* p_member_decorations = &p_type_node->member_decorations[member_index]; SpvReflectTypeDescription* p_member_type = &p_type->members[member_index]; SpvReflectInterfaceVariable* p_member_var = &p_var->members[member_index]; // Storage class is the same throughout the whole struct p_member_var->storage_class = p_var->storage_class; SpvReflectResult result = ParseInterfaceVariable(p_parser, NULL, p_member_decorations, p_module, p_member_type, p_member_var, p_has_built_in); if (result != SPV_REFLECT_RESULT_SUCCESS) { SPV_REFLECT_ASSERT(false); return result; } } } p_var->name = p_type_node->name; p_var->decoration_flags = ApplyDecorations(p_type_node_decorations); if (p_var_node_decorations != NULL) { p_var->decoration_flags |= ApplyDecorations(p_var_node_decorations); } else { // Apply member decoration values to struct members p_var->location = 
p_type_node_decorations->location.value; p_var->component = p_type_node_decorations->component.value; } p_var->built_in = p_type_node_decorations->built_in; ApplyNumericTraits(p_type, &p_var->numeric); if (p_type->op == SpvOpTypeArray) { ApplyArrayTraits(p_type, &p_var->array); } p_var->type_description = p_type; *p_has_built_in |= p_type_node_decorations->is_built_in; // Only parse format for interface variables that are input or output if ((p_var->storage_class == SpvStorageClassInput) || (p_var->storage_class == SpvStorageClassOutput)) { SpvReflectResult result = ParseFormat(p_var->type_description, &p_var->format); if (result != SPV_REFLECT_RESULT_SUCCESS) { SPV_REFLECT_ASSERT(false); return result; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseInterfaceVariables(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module, SpvReflectEntryPoint* p_entry, uint32_t interface_variable_count, uint32_t* p_interface_variable_ids) { if (interface_variable_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } p_entry->interface_variable_count = interface_variable_count; p_entry->input_variable_count = 0; p_entry->output_variable_count = 0; for (size_t i = 0; i < interface_variable_count; ++i) { uint32_t var_result_id = *(p_interface_variable_ids + i); SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id); if (IsNull(p_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } if (p_node->storage_class == SpvStorageClassInput) { p_entry->input_variable_count += 1; } else if (p_node->storage_class == SpvStorageClassOutput) { p_entry->output_variable_count += 1; } } if (p_entry->input_variable_count > 0) { p_entry->input_variables = (SpvReflectInterfaceVariable**)calloc(p_entry->input_variable_count, sizeof(*(p_entry->input_variables))); if (IsNull(p_entry->input_variables)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } if (p_entry->output_variable_count > 0) { p_entry->output_variables = 
(SpvReflectInterfaceVariable**)calloc(p_entry->output_variable_count, sizeof(*(p_entry->output_variables))); if (IsNull(p_entry->output_variables)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } if (p_entry->interface_variable_count > 0) { p_entry->interface_variables = (SpvReflectInterfaceVariable*)calloc(p_entry->interface_variable_count, sizeof(*(p_entry->interface_variables))); if (IsNull(p_entry->interface_variables)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } size_t input_index = 0; size_t output_index = 0; for (size_t i = 0; i < interface_variable_count; ++i) { uint32_t var_result_id = *(p_interface_variable_ids + i); SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id); if (IsNull(p_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id); if (IsNull(p_node) || IsNull(p_type)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } // If the type is a pointer, resolve it if (p_type->op == SpvOpTypePointer) { // Find the type's node SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id); if (IsNull(p_type_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } // Should be the resolved type p_type = FindType(p_module, p_type_node->type_id); if (IsNull(p_type)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } } SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id); if (IsNull(p_type_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } SpvReflectInterfaceVariable* p_var = &(p_entry->interface_variables[i]); p_var->storage_class = p_node->storage_class; bool has_built_in = p_node->decorations.is_built_in; SpvReflectResult result = ParseInterfaceVariable(p_parser, &p_node->decorations, &p_type_node->decorations, p_module, p_type, p_var, &has_built_in); if (result != SPV_REFLECT_RESULT_SUCCESS) { SPV_REFLECT_ASSERT(false); return result; } // Input and output variables if 
(p_var->storage_class == SpvStorageClassInput) { p_entry->input_variables[input_index] = p_var; ++input_index; } else if (p_node->storage_class == SpvStorageClassOutput) { p_entry->output_variables[output_index] = p_var; ++output_index; } // SPIR-V result id p_var->spirv_id = p_node->result_id; // Name p_var->name = p_node->name; // Semantic p_var->semantic = p_node->decorations.semantic.value; // Decorate with built-in if any member is built-in if (has_built_in) { p_var->decoration_flags |= SPV_REFLECT_DECORATION_BUILT_IN; } // Location is decorated on OpVariable node, not the type node. p_var->location = p_node->decorations.location.value; p_var->component = p_node->decorations.component.value; p_var->word_offset.location = p_node->decorations.location.word_offset; // Built in if (p_node->decorations.is_built_in) { p_var->built_in = p_node->decorations.built_in; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult EnumerateAllPushConstants(SpvReflectShaderModule* p_module, size_t* p_push_constant_count, uint32_t** p_push_constants) { *p_push_constant_count = p_module->push_constant_block_count; if (*p_push_constant_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } *p_push_constants = (uint32_t*)calloc(*p_push_constant_count, sizeof(**p_push_constants)); if (IsNull(*p_push_constants)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (size_t i = 0; i < *p_push_constant_count; ++i) { (*p_push_constants)[i] = p_module->push_constant_blocks[i].spirv_id; } qsort(*p_push_constants, *p_push_constant_count, sizeof(**p_push_constants), SortCompareUint32); return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult TraverseCallGraph(SpvReflectPrvParser* p_parser, SpvReflectPrvFunction* p_func, size_t* p_func_count, uint32_t* p_func_ids, uint32_t depth) { if (depth > p_parser->function_count) { // Vulkan does not permit recursion (Vulkan spec Appendix A): // "Recursion: The static function-call graph for an entry point must not // contain cycles." 
return SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION; } if (IsNotNull(p_func_ids)) { p_func_ids[(*p_func_count)++] = p_func->id; } else { ++*p_func_count; } for (size_t i = 0; i < p_func->callee_count; ++i) { SpvReflectResult result = TraverseCallGraph(p_parser, p_func->callee_ptrs[i], p_func_count, p_func_ids, depth + 1); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } } return SPV_REFLECT_RESULT_SUCCESS; } static uint32_t GetUint32Constant(SpvReflectPrvParser* p_parser, uint32_t id) { uint32_t result = (uint32_t)INVALID_VALUE; SpvReflectPrvNode* p_node = FindNode(p_parser, id); if (p_node && p_node->op == SpvOpConstant) { UNCHECKED_READU32(p_parser, p_node->word_offset + 3, result); } return result; } static bool HasByteAddressBufferOffset(SpvReflectPrvNode* p_node, SpvReflectDescriptorBinding* p_binding) { return IsNotNull(p_node) && IsNotNull(p_binding) && p_node->op == SpvOpAccessChain && p_node->word_count == 6 && (p_binding->user_type == SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER || p_binding->user_type == SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER); } static SpvReflectResult ParseByteAddressBuffer(SpvReflectPrvParser* p_parser, SpvReflectPrvNode* p_node, SpvReflectDescriptorBinding* p_binding) { const SpvReflectResult not_found = SPV_REFLECT_RESULT_SUCCESS; if (!HasByteAddressBufferOffset(p_node, p_binding)) { return not_found; } uint32_t offset = 0; // starting offset uint32_t base_id = 0; // expect first index of 2D access is zero UNCHECKED_READU32(p_parser, p_node->word_offset + 4, base_id); if (GetUint32Constant(p_parser, base_id) != 0) { return not_found; } UNCHECKED_READU32(p_parser, p_node->word_offset + 5, base_id); SpvReflectPrvNode* p_next_node = FindNode(p_parser, base_id); if (IsNull(p_next_node)) { return not_found; } else if (p_next_node->op == SpvOpConstant) { // The access chain might just be a constant right to the offset offset = GetUint32Constant(p_parser, base_id); 
p_binding->byte_address_buffer_offsets[p_binding->byte_address_buffer_offset_count] = offset; p_binding->byte_address_buffer_offset_count++; return SPV_REFLECT_RESULT_SUCCESS; } // there is usually 2 (sometimes 3) instrucitons that make up the arithmetic logic to calculate the offset SpvReflectPrvNode* arithmetic_node_stack[8]; uint32_t arithmetic_count = 0; while (IsNotNull(p_next_node)) { if (p_next_node->op == SpvOpLoad || p_next_node->op == SpvOpBitcast || p_next_node->op == SpvOpConstant) { break; // arithmetic starts here } arithmetic_node_stack[arithmetic_count++] = p_next_node; if (arithmetic_count >= 8) { return not_found; } UNCHECKED_READU32(p_parser, p_next_node->word_offset + 3, base_id); p_next_node = FindNode(p_parser, base_id); } const uint32_t count = arithmetic_count; for (uint32_t i = 0; i < count; i++) { p_next_node = arithmetic_node_stack[--arithmetic_count]; // All arithmetic ops takes 2 operands, assumption is the 2nd operand has the constant UNCHECKED_READU32(p_parser, p_next_node->word_offset + 4, base_id); uint32_t value = GetUint32Constant(p_parser, base_id); if (value == INVALID_VALUE) { return not_found; } switch (p_next_node->op) { case SpvOpShiftRightLogical: offset >>= value; break; case SpvOpIAdd: offset += value; break; case SpvOpISub: offset -= value; break; case SpvOpIMul: offset *= value; break; case SpvOpUDiv: offset /= value; break; case SpvOpSDiv: // OpConstant might be signed, but value should never be negative assert((int32_t)value > 0); offset /= value; break; default: return not_found; } } p_binding->byte_address_buffer_offsets[p_binding->byte_address_buffer_offset_count] = offset; p_binding->byte_address_buffer_offset_count++; return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseStaticallyUsedResources(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module, SpvReflectEntryPoint* p_entry, size_t uniform_count, uint32_t* uniforms, size_t push_constant_count, uint32_t* push_constants) { // Find function 
with the right id SpvReflectPrvFunction* p_func = NULL; for (size_t i = 0; i < p_parser->function_count; ++i) { if (p_parser->functions[i].id == p_entry->id) { p_func = &(p_parser->functions[i]); break; } } if (p_func == NULL) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } size_t called_function_count = 0; SpvReflectResult result = TraverseCallGraph(p_parser, p_func, &called_function_count, NULL, 0); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } uint32_t* p_called_functions = NULL; if (called_function_count > 0) { p_called_functions = (uint32_t*)calloc(called_function_count, sizeof(*p_called_functions)); if (IsNull(p_called_functions)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } called_function_count = 0; result = TraverseCallGraph(p_parser, p_func, &called_function_count, p_called_functions, 0); if (result != SPV_REFLECT_RESULT_SUCCESS) { SafeFree(p_called_functions); return result; } if (called_function_count > 0) { qsort(p_called_functions, called_function_count, sizeof(*p_called_functions), SortCompareUint32); } called_function_count = DedupSortedUint32(p_called_functions, called_function_count); uint32_t used_acessed_count = 0; for (size_t i = 0, j = 0; i < called_function_count; ++i) { // No need to bounds check j because a missing ID issue would have been // found during TraverseCallGraph while (p_parser->functions[j].id != p_called_functions[i]) { ++j; } used_acessed_count += p_parser->functions[j].accessed_variable_count; } SpvReflectPrvAccessedVariable* p_used_accesses = NULL; if (used_acessed_count > 0) { p_used_accesses = (SpvReflectPrvAccessedVariable*)calloc(used_acessed_count, sizeof(SpvReflectPrvAccessedVariable)); if (IsNull(p_used_accesses)) { SafeFree(p_called_functions); return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } used_acessed_count = 0; for (size_t i = 0, j = 0; i < called_function_count; ++i) { while (p_parser->functions[j].id != p_called_functions[i]) { ++j; } 
memcpy(&p_used_accesses[used_acessed_count], p_parser->functions[j].accessed_variables, p_parser->functions[j].accessed_variable_count * sizeof(SpvReflectPrvAccessedVariable)); used_acessed_count += p_parser->functions[j].accessed_variable_count; } SafeFree(p_called_functions); if (used_acessed_count > 0) { qsort(p_used_accesses, used_acessed_count, sizeof(*p_used_accesses), SortCompareAccessedVariable); } // Do set intersection to find the used uniform and push constants size_t used_uniform_count = 0; result = IntersectSortedAccessedVariable(p_used_accesses, used_acessed_count, uniforms, uniform_count, &p_entry->used_uniforms, &used_uniform_count); if (result != SPV_REFLECT_RESULT_SUCCESS) { SafeFree(p_used_accesses); return result; } size_t used_push_constant_count = 0; result = IntersectSortedAccessedVariable(p_used_accesses, used_acessed_count, push_constants, push_constant_count, &p_entry->used_push_constants, &used_push_constant_count); if (result != SPV_REFLECT_RESULT_SUCCESS) { SafeFree(p_used_accesses); return result; } for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) { SpvReflectDescriptorBinding* p_binding = &p_module->descriptor_bindings[i]; uint32_t byte_address_buffer_offset_count = 0; for (uint32_t j = 0; j < used_acessed_count; j++) { if (p_used_accesses[j].variable_ptr == p_binding->spirv_id) { p_binding->accessed = 1; if (HasByteAddressBufferOffset(p_used_accesses[j].p_node, p_binding)) { byte_address_buffer_offset_count++; } } } // only if SPIR-V has ByteAddressBuffer user type if (byte_address_buffer_offset_count > 0) { bool multi_entrypoint = p_binding->byte_address_buffer_offset_count > 0; if (multi_entrypoint) { // If there is a 2nd entrypoint, we can have multiple entry points, in this case we want to just combine the accessed // offsets and then de-duplicate it uint32_t* prev_byte_address_buffer_offsets = p_binding->byte_address_buffer_offsets; p_binding->byte_address_buffer_offsets = 
(uint32_t*)calloc(byte_address_buffer_offset_count + p_binding->byte_address_buffer_offset_count, sizeof(uint32_t)); memcpy(p_binding->byte_address_buffer_offsets, prev_byte_address_buffer_offsets, sizeof(uint32_t) * p_binding->byte_address_buffer_offset_count); SafeFree(prev_byte_address_buffer_offsets); } else { // possible not all allocated offset slots are used, but this will be a max per binding p_binding->byte_address_buffer_offsets = (uint32_t*)calloc(byte_address_buffer_offset_count, sizeof(uint32_t)); } if (IsNull(p_binding->byte_address_buffer_offsets)) { SafeFree(p_used_accesses); return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (uint32_t j = 0; j < used_acessed_count; j++) { if (p_used_accesses[j].variable_ptr == p_binding->spirv_id) { result = ParseByteAddressBuffer(p_parser, p_used_accesses[j].p_node, p_binding); if (result != SPV_REFLECT_RESULT_SUCCESS) { SafeFree(p_used_accesses); return result; } } } if (multi_entrypoint) { qsort(p_binding->byte_address_buffer_offsets, p_binding->byte_address_buffer_offset_count, sizeof(*(p_binding->byte_address_buffer_offsets)), SortCompareUint32); p_binding->byte_address_buffer_offset_count = (uint32_t)DedupSortedUint32(p_binding->byte_address_buffer_offsets, p_binding->byte_address_buffer_offset_count); } } } SafeFree(p_used_accesses); p_entry->used_uniform_count = (uint32_t)used_uniform_count; p_entry->used_push_constant_count = (uint32_t)used_push_constant_count; return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseEntryPoints(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { if (p_parser->entry_point_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } p_module->entry_point_count = p_parser->entry_point_count; p_module->entry_points = (SpvReflectEntryPoint*)calloc(p_module->entry_point_count, sizeof(*(p_module->entry_points))); if (IsNull(p_module->entry_points)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } SpvReflectResult result; size_t uniform_count = 0; uint32_t* 
uniforms = NULL; if ((result = EnumerateAllUniforms(p_module, &uniform_count, &uniforms)) != SPV_REFLECT_RESULT_SUCCESS) { return result; } size_t push_constant_count = 0; uint32_t* push_constants = NULL; if ((result = EnumerateAllPushConstants(p_module, &push_constant_count, &push_constants)) != SPV_REFLECT_RESULT_SUCCESS) { return result; } size_t entry_point_index = 0; for (size_t i = 0; entry_point_index < p_parser->entry_point_count && i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if (p_node->op != SpvOpEntryPoint) { continue; } SpvReflectEntryPoint* p_entry_point = &(p_module->entry_points[entry_point_index]); CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvExecutionModel, p_entry_point->spirv_execution_model); CHECKED_READU32(p_parser, p_node->word_offset + 2, p_entry_point->id); switch (p_entry_point->spirv_execution_model) { default: break; case SpvExecutionModelVertex: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_VERTEX_BIT; break; case SpvExecutionModelTessellationControl: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT; break; case SpvExecutionModelTessellationEvaluation: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; break; case SpvExecutionModelGeometry: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT; break; case SpvExecutionModelFragment: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT; break; case SpvExecutionModelGLCompute: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT; break; case SpvExecutionModelTaskNV: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV; break; case SpvExecutionModelTaskEXT: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TASK_BIT_EXT; break; case SpvExecutionModelMeshNV: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV; break; case SpvExecutionModelMeshEXT: p_entry_point->shader_stage = 
SPV_REFLECT_SHADER_STAGE_MESH_BIT_EXT; break; case SpvExecutionModelRayGenerationKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_RAYGEN_BIT_KHR; break; case SpvExecutionModelIntersectionKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_INTERSECTION_BIT_KHR; break; case SpvExecutionModelAnyHitKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_ANY_HIT_BIT_KHR; break; case SpvExecutionModelClosestHitKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_CLOSEST_HIT_BIT_KHR; break; case SpvExecutionModelMissKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_MISS_BIT_KHR; break; case SpvExecutionModelCallableKHR: p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_CALLABLE_BIT_KHR; break; } ++entry_point_index; // Name length is required to calculate next operand uint32_t name_start_word_offset = 3; uint32_t name_length_with_terminator = 0; result = ReadStr(p_parser, p_node->word_offset + name_start_word_offset, 0, p_node->word_count, &name_length_with_terminator, NULL); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } p_entry_point->name = (const char*)(p_parser->spirv_code + p_node->word_offset + name_start_word_offset); uint32_t name_word_count = RoundUp(name_length_with_terminator, SPIRV_WORD_SIZE) / SPIRV_WORD_SIZE; uint32_t interface_variable_count = (p_node->word_count - (name_start_word_offset + name_word_count)); uint32_t* p_interface_variables = NULL; if (interface_variable_count > 0) { p_interface_variables = (uint32_t*)calloc(interface_variable_count, sizeof(*(p_interface_variables))); if (IsNull(p_interface_variables)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } for (uint32_t var_index = 0; var_index < interface_variable_count; ++var_index) { uint32_t var_result_id = (uint32_t)INVALID_VALUE; uint32_t offset = name_start_word_offset + name_word_count + var_index; CHECKED_READU32(p_parser, p_node->word_offset + offset, var_result_id); p_interface_variables[var_index] = var_result_id; } result 
= ParseInterfaceVariables(p_parser, p_module, p_entry_point, interface_variable_count, p_interface_variables); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } SafeFree(p_interface_variables); result = ParseStaticallyUsedResources(p_parser, p_module, p_entry_point, uniform_count, uniforms, push_constant_count, push_constants); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } } SafeFree(uniforms); SafeFree(push_constants); return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseExecutionModes(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { assert(IsNotNull(p_parser)); assert(IsNotNull(p_parser->nodes)); assert(IsNotNull(p_module)); if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { for (size_t node_idx = 0; node_idx < p_parser->node_count; ++node_idx) { SpvReflectPrvNode* p_node = &(p_parser->nodes[node_idx]); if (p_node->op != SpvOpExecutionMode && p_node->op != SpvOpExecutionModeId) { continue; } // Read entry point id uint32_t entry_point_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + 1, entry_point_id); // Find entry point SpvReflectEntryPoint* p_entry_point = NULL; for (size_t entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) { if (p_module->entry_points[entry_point_idx].id == entry_point_id) { p_entry_point = &p_module->entry_points[entry_point_idx]; break; } } // Bail if entry point is null if (IsNull(p_entry_point)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ENTRY_POINT; } // Read execution mode uint32_t execution_mode = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + 2, execution_mode); // Parse execution mode switch (execution_mode) { case SpvExecutionModeInvocations: { CHECKED_READU32(p_parser, p_node->word_offset + 3, p_entry_point->invocations); } break; case SpvExecutionModeLocalSize: { CHECKED_READU32(p_parser, p_node->word_offset + 3, p_entry_point->local_size.x); 
CHECKED_READU32(p_parser, p_node->word_offset + 4, p_entry_point->local_size.y); CHECKED_READU32(p_parser, p_node->word_offset + 5, p_entry_point->local_size.z); } break; case SpvExecutionModeLocalSizeId: { uint32_t local_size_x_id = 0; uint32_t local_size_y_id = 0; uint32_t local_size_z_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + 3, local_size_x_id); CHECKED_READU32(p_parser, p_node->word_offset + 4, local_size_y_id); CHECKED_READU32(p_parser, p_node->word_offset + 5, local_size_z_id); SpvReflectPrvNode* x_node = FindNode(p_parser, local_size_x_id); SpvReflectPrvNode* y_node = FindNode(p_parser, local_size_y_id); SpvReflectPrvNode* z_node = FindNode(p_parser, local_size_z_id); if (IsNotNull(x_node) && IsNotNull(y_node) && IsNotNull(z_node)) { if (IsSpecConstant(x_node)) { p_entry_point->local_size.x = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT; } else { CHECKED_READU32(p_parser, x_node->word_offset + 3, p_entry_point->local_size.x); } if (IsSpecConstant(y_node)) { p_entry_point->local_size.y = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT; } else { CHECKED_READU32(p_parser, y_node->word_offset + 3, p_entry_point->local_size.y); } if (IsSpecConstant(z_node)) { p_entry_point->local_size.z = (uint32_t)SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT; } else { CHECKED_READU32(p_parser, z_node->word_offset + 3, p_entry_point->local_size.z); } } } break; case SpvExecutionModeInputPoints: case SpvExecutionModeInputLines: case SpvExecutionModeInputLinesAdjacency: case SpvExecutionModeTriangles: case SpvExecutionModeInputTrianglesAdjacency: case SpvExecutionModeQuads: case SpvExecutionModeIsolines: case SpvExecutionModeOutputVertices: { CHECKED_READU32(p_parser, p_node->word_offset + 3, p_entry_point->output_vertices); } break; default: break; } p_entry_point->execution_mode_count++; } uint32_t* indices = (uint32_t*)calloc(p_module->entry_point_count, sizeof(indices)); if (IsNull(indices)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (size_t 
entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) { SpvReflectEntryPoint* p_entry_point = &p_module->entry_points[entry_point_idx]; if (p_entry_point->execution_mode_count > 0) { p_entry_point->execution_modes = (SpvExecutionMode*)calloc(p_entry_point->execution_mode_count, sizeof(*p_entry_point->execution_modes)); if (IsNull(p_entry_point->execution_modes)) { SafeFree(indices); return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } } for (size_t node_idx = 0; node_idx < p_parser->node_count; ++node_idx) { SpvReflectPrvNode* p_node = &(p_parser->nodes[node_idx]); if (p_node->op != SpvOpExecutionMode) { continue; } // Read entry point id uint32_t entry_point_id = 0; CHECKED_READU32(p_parser, p_node->word_offset + 1, entry_point_id); // Find entry point SpvReflectEntryPoint* p_entry_point = NULL; uint32_t* idx = NULL; for (size_t entry_point_idx = 0; entry_point_idx < p_module->entry_point_count; ++entry_point_idx) { if (p_module->entry_points[entry_point_idx].id == entry_point_id) { p_entry_point = &p_module->entry_points[entry_point_idx]; idx = &indices[entry_point_idx]; break; } } // Read execution mode uint32_t execution_mode = (uint32_t)INVALID_VALUE; CHECKED_READU32(p_parser, p_node->word_offset + 2, execution_mode); p_entry_point->execution_modes[(*idx)++] = (SpvExecutionMode)execution_mode; } SafeFree(indices); } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParsePushConstantBlocks(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module) { for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) { continue; } p_module->push_constant_block_count += 1; } if (p_module->push_constant_block_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } p_module->push_constant_blocks = (SpvReflectBlockVariable*)calloc(p_module->push_constant_block_count, 
sizeof(*p_module->push_constant_blocks)); if (IsNull(p_module->push_constant_blocks)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } p_parser->physical_pointer_struct_count = 0; uint32_t push_constant_index = 0; for (size_t i = 0; i < p_parser->node_count; ++i) { SpvReflectPrvNode* p_node = &(p_parser->nodes[i]); if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) { continue; } SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id); if (IsNull(p_node) || IsNull(p_type)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } // If the type is a pointer, resolve it if (p_type->op == SpvOpTypePointer) { // Find the type's node SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id); if (IsNull(p_type_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } // Should be the resolved type p_type = FindType(p_module, p_type_node->type_id); if (IsNull(p_type)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } } SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id); if (IsNull(p_type_node)) { return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; } SpvReflectBlockVariable* p_push_constant = &p_module->push_constant_blocks[push_constant_index]; p_push_constant->spirv_id = p_node->result_id; p_parser->physical_pointer_count = 0; SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, p_push_constant); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; ++access_chain_index) { SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]); // Skip any access chains that aren't touching this push constant block if (p_push_constant->spirv_id != FindBaseId(p_parser, p_access_chain)) { continue; } SpvReflectBlockVariable* p_var = (p_access_chain->base_id == p_push_constant->spirv_id) ? 
p_push_constant : GetRefBlkVar(p_parser, p_access_chain); result = ParseDescriptorBlockVariableUsage(p_parser, p_module, p_access_chain, 0, (SpvOp)INVALID_VALUE, p_var); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } } p_push_constant->name = p_node->name; result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, false, p_push_constant); if (result != SPV_REFLECT_RESULT_SUCCESS) { return result; } // Get minimum offset for whole Push Constant block // It is not valid SPIR-V to have an empty Push Constant Block p_push_constant->offset = UINT32_MAX; for (uint32_t k = 0; k < p_push_constant->member_count; ++k) { const uint32_t member_offset = p_push_constant->members[k].offset; p_push_constant->offset = Min(p_push_constant->offset, member_offset); } ++push_constant_index; } return SPV_REFLECT_RESULT_SUCCESS; } static int SortCompareDescriptorSet(const void* a, const void* b) { const SpvReflectDescriptorSet* p_elem_a = (const SpvReflectDescriptorSet*)a; const SpvReflectDescriptorSet* p_elem_b = (const SpvReflectDescriptorSet*)b; int value = (int)(p_elem_a->set) - (int)(p_elem_b->set); // We should never see duplicate descriptor set numbers in a shader; if so, a tiebreaker // would be needed here. 
assert(value != 0); return value; } static SpvReflectResult ParseEntrypointDescriptorSets(SpvReflectShaderModule* p_module) { // Update the entry point's sets for (uint32_t i = 0; i < p_module->entry_point_count; ++i) { SpvReflectEntryPoint* p_entry = &p_module->entry_points[i]; for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) { SafeFree(p_entry->descriptor_sets[j].bindings); } SafeFree(p_entry->descriptor_sets); p_entry->descriptor_set_count = 0; for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) { const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; for (uint32_t k = 0; k < p_set->binding_count; ++k) { bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_set->bindings[k]->spirv_id); if (found) { ++p_entry->descriptor_set_count; break; } } } p_entry->descriptor_sets = NULL; if (p_entry->descriptor_set_count > 0) { p_entry->descriptor_sets = (SpvReflectDescriptorSet*)calloc(p_entry->descriptor_set_count, sizeof(*p_entry->descriptor_sets)); if (IsNull(p_entry->descriptor_sets)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } } p_entry->descriptor_set_count = 0; for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) { const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; uint32_t count = 0; for (uint32_t k = 0; k < p_set->binding_count; ++k) { bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_set->bindings[k]->spirv_id); if (found) { ++count; } } if (count == 0) { continue; } SpvReflectDescriptorSet* p_entry_set = &p_entry->descriptor_sets[p_entry->descriptor_set_count++]; p_entry_set->set = p_set->set; p_entry_set->bindings = (SpvReflectDescriptorBinding**)calloc(count, sizeof(*p_entry_set->bindings)); if (IsNull(p_entry_set->bindings)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } for (uint32_t k = 0; k < p_set->binding_count; ++k) { bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, 
p_set->bindings[k]->spirv_id); if (found) { p_entry_set->bindings[p_entry_set->binding_count++] = p_set->bindings[k]; } } } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult ParseDescriptorSets(SpvReflectShaderModule* p_module) { // Count the descriptors in each set for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) { SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[i]); // Look for a target set using the descriptor's set number SpvReflectDescriptorSet* p_target_set = NULL; for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) { SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; if (p_set->set == p_descriptor->set) { p_target_set = p_set; break; } } // If a target set isn't found, find the first available one. if (IsNull(p_target_set)) { for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) { SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; if (p_set->set == (uint32_t)INVALID_VALUE) { p_target_set = p_set; p_target_set->set = p_descriptor->set; break; } } } if (IsNull(p_target_set)) { return SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR; } p_target_set->binding_count += 1; } // Count the descriptor sets for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) { const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i]; if (p_set->set != (uint32_t)INVALID_VALUE) { p_module->descriptor_set_count += 1; } } // Sort the descriptor sets based on numbers if (p_module->descriptor_set_count > 0) { qsort(p_module->descriptor_sets, p_module->descriptor_set_count, sizeof(*(p_module->descriptor_sets)), SortCompareDescriptorSet); } // Build descriptor pointer array for (uint32_t i = 0; i < p_module->descriptor_set_count; ++i) { SpvReflectDescriptorSet* p_set = &(p_module->descriptor_sets[i]); p_set->bindings = (SpvReflectDescriptorBinding**)calloc(p_set->binding_count, sizeof(*(p_set->bindings))); uint32_t descriptor_index = 0; for (uint32_t j = 0; j < 
p_module->descriptor_binding_count; ++j) { SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[j]); if (p_descriptor->set == p_set->set) { assert(descriptor_index < p_set->binding_count); p_set->bindings[descriptor_index] = p_descriptor; ++descriptor_index; } } } return ParseEntrypointDescriptorSets(p_module); } static SpvReflectResult DisambiguateStorageBufferSrvUav(SpvReflectShaderModule* p_module) { if (p_module->descriptor_binding_count == 0) { return SPV_REFLECT_RESULT_SUCCESS; } for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) { SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]); // Skip everything that isn't a STORAGE_BUFFER descriptor if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) { continue; } // // Vulkan doesn't disambiguate between SRVs and UAVs so they // come back as STORAGE_BUFFER. The block parsing process will // mark a block as non-writable should any member of the block // or its descendants are non-writable. 
// if (p_descriptor->block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) { p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV; } } return SPV_REFLECT_RESULT_SUCCESS; } static SpvReflectResult SynchronizeDescriptorSets(SpvReflectShaderModule* p_module) { // Free and reset all descriptor set numbers for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) { SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i]; SafeFree(p_set->bindings); p_set->binding_count = 0; p_set->set = (uint32_t)INVALID_VALUE; } // Set descriptor set count to zero p_module->descriptor_set_count = 0; SpvReflectResult result = ParseDescriptorSets(p_module); return result; } static SpvReflectResult CreateShaderModule(uint32_t flags, size_t size, const void* p_code, SpvReflectShaderModule* p_module) { // Initialize all module fields to zero memset(p_module, 0, sizeof(*p_module)); // Allocate module internals #ifdef __cplusplus p_module->_internal = (SpvReflectShaderModule::Internal*)calloc(1, sizeof(*(p_module->_internal))); #else p_module->_internal = calloc(1, sizeof(*(p_module->_internal))); #endif if (IsNull(p_module->_internal)) { return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Copy flags p_module->_internal->module_flags = flags; // Figure out if we need to copy the SPIR-V code or not if (flags & SPV_REFLECT_MODULE_FLAG_NO_COPY) { // Set internal size and pointer to args passed in p_module->_internal->spirv_size = size; #if defined(__cplusplus) p_module->_internal->spirv_code = const_cast(static_cast(p_code)); // cast that const away #else p_module->_internal->spirv_code = (void*)p_code; // cast that const away #endif p_module->_internal->spirv_word_count = (uint32_t)(size / SPIRV_WORD_SIZE); } else { // Allocate SPIR-V code storage p_module->_internal->spirv_size = size; p_module->_internal->spirv_code = (uint32_t*)calloc(1, p_module->_internal->spirv_size); p_module->_internal->spirv_word_count = (uint32_t)(size / SPIRV_WORD_SIZE); if 
(IsNull(p_module->_internal->spirv_code)) { SafeFree(p_module->_internal); return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; } // Copy SPIR-V to code storage memcpy(p_module->_internal->spirv_code, p_code, size); } // Initialize everything to zero SpvReflectPrvParser parser; memset(&parser, 0, sizeof(SpvReflectPrvParser)); // Create parser SpvReflectResult result = CreateParser(p_module->_internal->spirv_size, p_module->_internal->spirv_code, &parser); // Generator { const uint32_t* p_ptr = (const uint32_t*)p_module->_internal->spirv_code; p_module->generator = (SpvReflectGenerator)((*(p_ptr + 2) & 0xFFFF0000) >> 16); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseNodes(&parser); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseStrings(&parser); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseSource(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseFunctions(&parser); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseMemberCounts(&parser); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseNames(&parser); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseDecorations(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } // Start of reflection data parsing if (result == SPV_REFLECT_RESULT_SUCCESS) { p_module->source_language = parser.source_language; p_module->source_language_version = parser.source_language_version; // Zero out descriptor set data p_module->descriptor_set_count = 0; memset(p_module->descriptor_sets, 0, SPV_REFLECT_MAX_DESCRIPTOR_SETS * sizeof(*p_module->descriptor_sets)); // Initialize 
descriptor set numbers for (uint32_t set_number = 0; set_number < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++set_number) { p_module->descriptor_sets[set_number].set = (uint32_t)INVALID_VALUE; } } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseTypes(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseDescriptorBindings(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseDescriptorType(p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseUAVCounterBindings(p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseDescriptorBlocks(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParsePushConstantBlocks(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseEntryPoints(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseCapabilities(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS && p_module->entry_point_count > 0) { SpvReflectEntryPoint* p_entry = &(p_module->entry_points[0]); p_module->entry_point_name = p_entry->name; p_module->entry_point_id = p_entry->id; p_module->spirv_execution_model = p_entry->spirv_execution_model; p_module->shader_stage = p_entry->shader_stage; p_module->input_variable_count = p_entry->input_variable_count; p_module->input_variables = p_entry->input_variables; p_module->output_variable_count = p_entry->output_variable_count; p_module->output_variables = p_entry->output_variables; 
p_module->interface_variable_count = p_entry->interface_variable_count; p_module->interface_variables = p_entry->interface_variables; } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = DisambiguateStorageBufferSrvUav(p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = SynchronizeDescriptorSets(p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } if (result == SPV_REFLECT_RESULT_SUCCESS) { result = ParseExecutionModes(&parser, p_module); SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS); } // Destroy module if parse was not successful if (result != SPV_REFLECT_RESULT_SUCCESS) { spvReflectDestroyShaderModule(p_module); } DestroyParser(&parser); return result; } SpvReflectResult spvReflectCreateShaderModule(size_t size, const void* p_code, SpvReflectShaderModule* p_module) { return CreateShaderModule(0, size, p_code, p_module); } SpvReflectResult spvReflectCreateShaderModule2(uint32_t flags, size_t size, const void* p_code, SpvReflectShaderModule* p_module) { return CreateShaderModule(flags, size, p_code, p_module); } SpvReflectResult spvReflectGetShaderModule(size_t size, const void* p_code, SpvReflectShaderModule* p_module) { return spvReflectCreateShaderModule(size, p_code, p_module); } static void SafeFreeTypes(SpvReflectTypeDescription* p_type) { if (IsNull(p_type) || p_type->copied) { return; } if (IsNotNull(p_type->members)) { for (size_t i = 0; i < p_type->member_count; ++i) { SpvReflectTypeDescription* p_member = &p_type->members[i]; SafeFreeTypes(p_member); } SafeFree(p_type->members); p_type->members = NULL; } } static void SafeFreeBlockVariables(SpvReflectBlockVariable* p_block) { if (IsNull(p_block)) { return; } // We share pointers to Physical Pointer structs and don't want to double free if (p_block->flags & SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY) { return; } if (IsNotNull(p_block->members)) { for (size_t i = 0; i < p_block->member_count; ++i) 
{ SpvReflectBlockVariable* p_member = &p_block->members[i]; SafeFreeBlockVariables(p_member); } SafeFree(p_block->members); p_block->members = NULL; } } static void SafeFreeInterfaceVariable(SpvReflectInterfaceVariable* p_interface) { if (IsNull(p_interface)) { return; } if (IsNotNull(p_interface->members)) { for (size_t i = 0; i < p_interface->member_count; ++i) { SpvReflectInterfaceVariable* p_member = &p_interface->members[i]; SafeFreeInterfaceVariable(p_member); } SafeFree(p_interface->members); p_interface->members = NULL; } } void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module) { if (IsNull(p_module->_internal)) { return; } SafeFree(p_module->source_source); // Descriptor set bindings for (size_t i = 0; i < p_module->descriptor_set_count; ++i) { SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i]; free(p_set->bindings); } // Descriptor binding blocks for (size_t i = 0; i < p_module->descriptor_binding_count; ++i) { SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[i]; if (IsNotNull(p_descriptor->byte_address_buffer_offsets)) { SafeFree(p_descriptor->byte_address_buffer_offsets); } SafeFreeBlockVariables(&p_descriptor->block); } SafeFree(p_module->descriptor_bindings); // Entry points for (size_t i = 0; i < p_module->entry_point_count; ++i) { SpvReflectEntryPoint* p_entry = &p_module->entry_points[i]; for (size_t j = 0; j < p_entry->interface_variable_count; j++) { SafeFreeInterfaceVariable(&p_entry->interface_variables[j]); } for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) { SafeFree(p_entry->descriptor_sets[j].bindings); } SafeFree(p_entry->descriptor_sets); SafeFree(p_entry->input_variables); SafeFree(p_entry->output_variables); SafeFree(p_entry->interface_variables); SafeFree(p_entry->used_uniforms); SafeFree(p_entry->used_push_constants); SafeFree(p_entry->execution_modes); } SafeFree(p_module->capabilities); SafeFree(p_module->entry_points); SafeFree(p_module->spec_constants); // Push 
constants for (size_t i = 0; i < p_module->push_constant_block_count; ++i) { SafeFreeBlockVariables(&p_module->push_constant_blocks[i]); } SafeFree(p_module->push_constant_blocks); // Type infos for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) { SpvReflectTypeDescription* p_type = &p_module->_internal->type_descriptions[i]; if (IsNotNull(p_type->members)) { SafeFreeTypes(p_type); } SafeFree(p_type->members); } SafeFree(p_module->_internal->type_descriptions); // Free SPIR-V code if there was a copy if ((p_module->_internal->module_flags & SPV_REFLECT_MODULE_FLAG_NO_COPY) == 0) { SafeFree(p_module->_internal->spirv_code); } // Free internal SafeFree(p_module->_internal); } uint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module) { if (IsNull(p_module)) { return 0; } return (uint32_t)(p_module->_internal->spirv_size); } const uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module) { if (IsNull(p_module)) { return NULL; } return p_module->_internal->spirv_code; } const SpvReflectEntryPoint* spvReflectGetEntryPoint(const SpvReflectShaderModule* p_module, const char* entry_point) { if (IsNull(p_module) || IsNull(entry_point)) { return NULL; } for (uint32_t i = 0; i < p_module->entry_point_count; ++i) { if (strcmp(p_module->entry_points[i].name, entry_point) == 0) { return &p_module->entry_points[i]; } } return NULL; } SpvReflectResult spvReflectEnumerateDescriptorBindings(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_bindings)) { if (*p_count != p_module->descriptor_binding_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectDescriptorBinding* p_bindings = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[index]; 
pp_bindings[index] = p_bindings; } } else { *p_count = p_module->descriptor_binding_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; } uint32_t count = 0; for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) { bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_module->descriptor_bindings[i].spirv_id); if (found) { if (IsNotNull(pp_bindings)) { if (count >= *p_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } pp_bindings[count++] = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[i]; } else { ++count; } } } if (IsNotNull(pp_bindings)) { if (count != *p_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } } else { *p_count = count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateDescriptorSets(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_sets)) { if (*p_count != p_module->descriptor_set_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_module->descriptor_sets[index]; pp_sets[index] = p_set; } } else { *p_count = p_module->descriptor_set_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateEntryPointDescriptorSets(const 
SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; } if (IsNotNull(pp_sets)) { if (*p_count != p_entry->descriptor_set_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_entry->descriptor_sets[index]; pp_sets[index] = p_set; } } else { *p_count = p_entry->descriptor_set_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateInterfaceVariables(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_variables)) { if (*p_count != p_module->interface_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = &p_module->interface_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_module->interface_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; 
} if (IsNotNull(pp_variables)) { if (*p_count != p_entry->interface_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = &p_entry->interface_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_entry->interface_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateInputVariables(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_variables)) { if (*p_count != p_module->input_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = p_module->input_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_module->input_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateEntryPointInputVariables(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; } if (IsNotNull(pp_variables)) { if (*p_count != p_entry->input_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = p_entry->input_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_entry->input_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateOutputVariables(const 
SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_variables)) { if (*p_count != p_module->output_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = p_module->output_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_module->output_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateEntryPointOutputVariables(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; } if (IsNotNull(pp_variables)) { if (*p_count != p_entry->output_variable_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { SpvReflectInterfaceVariable* p_var = p_entry->output_variables[index]; pp_variables[index] = p_var; } } else { *p_count = p_entry->output_variable_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumeratePushConstantBlocks(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (pp_blocks != NULL) { if (*p_count != p_module->push_constant_block_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } for (uint32_t index = 0; index < *p_count; ++index) { 
SpvReflectBlockVariable* p_push_constant_blocks = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[index]; pp_blocks[index] = p_push_constant_blocks; } } else { *p_count = p_module->push_constant_block_count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumeratePushConstants(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) { return spvReflectEnumeratePushConstantBlocks(p_module, p_count, pp_blocks); } SpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); if (IsNull(p_entry)) { return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; } uint32_t count = 0; for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) { bool found = SearchSortedUint32(p_entry->used_push_constants, p_entry->used_push_constant_count, p_module->push_constant_blocks[i].spirv_id); if (found) { if (IsNotNull(pp_blocks)) { if (count >= *p_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } pp_blocks[count++] = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[i]; } else { ++count; } } } if (IsNotNull(pp_blocks)) { if (count != *p_count) { return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; } } else { *p_count = count; } return SPV_REFLECT_RESULT_SUCCESS; } SpvReflectResult spvReflectEnumerateSpecializationConstants(const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectSpecializationConstant** pp_constants) { if (IsNull(p_module)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNull(p_count)) { return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; } if (IsNotNull(pp_constants)) { if (*p_count != p_module->spec_constant_count) { return 
SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH;
    }
    /* Fill the caller's array with pointers into the module's own constant storage. */
    for (uint32_t index = 0; index < *p_count; ++index) {
      SpvReflectSpecializationConstant* p_constant = (SpvReflectSpecializationConstant*)&p_module->spec_constants[index];
      pp_constants[index] = p_constant;
    }
  } else {
    /* Query mode: no output array supplied, report the available count only. */
    *p_count = p_module->spec_constant_count;
  }
  return SPV_REFLECT_RESULT_SUCCESS;
}

/* Finds the descriptor binding with the given binding and set numbers.
   Returns NULL when not found; when p_result is non-NULL it receives
   SUCCESS, ERROR_NULL_POINTER (NULL module) or ERROR_ELEMENT_NOT_FOUND. */
const SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding(const SpvReflectShaderModule* p_module, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result) {
  const SpvReflectDescriptorBinding* p_descriptor = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {
      const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index];
      if ((p_potential->binding == binding_number) && (p_potential->set == set_number)) {
        p_descriptor = p_potential;
        break;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_descriptor)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_descriptor;
}

/* Entry-point-scoped variant of spvReflectGetDescriptorBinding: the binding
   must additionally appear in the entry point's used_uniforms id list. */
const SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result) {
  const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
  if (IsNull(p_entry)) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectDescriptorBinding* p_descriptor = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {
      const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index];
      /* NOTE: SearchSortedUint32's name suggests used_uniforms is kept sorted
         by the parser -- the helper itself is defined elsewhere in this file. */
      bool found = SearchSortedUint32(p_entry->used_uniforms, p_entry->used_uniform_count, p_potential->spirv_id);
      if ((p_potential->binding == binding_number) && (p_potential->set == set_number) && found) {
        p_descriptor = p_potential;
        break;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_descriptor)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_descriptor;
}

/* Finds the descriptor set with the given set number.
   No break in the loop: the last matching entry wins. */
const SpvReflectDescriptorSet* spvReflectGetDescriptorSet(const SpvReflectShaderModule* p_module, uint32_t set_number, SpvReflectResult* p_result) {
  const SpvReflectDescriptorSet* p_set = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->descriptor_set_count; ++index) {
      const SpvReflectDescriptorSet* p_potential = &p_module->descriptor_sets[index];
      if (p_potential->set == set_number) {
        p_set = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_set)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_set;
}

/* Entry-point-scoped variant: searches the entry point's own descriptor-set
   list instead of the module-wide one. Last matching entry wins. */
const SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t set_number, SpvReflectResult* p_result) {
  const SpvReflectDescriptorSet* p_set = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t index = 0; index < p_entry->descriptor_set_count; ++index) {
      const SpvReflectDescriptorSet* p_potential = &p_entry->descriptor_sets[index];
      if (p_potential->set == set_number) {
        p_set = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_set)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_set;
}

/* Finds the input interface variable decorated with the given location.
   INVALID_VALUE is rejected up front. Last matching entry wins. */
const SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation(const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result) {
  if (location == INVALID_VALUE) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_module->input_variables[index];
      if (p_potential->location == location) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Thin wrapper over spvReflectGetInputVariableByLocation. */
const SpvReflectInterfaceVariable* spvReflectGetInputVariable(const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result) {
  return spvReflectGetInputVariableByLocation(p_module, location, p_result);
}

/* Entry-point-scoped lookup of an input variable by location. */
const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t location, SpvReflectResult* p_result) {
  if (location == INVALID_VALUE) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_entry->input_variables[index];
      if (p_potential->location == location) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Finds an input variable by its HLSL semantic string (exact strcmp match).
   NULL semantic -> ERROR_NULL_POINTER; empty semantic -> ERROR_ELEMENT_NOT_FOUND. */
const SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic(const SpvReflectShaderModule* p_module, const char* semantic, SpvReflectResult* p_result) {
  if (IsNull(semantic)) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
    }
    return NULL;
  }
  if (semantic[0] == '\0') {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_module->input_variables[index];
      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Entry-point-scoped lookup of an input variable by semantic. */
const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic(const SpvReflectShaderModule* p_module, const char* entry_point, const char* semantic, SpvReflectResult* p_result) {
  if (IsNull(semantic)) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
    }
    return NULL;
  }
  if (semantic[0] == '\0') {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_entry->input_variables[index];
      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Output-variable counterpart of spvReflectGetInputVariableByLocation. */
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation(const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result) {
  if (location == INVALID_VALUE) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_module->output_variables[index];
      if (p_potential->location == location) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Thin wrapper over spvReflectGetOutputVariableByLocation. */
const SpvReflectInterfaceVariable* spvReflectGetOutputVariable(const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result) {
  return spvReflectGetOutputVariableByLocation(p_module, location, p_result);
}

/* Entry-point-scoped lookup of an output variable by location. */
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation(const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t location, SpvReflectResult* p_result) {
  if (location == INVALID_VALUE) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_entry->output_variables[index];
      if (p_potential->location == location) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Finds an output variable by its HLSL semantic string (exact strcmp match). */
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic(const SpvReflectShaderModule* p_module, const char* semantic, SpvReflectResult* p_result) {
  if (IsNull(semantic)) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
    }
    return NULL;
  }
  if (semantic[0] == '\0') {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_module->output_variables[index];
      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Entry-point-scoped lookup of an output variable by semantic. */
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic(const SpvReflectShaderModule* p_module, const char* entry_point, const char* semantic, SpvReflectResult* p_result) {
  if (IsNull(semantic)) {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
    }
    return NULL;
  }
  if (semantic[0] == '\0') {
    if (IsNotNull(p_result)) {
      *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
    }
    return NULL;
  }
  const SpvReflectInterfaceVariable* p_var = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) {
      const SpvReflectInterfaceVariable* p_potential = p_entry->output_variables[index];
      if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) {
        p_var = p_potential;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_var)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_var;
}

/* Returns the index-th push-constant block, or NULL when out of range. */
const SpvReflectBlockVariable* spvReflectGetPushConstantBlock(const SpvReflectShaderModule* p_module, uint32_t index, SpvReflectResult* p_result) {
  const SpvReflectBlockVariable* p_push_constant = NULL;
  if (IsNotNull(p_module)) {
    if (index < p_module->push_constant_block_count) {
      p_push_constant = &p_module->push_constant_blocks[index];
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_push_constant)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_push_constant;
}

/* Thin wrapper over spvReflectGetPushConstantBlock. */
const SpvReflectBlockVariable* spvReflectGetPushConstant(const SpvReflectShaderModule* p_module, uint32_t index, SpvReflectResult* p_result) {
  return spvReflectGetPushConstantBlock(p_module, index, p_result);
}

/* Returns the first push-constant block that the given entry point actually
   uses (membership checked against the entry's used_push_constants ids). */
const SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock(const SpvReflectShaderModule* p_module, const char* entry_point, SpvReflectResult* p_result) {
  const SpvReflectBlockVariable* p_push_constant = NULL;
  if (IsNotNull(p_module)) {
    const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point);
    if (IsNull(p_entry)) {
      if (IsNotNull(p_result)) {
        *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
      }
      return NULL;
    }
    for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) {
      bool found = SearchSortedUint32(p_entry->used_push_constants, p_entry->used_push_constant_count, p_module->push_constant_blocks[i].spirv_id);
      if (found) {
        p_push_constant = &p_module->push_constant_blocks[i];
        break;
      }
    }
  }
  if (IsNotNull(p_result)) {
    *p_result = IsNotNull(p_push_constant)
                    ? SPV_REFLECT_RESULT_SUCCESS
                    : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND);
  }
  return p_push_constant;
}

/* Rewrites a binding's binding and/or set number both in the reflection data
   and in the stored SPIR-V words (word_offset indexes into spirv_code).
   Pass SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE / SPV_REFLECT_SET_NUMBER_DONT_CHANGE
   to leave either number untouched. The target is matched by pointer identity,
   so p_binding must point into this module's descriptor_bindings array. */
SpvReflectResult spvReflectChangeDescriptorBindingNumbers(SpvReflectShaderModule* p_module, const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number, uint32_t new_set_binding) {
  if (IsNull(p_module)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  if (IsNull(p_binding)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  SpvReflectDescriptorBinding* p_target_descriptor = NULL;
  for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) {
    if (&p_module->descriptor_bindings[index] == p_binding) {
      p_target_descriptor = &p_module->descriptor_bindings[index];
      break;
    }
  }
  if (IsNotNull(p_target_descriptor)) {
    /* Guard against a stale/corrupt word offset before patching the code. */
    if (p_target_descriptor->word_offset.binding > (p_module->_internal->spirv_word_count - 1)) {
      return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;
    }
    // Binding number
    if (new_binding_number != (uint32_t)SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE) {
      uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.binding;
      *p_code = new_binding_number;
      p_target_descriptor->binding = new_binding_number;
    }
    // Set number
    if (new_set_binding != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {
      uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.set;
      *p_code = new_set_binding;
      p_target_descriptor->set = new_set_binding;
    }
  }
  SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS;
  if (new_set_binding != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {
    /* Set membership changed, so the per-set grouping must be rebuilt. */
    result = SynchronizeDescriptorSets(p_module);
  }
  return result;
}

/* Thin wrapper over spvReflectChangeDescriptorBindingNumbers. */
SpvReflectResult spvReflectChangeDescriptorBindingNumber(SpvReflectShaderModule* p_module, const SpvReflectDescriptorBinding* p_descriptor_binding, uint32_t new_binding_number, uint32_t optional_new_set_number) {
  return spvReflectChangeDescriptorBindingNumbers(p_module, p_descriptor_binding, new_binding_number, optional_new_set_number);
}

/* Renumbers a whole descriptor set: every binding in the matched set gets its
   set number patched in the reflection data and in the stored SPIR-V words,
   then the per-set grouping is rebuilt. */
SpvReflectResult spvReflectChangeDescriptorSetNumber(SpvReflectShaderModule* p_module, const SpvReflectDescriptorSet* p_set, uint32_t new_set_number) {
  if (IsNull(p_module)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  if (IsNull(p_set)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  SpvReflectDescriptorSet* p_target_set = NULL;
  for (uint32_t index = 0; index < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++index) {
    // The descriptor sets for specific entry points might not be in this set,
    // so just match on set index.
    if (p_module->descriptor_sets[index].set == p_set->set) {
      p_target_set = (SpvReflectDescriptorSet*)p_set;
      break;
    }
  }
  SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS;
  if (IsNotNull(p_target_set) && new_set_number != (uint32_t)SPV_REFLECT_SET_NUMBER_DONT_CHANGE) {
    for (uint32_t index = 0; index < p_target_set->binding_count; ++index) {
      SpvReflectDescriptorBinding* p_descriptor = p_target_set->bindings[index];
      if (p_descriptor->word_offset.set > (p_module->_internal->spirv_word_count - 1)) {
        return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;
      }
      uint32_t* p_code = p_module->_internal->spirv_code + p_descriptor->word_offset.set;
      *p_code = new_set_number;
      p_descriptor->set = new_set_number;
    }
    result = SynchronizeDescriptorSets(p_module);
  }
  return result;
}

/* Shared helper: patches an interface variable's Location decoration word in
   the stored SPIR-V and updates the reflection data to match. */
static SpvReflectResult ChangeVariableLocation(SpvReflectShaderModule* p_module, SpvReflectInterfaceVariable* p_variable, uint32_t new_location) {
  if (p_variable->word_offset.location > (p_module->_internal->spirv_word_count - 1)) {
    return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED;
  }
  uint32_t* p_code = p_module->_internal->spirv_code + p_variable->word_offset.location;
  *p_code = new_location;
  p_variable->location = new_location;
  return SPV_REFLECT_RESULT_SUCCESS;
}

/* Changes an input variable's location. The variable is matched by pointer
   identity against the module's input_variables array. */
SpvReflectResult spvReflectChangeInputVariableLocation(SpvReflectShaderModule* p_module, const SpvReflectInterfaceVariable* p_input_variable, uint32_t new_location) {
  if (IsNull(p_module)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  if (IsNull(p_input_variable)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  for (uint32_t index = 0; index < p_module->input_variable_count; ++index) {
    if (p_module->input_variables[index] == p_input_variable) {
      return ChangeVariableLocation(p_module, p_module->input_variables[index], new_location);
    }
  }
  return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
}

/* Changes an output variable's location. Matched by pointer identity. */
SpvReflectResult spvReflectChangeOutputVariableLocation(SpvReflectShaderModule* p_module, const SpvReflectInterfaceVariable* p_output_variable, uint32_t new_location) {
  if (IsNull(p_module)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  if (IsNull(p_output_variable)) {
    return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
  }
  for (uint32_t index = 0; index < p_module->output_variable_count; ++index) {
    if (p_module->output_variables[index] == p_output_variable) {
      return ChangeVariableLocation(p_module, p_module->output_variables[index], new_location);
    }
  }
  return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND;
}

/* Maps an SpvSourceLanguage enum value to a human-readable name. */
const char* spvReflectSourceLanguage(SpvSourceLanguage source_lang) {
  switch (source_lang) {
    case SpvSourceLanguageESSL:
      return "ESSL";
    case SpvSourceLanguageGLSL:
      return "GLSL";
    case SpvSourceLanguageOpenCL_C:
      return "OpenCL_C";
    case SpvSourceLanguageOpenCL_CPP:
      return "OpenCL_CPP";
    case SpvSourceLanguageHLSL:
      return "HLSL";
    case SpvSourceLanguageCPP_for_OpenCL:
      return "CPP_for_OpenCL";
    case SpvSourceLanguageSYCL:
      return "SYCL";
    case SpvSourceLanguageHERO_C:
      return "Hero C";
    case SpvSourceLanguageNZSL:
      return "NZSL";
    default:
      break;
  }
  // The source language is SpvSourceLanguageUnknown, SpvSourceLanguageMax, or
  // some other value that does not correspond to a known language.
  return "Unknown";
}

/* Returns the type name of a block variable, or NULL for a NULL input.
   NOTE(review): assumes type_description is non-NULL for non-NULL blocks --
   confirm against the parser before relying on this for partial modules. */
const char* spvReflectBlockVariableTypeName(const SpvReflectBlockVariable* p_var) {
  if (p_var == NULL) {
    return NULL;
  }
  return p_var->type_description->type_name;
}

================================================ FILE: deps/SPIRV-reflect/spirv_reflect.h ================================================

/*
 Copyright 2017-2022 Google Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

/*

VERSION HISTORY

  1.0   (2018-03-27) Initial public release

*/

// clang-format off
/*!

 @file spirv_reflect.h

*/
#ifndef SPIRV_REFLECT_H
#define SPIRV_REFLECT_H

#if defined(SPIRV_REFLECT_USE_SYSTEM_SPIRV_H)
/* NOTE(review): the include target was lost in extraction; upstream uses the
   system <spirv/unified1/spirv.h> here -- confirm against upstream. */
#include
#else
#include "./include/spirv/unified1/spirv.h"
#endif

/* NOTE(review): these two include targets were also lost in extraction;
   upstream includes <stdint.h> and <string.h> -- confirm against upstream. */
#include
#include

#ifdef _MSC_VER
  #define SPV_REFLECT_DEPRECATED(msg_str) __declspec(deprecated("This symbol is deprecated. Details: " msg_str))
#elif defined(__clang__)
  #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str)))
#elif defined(__GNUC__)
  #if GCC_VERSION >= 40500
    #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str)))
  #else
    #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated))
  #endif
#else
  #define SPV_REFLECT_DEPRECATED(msg_str)
#endif

/*!
 @enum SpvReflectResult

*/
typedef enum SpvReflectResult {
  SPV_REFLECT_RESULT_SUCCESS,
  SPV_REFLECT_RESULT_NOT_READY,
  SPV_REFLECT_RESULT_ERROR_PARSE_FAILED,
  SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED,
  SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED,
  SPV_REFLECT_RESULT_ERROR_NULL_POINTER,
  SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR,
  SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH,
  SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER,
  SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE,
  SPV_REFLECT_RESULT_ERROR_SPIRV_SET_NUMBER_OVERFLOW,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_STORAGE_CLASS,
  SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION,
  SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ENTRY_POINT,
  SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_EXECUTION_MODE,
  SPV_REFLECT_RESULT_ERROR_SPIRV_MAX_RECURSIVE_EXCEEDED,
} SpvReflectResult;

/*!

 @enum SpvReflectModuleFlagBits

 SPV_REFLECT_MODULE_FLAG_NO_COPY - Disables copying of SPIR-V code
   when a SPIRV-Reflect shader module is created. It is the
   responsibility of the calling program to ensure that the pointer
   remains valid and the memory it's pointing to is not freed while
   SPIRV-Reflect operations are taking place. Freeing the backing memory
   will cause undefined behavior or most likely a crash. This is flag is
   intended for cases where the memory overhead of storing the copied
   SPIR-V is undesirable.

*/
typedef enum SpvReflectModuleFlagBits {
  SPV_REFLECT_MODULE_FLAG_NONE    = 0x00000000,
  SPV_REFLECT_MODULE_FLAG_NO_COPY = 0x00000001,
} SpvReflectModuleFlagBits;

typedef uint32_t SpvReflectModuleFlags;

/*!

 @enum SpvReflectTypeFlagBits

*/
typedef enum SpvReflectTypeFlagBits {
  SPV_REFLECT_TYPE_FLAG_UNDEFINED                       = 0x00000000,
  SPV_REFLECT_TYPE_FLAG_VOID                            = 0x00000001,
  SPV_REFLECT_TYPE_FLAG_BOOL                            = 0x00000002,
  SPV_REFLECT_TYPE_FLAG_INT                             = 0x00000004,
  SPV_REFLECT_TYPE_FLAG_FLOAT                           = 0x00000008,
  SPV_REFLECT_TYPE_FLAG_VECTOR                          = 0x00000100,
  SPV_REFLECT_TYPE_FLAG_MATRIX                          = 0x00000200,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE                  = 0x00010000,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER                = 0x00020000,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE          = 0x00040000,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK                  = 0x00080000,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE = 0x00100000,
  SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK                   = 0x00FF0000,
  SPV_REFLECT_TYPE_FLAG_STRUCT                          = 0x10000000,
  SPV_REFLECT_TYPE_FLAG_ARRAY                           = 0x20000000,
  SPV_REFLECT_TYPE_FLAG_REF                             = 0x40000000,
} SpvReflectTypeFlagBits;

typedef uint32_t SpvReflectTypeFlags;

/*!

 @enum SpvReflectDecorationBits

 NOTE: HLSL row_major and column_major decorations are reversed
       in SPIR-V. Meaning that matrices declrations with row_major
       will get reflected as column_major and vice versa. The
       row and column decorations get appied during the compilation.
       SPIRV-Reflect reads the data as is and does not make any
       attempt to correct it to match what's in the source.

 The Patch, PerVertex, and PerTask are used for Interface variables that
 can have array

*/
typedef enum SpvReflectDecorationFlagBits {
  SPV_REFLECT_DECORATION_NONE                = 0x00000000,
  SPV_REFLECT_DECORATION_BLOCK               = 0x00000001,
  SPV_REFLECT_DECORATION_BUFFER_BLOCK        = 0x00000002,
  SPV_REFLECT_DECORATION_ROW_MAJOR           = 0x00000004,
  SPV_REFLECT_DECORATION_COLUMN_MAJOR        = 0x00000008,
  SPV_REFLECT_DECORATION_BUILT_IN            = 0x00000010,
  SPV_REFLECT_DECORATION_NOPERSPECTIVE       = 0x00000020,
  SPV_REFLECT_DECORATION_FLAT                = 0x00000040,
  SPV_REFLECT_DECORATION_NON_WRITABLE        = 0x00000080,
  SPV_REFLECT_DECORATION_RELAXED_PRECISION   = 0x00000100,
  SPV_REFLECT_DECORATION_NON_READABLE        = 0x00000200,
  SPV_REFLECT_DECORATION_PATCH               = 0x00000400,
  SPV_REFLECT_DECORATION_PER_VERTEX          = 0x00000800,
  SPV_REFLECT_DECORATION_PER_TASK            = 0x00001000,
  SPV_REFLECT_DECORATION_WEIGHT_TEXTURE      = 0x00002000,
  SPV_REFLECT_DECORATION_BLOCK_MATCH_TEXTURE = 0x00004000,
} SpvReflectDecorationFlagBits;

typedef uint32_t SpvReflectDecorationFlags;

// Based of SPV_GOOGLE_user_type
typedef enum SpvReflectUserType {
  SPV_REFLECT_USER_TYPE_INVALID = 0,
  SPV_REFLECT_USER_TYPE_CBUFFER,
  SPV_REFLECT_USER_TYPE_TBUFFER,
  SPV_REFLECT_USER_TYPE_APPEND_STRUCTURED_BUFFER,
  SPV_REFLECT_USER_TYPE_BUFFER,
  SPV_REFLECT_USER_TYPE_BYTE_ADDRESS_BUFFER,
  SPV_REFLECT_USER_TYPE_CONSTANT_BUFFER,
  SPV_REFLECT_USER_TYPE_CONSUME_STRUCTURED_BUFFER,
  SPV_REFLECT_USER_TYPE_INPUT_PATCH,
  SPV_REFLECT_USER_TYPE_OUTPUT_PATCH,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BUFFER,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_BYTE_ADDRESS_BUFFER,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_STRUCTURED_BUFFER,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_1D_ARRAY,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_2D_ARRAY,
  SPV_REFLECT_USER_TYPE_RASTERIZER_ORDERED_TEXTURE_3D,
  SPV_REFLECT_USER_TYPE_RAYTRACING_ACCELERATION_STRUCTURE,
  SPV_REFLECT_USER_TYPE_RW_BUFFER,
  SPV_REFLECT_USER_TYPE_RW_BYTE_ADDRESS_BUFFER,
  SPV_REFLECT_USER_TYPE_RW_STRUCTURED_BUFFER,
  SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D,
  SPV_REFLECT_USER_TYPE_RW_TEXTURE_1D_ARRAY,
  SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D,
  SPV_REFLECT_USER_TYPE_RW_TEXTURE_2D_ARRAY,
  SPV_REFLECT_USER_TYPE_RW_TEXTURE_3D,
  SPV_REFLECT_USER_TYPE_STRUCTURED_BUFFER,
  SPV_REFLECT_USER_TYPE_SUBPASS_INPUT,
  SPV_REFLECT_USER_TYPE_SUBPASS_INPUT_MS,
  SPV_REFLECT_USER_TYPE_TEXTURE_1D,
  SPV_REFLECT_USER_TYPE_TEXTURE_1D_ARRAY,
  SPV_REFLECT_USER_TYPE_TEXTURE_2D,
  SPV_REFLECT_USER_TYPE_TEXTURE_2D_ARRAY,
  SPV_REFLECT_USER_TYPE_TEXTURE_2DMS,
  SPV_REFLECT_USER_TYPE_TEXTURE_2DMS_ARRAY,
  SPV_REFLECT_USER_TYPE_TEXTURE_3D,
  SPV_REFLECT_USER_TYPE_TEXTURE_BUFFER,
  SPV_REFLECT_USER_TYPE_TEXTURE_CUBE,
  SPV_REFLECT_USER_TYPE_TEXTURE_CUBE_ARRAY,
} SpvReflectUserType;

/*!

 @enum SpvReflectResourceType

*/
typedef enum SpvReflectResourceType {
  SPV_REFLECT_RESOURCE_FLAG_UNDEFINED = 0x00000000,
  SPV_REFLECT_RESOURCE_FLAG_SAMPLER   = 0x00000001,
  SPV_REFLECT_RESOURCE_FLAG_CBV       = 0x00000002,
  SPV_REFLECT_RESOURCE_FLAG_SRV       = 0x00000004,
  SPV_REFLECT_RESOURCE_FLAG_UAV       = 0x00000008,
} SpvReflectResourceType;

/*!
 @enum SpvReflectFormat

 Numeric values mirror the corresponding VkFormat enumerants so the two can
 be cast freely.

*/
typedef enum SpvReflectFormat {
  SPV_REFLECT_FORMAT_UNDEFINED           =   0, // = VK_FORMAT_UNDEFINED
  SPV_REFLECT_FORMAT_R16_UINT            =  74, // = VK_FORMAT_R16_UINT
  SPV_REFLECT_FORMAT_R16_SINT            =  75, // = VK_FORMAT_R16_SINT
  SPV_REFLECT_FORMAT_R16_SFLOAT          =  76, // = VK_FORMAT_R16_SFLOAT
  SPV_REFLECT_FORMAT_R16G16_UINT         =  81, // = VK_FORMAT_R16G16_UINT
  SPV_REFLECT_FORMAT_R16G16_SINT         =  82, // = VK_FORMAT_R16G16_SINT
  SPV_REFLECT_FORMAT_R16G16_SFLOAT       =  83, // = VK_FORMAT_R16G16_SFLOAT
  SPV_REFLECT_FORMAT_R16G16B16_UINT      =  88, // = VK_FORMAT_R16G16B16_UINT
  SPV_REFLECT_FORMAT_R16G16B16_SINT      =  89, // = VK_FORMAT_R16G16B16_SINT
  SPV_REFLECT_FORMAT_R16G16B16_SFLOAT    =  90, // = VK_FORMAT_R16G16B16_SFLOAT
  SPV_REFLECT_FORMAT_R16G16B16A16_UINT   =  95, // = VK_FORMAT_R16G16B16A16_UINT
  SPV_REFLECT_FORMAT_R16G16B16A16_SINT   =  96, // = VK_FORMAT_R16G16B16A16_SINT
  SPV_REFLECT_FORMAT_R16G16B16A16_SFLOAT =  97, // = VK_FORMAT_R16G16B16A16_SFLOAT
  SPV_REFLECT_FORMAT_R32_UINT            =  98, // = VK_FORMAT_R32_UINT
  SPV_REFLECT_FORMAT_R32_SINT            =  99, // = VK_FORMAT_R32_SINT
  SPV_REFLECT_FORMAT_R32_SFLOAT          = 100, // = VK_FORMAT_R32_SFLOAT
  SPV_REFLECT_FORMAT_R32G32_UINT         = 101, // = VK_FORMAT_R32G32_UINT
  SPV_REFLECT_FORMAT_R32G32_SINT         = 102, // = VK_FORMAT_R32G32_SINT
  SPV_REFLECT_FORMAT_R32G32_SFLOAT       = 103, // = VK_FORMAT_R32G32_SFLOAT
  SPV_REFLECT_FORMAT_R32G32B32_UINT      = 104, // = VK_FORMAT_R32G32B32_UINT
  SPV_REFLECT_FORMAT_R32G32B32_SINT      = 105, // = VK_FORMAT_R32G32B32_SINT
  SPV_REFLECT_FORMAT_R32G32B32_SFLOAT    = 106, // = VK_FORMAT_R32G32B32_SFLOAT
  SPV_REFLECT_FORMAT_R32G32B32A32_UINT   = 107, // = VK_FORMAT_R32G32B32A32_UINT
  SPV_REFLECT_FORMAT_R32G32B32A32_SINT   = 108, // = VK_FORMAT_R32G32B32A32_SINT
  SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT = 109, // = VK_FORMAT_R32G32B32A32_SFLOAT
  SPV_REFLECT_FORMAT_R64_UINT            = 110, // = VK_FORMAT_R64_UINT
  SPV_REFLECT_FORMAT_R64_SINT            = 111, // = VK_FORMAT_R64_SINT
  SPV_REFLECT_FORMAT_R64_SFLOAT          = 112, // = VK_FORMAT_R64_SFLOAT
  SPV_REFLECT_FORMAT_R64G64_UINT         = 113, // = VK_FORMAT_R64G64_UINT
  SPV_REFLECT_FORMAT_R64G64_SINT         = 114, // = VK_FORMAT_R64G64_SINT
  SPV_REFLECT_FORMAT_R64G64_SFLOAT       = 115, // = VK_FORMAT_R64G64_SFLOAT
  SPV_REFLECT_FORMAT_R64G64B64_UINT      = 116, // = VK_FORMAT_R64G64B64_UINT
  SPV_REFLECT_FORMAT_R64G64B64_SINT      = 117, // = VK_FORMAT_R64G64B64_SINT
  SPV_REFLECT_FORMAT_R64G64B64_SFLOAT    = 118, // = VK_FORMAT_R64G64B64_SFLOAT
  SPV_REFLECT_FORMAT_R64G64B64A64_UINT   = 119, // = VK_FORMAT_R64G64B64A64_UINT
  SPV_REFLECT_FORMAT_R64G64B64A64_SINT   = 120, // = VK_FORMAT_R64G64B64A64_SINT
  SPV_REFLECT_FORMAT_R64G64B64A64_SFLOAT = 121, // = VK_FORMAT_R64G64B64A64_SFLOAT
} SpvReflectFormat;

/*!

 @enum SpvReflectVariableFlagBits

*/
enum SpvReflectVariableFlagBits{
  SPV_REFLECT_VARIABLE_FLAGS_NONE   = 0x00000000,
  SPV_REFLECT_VARIABLE_FLAGS_UNUSED = 0x00000001,
  // If variable points to a copy of the PhysicalStorageBuffer struct
  SPV_REFLECT_VARIABLE_FLAGS_PHYSICAL_POINTER_COPY = 0x00000002,
};

typedef uint32_t SpvReflectVariableFlags;

/*!

 @enum SpvReflectDescriptorType

 Numeric values mirror the corresponding VkDescriptorType enumerants.

*/
typedef enum SpvReflectDescriptorType {
  SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER                    =          0, // = VK_DESCRIPTOR_TYPE_SAMPLER
  SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER     =          1, // = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
  SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE              =          2, // = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE              =          3, // = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER       =          4, // = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER       =          5, // = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER             =          6, // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER             =          7, // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
  SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC     =          8, // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
  SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC     =          9, // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
  SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT           =         10, // = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
  SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000  // = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR
} SpvReflectDescriptorType;

/*!

 @enum SpvReflectShaderStageFlagBits

 Bit values mirror the corresponding VkShaderStageFlagBits enumerants.

*/
typedef enum SpvReflectShaderStageFlagBits {
  SPV_REFLECT_SHADER_STAGE_VERTEX_BIT                  = 0x00000001, // = VK_SHADER_STAGE_VERTEX_BIT
  SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT    = 0x00000002, // = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
  SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, // = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
  SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT                = 0x00000008, // = VK_SHADER_STAGE_GEOMETRY_BIT
  SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT                = 0x00000010, // = VK_SHADER_STAGE_FRAGMENT_BIT
  SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT                 = 0x00000020, // = VK_SHADER_STAGE_COMPUTE_BIT
  SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV                 = 0x00000040, // = VK_SHADER_STAGE_TASK_BIT_NV
  SPV_REFLECT_SHADER_STAGE_TASK_BIT_EXT                = SPV_REFLECT_SHADER_STAGE_TASK_BIT_NV, // = VK_SHADER_STAGE_TASK_BIT_EXT
  SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV                 = 0x00000080, // = VK_SHADER_STAGE_MESH_BIT_NV
  SPV_REFLECT_SHADER_STAGE_MESH_BIT_EXT                = SPV_REFLECT_SHADER_STAGE_MESH_BIT_NV, // = VK_SHADER_STAGE_MESH_BIT_EXT
  SPV_REFLECT_SHADER_STAGE_RAYGEN_BIT_KHR              = 0x00000100, // = VK_SHADER_STAGE_RAYGEN_BIT_KHR
  SPV_REFLECT_SHADER_STAGE_ANY_HIT_BIT_KHR             = 0x00000200, // = VK_SHADER_STAGE_ANY_HIT_BIT_KHR
  SPV_REFLECT_SHADER_STAGE_CLOSEST_HIT_BIT_KHR         = 0x00000400, // = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
  SPV_REFLECT_SHADER_STAGE_MISS_BIT_KHR                = 0x00000800, // = VK_SHADER_STAGE_MISS_BIT_KHR
  SPV_REFLECT_SHADER_STAGE_INTERSECTION_BIT_KHR        = 0x00001000, // = VK_SHADER_STAGE_INTERSECTION_BIT_KHR
  SPV_REFLECT_SHADER_STAGE_CALLABLE_BIT_KHR            = 0x00002000, // = VK_SHADER_STAGE_CALLABLE_BIT_KHR
} SpvReflectShaderStageFlagBits;

/*!
@enum SpvReflectGenerator */ typedef enum SpvReflectGenerator { SPV_REFLECT_GENERATOR_KHRONOS_LLVM_SPIRV_TRANSLATOR = 6, SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_ASSEMBLER = 7, SPV_REFLECT_GENERATOR_KHRONOS_GLSLANG_REFERENCE_FRONT_END = 8, SPV_REFLECT_GENERATOR_GOOGLE_SHADERC_OVER_GLSLANG = 13, SPV_REFLECT_GENERATOR_GOOGLE_SPIREGG = 14, SPV_REFLECT_GENERATOR_GOOGLE_RSPIRV = 15, SPV_REFLECT_GENERATOR_X_LEGEND_MESA_MESAIR_SPIRV_TRANSLATOR = 16, SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_LINKER = 17, SPV_REFLECT_GENERATOR_WINE_VKD3D_SHADER_COMPILER = 18, SPV_REFLECT_GENERATOR_CLAY_CLAY_SHADER_COMPILER = 19, } SpvReflectGenerator; enum { SPV_REFLECT_MAX_ARRAY_DIMS = 32, SPV_REFLECT_MAX_DESCRIPTOR_SETS = 64, }; enum { SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE = ~0, SPV_REFLECT_SET_NUMBER_DONT_CHANGE = ~0 }; typedef struct SpvReflectNumericTraits { struct Scalar { uint32_t width; uint32_t signedness; } scalar; struct Vector { uint32_t component_count; } vector; struct Matrix { uint32_t column_count; uint32_t row_count; uint32_t stride; // Measured in bytes } matrix; } SpvReflectNumericTraits; typedef struct SpvReflectImageTraits { SpvDim dim; uint32_t depth; uint32_t arrayed; uint32_t ms; // 0: single-sampled; 1: multisampled uint32_t sampled; SpvImageFormat image_format; } SpvReflectImageTraits; typedef enum SpvReflectArrayDimType { SPV_REFLECT_ARRAY_DIM_RUNTIME = 0, // OpTypeRuntimeArray } SpvReflectArrayDimType; typedef struct SpvReflectArrayTraits { uint32_t dims_count; // Each entry is either: // - specialization constant dimension // - OpTypeRuntimeArray // - the array length otherwise uint32_t dims[SPV_REFLECT_MAX_ARRAY_DIMS]; // Stores Ids for dimensions that are specialization constants uint32_t spec_constant_op_ids[SPV_REFLECT_MAX_ARRAY_DIMS]; uint32_t stride; // Measured in bytes } SpvReflectArrayTraits; typedef struct SpvReflectBindingArrayTraits { uint32_t dims_count; uint32_t dims[SPV_REFLECT_MAX_ARRAY_DIMS]; } SpvReflectBindingArrayTraits; /*! 
 @struct SpvReflectTypeDescription
 @brief Information about an OpType* instruction
*/
typedef struct SpvReflectTypeDescription {
  uint32_t    id;
  SpvOp       op;
  const char* type_name;
  // Non-NULL if type is member of a struct
  const char* struct_member_name;
  SpvStorageClass           storage_class;
  SpvReflectTypeFlags       type_flags;
  SpvReflectDecorationFlags decoration_flags;

  struct Traits {
    SpvReflectNumericTraits numeric;
    SpvReflectImageTraits   image;
    SpvReflectArrayTraits   array;
  } traits;

  // If underlying type is a struct (e.g. array of structs)
  // this gives access to the OpTypeStruct
  struct SpvReflectTypeDescription* struct_type_description;

  // Some pointers to SpvReflectTypeDescription are really
  // just copies of another reference to the same OpType
  uint32_t copied;

  // @deprecated use struct_type_description instead
  uint32_t member_count;
  // @deprecated use struct_type_description instead
  struct SpvReflectTypeDescription* members;
} SpvReflectTypeDescription;

/*! @struct SpvReflectInterfaceVariable
    @brief The OpVariable that is either an Input or Output to the module
*/
typedef struct SpvReflectInterfaceVariable {
  uint32_t        spirv_id;
  const char*     name;
  uint32_t        location;
  uint32_t        component;
  SpvStorageClass storage_class;
  const char*     semantic;
  SpvReflectDecorationFlags decoration_flags;
  SpvBuiltIn                built_in;
  SpvReflectNumericTraits   numeric;
  SpvReflectArrayTraits     array;

  uint32_t                             member_count;
  struct SpvReflectInterfaceVariable*  members;

  SpvReflectFormat format;

  // NOTE: SPIR-V shares type references for variables
  // that have the same underlying type. This means
  // that the same type name will appear for multiple
  // variables.
  SpvReflectTypeDescription* type_description;

  struct {
    // NOTE(review): appears to be the SPIR-V word offset of this variable's
    // Location decoration, used when patching the bytecode — confirm against
    // spirv_reflect.c before relying on it.
    uint32_t location;
  } word_offset;
} SpvReflectInterfaceVariable;

/*!
 @struct SpvReflectBlockVariable
*/
typedef struct SpvReflectBlockVariable {
  uint32_t    spirv_id;
  const char* name;
  // For Push Constants, this is the lowest offset of all members
  uint32_t offset;           // Measured in bytes
  uint32_t absolute_offset;  // Measured in bytes
  uint32_t size;             // Measured in bytes
  uint32_t padded_size;      // Measured in bytes
  SpvReflectDecorationFlags decoration_flags;
  SpvReflectNumericTraits   numeric;
  SpvReflectArrayTraits     array;
  SpvReflectVariableFlags   flags;

  uint32_t                         member_count;
  struct SpvReflectBlockVariable*  members;

  SpvReflectTypeDescription* type_description;

  struct {
    uint32_t offset;
  } word_offset;
} SpvReflectBlockVariable;

/*! @struct SpvReflectDescriptorBinding
*/
typedef struct SpvReflectDescriptorBinding {
  uint32_t    spirv_id;
  const char* name;
  uint32_t    binding;
  uint32_t    input_attachment_index;
  uint32_t    set;
  SpvReflectDescriptorType     descriptor_type;
  SpvReflectResourceType       resource_type;
  SpvReflectImageTraits        image;
  SpvReflectBlockVariable      block;
  SpvReflectBindingArrayTraits array;
  uint32_t                     count;
  uint32_t                     accessed;
  uint32_t                     uav_counter_id;
  struct SpvReflectDescriptorBinding* uav_counter_binding;
  uint32_t                     byte_address_buffer_offset_count;
  uint32_t*                    byte_address_buffer_offsets;

  SpvReflectTypeDescription* type_description;

  struct {
    uint32_t binding;
    uint32_t set;
  } word_offset;

  SpvReflectDecorationFlags decoration_flags;
  // Requires SPV_GOOGLE_user_type
  SpvReflectUserType user_type;
} SpvReflectDescriptorBinding;

/*! @struct SpvReflectDescriptorSet
*/
typedef struct SpvReflectDescriptorSet {
  uint32_t                      set;
  uint32_t                      binding_count;
  SpvReflectDescriptorBinding** bindings;
} SpvReflectDescriptorSet;

typedef enum SpvReflectExecutionModeValue {
  SPV_REFLECT_EXECUTION_MODE_SPEC_CONSTANT = 0xFFFFFFFF  // specialization constant
} SpvReflectExecutionModeValue;

/*!
 @struct SpvReflectEntryPoint
*/
typedef struct SpvReflectEntryPoint {
  const char* name;
  uint32_t    id;

  SpvExecutionModel             spirv_execution_model;
  SpvReflectShaderStageFlagBits shader_stage;

  uint32_t                       input_variable_count;
  SpvReflectInterfaceVariable**  input_variables;
  uint32_t                       output_variable_count;
  SpvReflectInterfaceVariable**  output_variables;
  uint32_t                       interface_variable_count;
  SpvReflectInterfaceVariable*   interface_variables;

  uint32_t                  descriptor_set_count;
  SpvReflectDescriptorSet*  descriptor_sets;

  uint32_t  used_uniform_count;
  uint32_t* used_uniforms;
  uint32_t  used_push_constant_count;
  uint32_t* used_push_constants;

  uint32_t          execution_mode_count;
  SpvExecutionMode* execution_modes;

  struct LocalSize {
    uint32_t x;
    uint32_t y;
    uint32_t z;
  } local_size;
  uint32_t invocations;      // valid for geometry
  uint32_t output_vertices;  // valid for geometry, tessellation
} SpvReflectEntryPoint;

/*! @struct SpvReflectCapability
*/
typedef struct SpvReflectCapability {
  SpvCapability value;
  uint32_t      word_offset;
} SpvReflectCapability;

/*! @struct SpvReflectSpecializationConstant
*/
typedef struct SpvReflectSpecializationConstant {
  uint32_t    spirv_id;
  uint32_t    constant_id;
  const char* name;
} SpvReflectSpecializationConstant;

/*!
 @struct SpvReflectShaderModule
 @brief Top-level reflection data for a SPIR-V module. Create with
        spvReflectCreateShaderModule / spvReflectCreateShaderModule2 and
        release with spvReflectDestroyShaderModule.
*/
typedef struct SpvReflectShaderModule {
  SpvReflectGenerator   generator;
  const char*           entry_point_name;
  uint32_t              entry_point_id;
  uint32_t              entry_point_count;
  SpvReflectEntryPoint* entry_points;
  SpvSourceLanguage     source_language;
  uint32_t              source_language_version;
  const char*           source_file;
  const char*           source_source;
  uint32_t              capability_count;
  SpvReflectCapability* capabilities;
  SpvExecutionModel             spirv_execution_model;  // Uses value(s) from first entry point
  SpvReflectShaderStageFlagBits shader_stage;           // Uses value(s) from first entry point
  uint32_t                      descriptor_binding_count;  // Uses value(s) from first entry point
  SpvReflectDescriptorBinding*  descriptor_bindings;       // Uses value(s) from first entry point
  uint32_t                      descriptor_set_count;      // Uses value(s) from first entry point
  SpvReflectDescriptorSet       descriptor_sets[SPV_REFLECT_MAX_DESCRIPTOR_SETS];  // Uses value(s) from first entry point
  uint32_t                      input_variable_count;      // Uses value(s) from first entry point
  SpvReflectInterfaceVariable** input_variables;           // Uses value(s) from first entry point
  uint32_t                      output_variable_count;     // Uses value(s) from first entry point
  SpvReflectInterfaceVariable** output_variables;          // Uses value(s) from first entry point
  uint32_t                      interface_variable_count;  // Uses value(s) from first entry point
  SpvReflectInterfaceVariable*  interface_variables;       // Uses value(s) from first entry point
  uint32_t                      push_constant_block_count; // Uses value(s) from first entry point
  SpvReflectBlockVariable*      push_constant_blocks;      // Uses value(s) from first entry point
  uint32_t                      spec_constant_count;       // Uses value(s) from first entry point
  SpvReflectSpecializationConstant* spec_constants;        // Uses value(s) from first entry point

  // Library-managed state; not part of the public reflection data.
  struct Internal {
    SpvReflectModuleFlags module_flags;
    size_t                spirv_size;
    uint32_t*             spirv_code;
    uint32_t              spirv_word_count;

    size_t                     type_description_count;
    SpvReflectTypeDescription* type_descriptions;
  } * _internal;
} SpvReflectShaderModule;

#if defined(__cplusplus)
extern "C" { #endif /*! @fn spvReflectCreateShaderModule @param size Size in bytes of SPIR-V code. @param p_code Pointer to SPIR-V code. @param p_module Pointer to an instance of SpvReflectShaderModule. @return SPV_REFLECT_RESULT_SUCCESS on success. */ SpvReflectResult spvReflectCreateShaderModule( size_t size, const void* p_code, SpvReflectShaderModule* p_module ); /*! @fn spvReflectCreateShaderModule2 @param flags Flags for module creations. @param size Size in bytes of SPIR-V code. @param p_code Pointer to SPIR-V code. @param p_module Pointer to an instance of SpvReflectShaderModule. @return SPV_REFLECT_RESULT_SUCCESS on success. */ SpvReflectResult spvReflectCreateShaderModule2( SpvReflectModuleFlags flags, size_t size, const void* p_code, SpvReflectShaderModule* p_module ); SPV_REFLECT_DEPRECATED("renamed to spvReflectCreateShaderModule") SpvReflectResult spvReflectGetShaderModule( size_t size, const void* p_code, SpvReflectShaderModule* p_module ); /*! @fn spvReflectDestroyShaderModule @param p_module Pointer to an instance of SpvReflectShaderModule. */ void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module); /*! @fn spvReflectGetCodeSize @param p_module Pointer to an instance of SpvReflectShaderModule. @return Returns the size of the SPIR-V in bytes */ uint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module); /*! @fn spvReflectGetCode @param p_module Pointer to an instance of SpvReflectShaderModule. @return Returns a const pointer to the compiled SPIR-V bytecode. */ const uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module); /*! @fn spvReflectGetEntryPoint @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point Name of the requested entry point. @return Returns a const pointer to the requested entry point, or NULL if it's not found. */ const SpvReflectEntryPoint* spvReflectGetEntryPoint( const SpvReflectShaderModule* p_module, const char* entry_point ); /*! 
 @fn spvReflectEnumerateDescriptorBindings

 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  p_count      If pp_bindings is NULL, the module's descriptor binding
                      count (across all descriptor sets) will be stored here.
                      If pp_bindings is not NULL, *p_count must contain the
                      module's descriptor binding count.
 @param  pp_bindings  If NULL, the module's total descriptor binding count
                      will be written to *p_count.
                      If non-NULL, pp_bindings must point to an array with
                      *p_count entries, where pointers to the module's
                      descriptor bindings will be written. The caller must not
                      free the binding pointers written to this array.
 @return              If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                      Otherwise, the error code indicates the cause of the
                      failure.

*/
SpvReflectResult spvReflectEnumerateDescriptorBindings(
  const SpvReflectShaderModule* p_module,
  uint32_t*                     p_count,
  SpvReflectDescriptorBinding** pp_bindings
);

/*! @fn spvReflectEnumerateEntryPointDescriptorBindings
 @brief  Creates a listing of all descriptor bindings that are used in the
         static call tree of the given entry point.
 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The name of the entry point to get the descriptor
                      bindings for.
 @param  p_count      If pp_bindings is NULL, the entry point's descriptor
                      binding count (across all descriptor sets) will be
                      stored here.
                      If pp_bindings is not NULL, *p_count must contain the
                      entry point's descriptor binding count.
 @param  pp_bindings  If NULL, the entry point's total descriptor binding
                      count will be written to *p_count.
                      If non-NULL, pp_bindings must point to an array with
                      *p_count entries, where pointers to the entry point's
                      descriptor bindings will be written. The caller must not
                      free the binding pointers written to this array.
 @return              If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                      Otherwise, the error code indicates the cause of the
                      failure.
*/ SpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings ); /*! @fn spvReflectEnumerateDescriptorSets @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_sets is NULL, the module's descriptor set count will be stored here. If pp_sets is not NULL, *p_count must contain the module's descriptor set count. @param pp_sets If NULL, the module's total descriptor set count will be written to *p_count. If non-NULL, pp_sets must point to an array with *p_count entries, where pointers to the module's descriptor sets will be written. The caller must not free the descriptor set pointers written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateDescriptorSets( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets ); /*! @fn spvReflectEnumerateEntryPointDescriptorSets @brief Creates a listing of all descriptor sets and their bindings that are used in the static call tree of a given entry point. @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point The name of the entry point to get the descriptor bindings for. @param p_count If pp_sets is NULL, the module's descriptor set count will be stored here. If pp_sets is not NULL, *p_count must contain the module's descriptor set count. @param pp_sets If NULL, the module's total descriptor set count will be written to *p_count. If non-NULL, pp_sets must point to an array with *p_count entries, where pointers to the module's descriptor sets will be written. The caller must not free the descriptor set pointers written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. 
*/ SpvReflectResult spvReflectEnumerateEntryPointDescriptorSets( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets ); /*! @fn spvReflectEnumerateInterfaceVariables @brief If the module contains multiple entry points, this will only get the interface variables for the first one. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_variables is NULL, the module's interface variable count will be stored here. If pp_variables is not NULL, *p_count must contain the module's interface variable count. @param pp_variables If NULL, the module's interface variable count will be written to *p_count. If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the module's interface variables will be written. The caller must not free the interface variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateInterfaceVariables( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumerateEntryPointInterfaceVariables @brief Enumerate the interface variables for a given entry point. @param entry_point The name of the entry point to get the interface variables for. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_variables is NULL, the entry point's interface variable count will be stored here. If pp_variables is not NULL, *p_count must contain the entry point's interface variable count. @param pp_variables If NULL, the entry point's interface variable count will be written to *p_count. If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the entry point's interface variables will be written. The caller must not free the interface variables written to this array. 
@return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumerateInputVariables @brief If the module contains multiple entry points, this will only get the input variables for the first one. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_variables is NULL, the module's input variable count will be stored here. If pp_variables is not NULL, *p_count must contain the module's input variable count. @param pp_variables If NULL, the module's input variable count will be written to *p_count. If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the module's input variables will be written. The caller must not free the interface variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateInputVariables( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumerateEntryPointInputVariables @brief Enumerate the input variables for a given entry point. @param entry_point The name of the entry point to get the input variables for. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_variables is NULL, the entry point's input variable count will be stored here. If pp_variables is not NULL, *p_count must contain the entry point's input variable count. @param pp_variables If NULL, the entry point's input variable count will be written to *p_count. If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the entry point's input variables will be written. 
The caller must not free the interface variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateEntryPointInputVariables( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumerateOutputVariables @brief Note: If the module contains multiple entry points, this will only get the output variables for the first one. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_variables is NULL, the module's output variable count will be stored here. If pp_variables is not NULL, *p_count must contain the module's output variable count. @param pp_variables If NULL, the module's output variable count will be written to *p_count. If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the module's output variables will be written. The caller must not free the interface variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateOutputVariables( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumerateEntryPointOutputVariables @brief Enumerate the output variables for a given entry point. @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point The name of the entry point to get the output variables for. @param p_count If pp_variables is NULL, the entry point's output variable count will be stored here. If pp_variables is not NULL, *p_count must contain the entry point's output variable count. @param pp_variables If NULL, the entry point's output variable count will be written to *p_count. 
If non-NULL, pp_variables must point to an array with *p_count entries, where pointers to the entry point's output variables will be written. The caller must not free the interface variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumerateEntryPointOutputVariables( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables ); /*! @fn spvReflectEnumeratePushConstantBlocks @brief Note: If the module contains multiple entry points, this will only get the push constant blocks for the first one. @param p_module Pointer to an instance of SpvReflectShaderModule. @param p_count If pp_blocks is NULL, the module's push constant block count will be stored here. If pp_blocks is not NULL, *p_count must contain the module's push constant block count. @param pp_blocks If NULL, the module's push constant block count will be written to *p_count. If non-NULL, pp_blocks must point to an array with *p_count entries, where pointers to the module's push constant blocks will be written. The caller must not free the block variables written to this array. @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. Otherwise, the error code indicates the cause of the failure. */ SpvReflectResult spvReflectEnumeratePushConstantBlocks( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks ); SPV_REFLECT_DEPRECATED("renamed to spvReflectEnumeratePushConstantBlocks") SpvReflectResult spvReflectEnumeratePushConstants( const SpvReflectShaderModule* p_module, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks ); /*! @fn spvReflectEnumerateEntryPointPushConstantBlocks @brief Enumerate the push constant blocks used in the static call tree of a given entry point. @param p_module Pointer to an instance of SpvReflectShaderModule. 
 @param  p_count      If pp_blocks is NULL, the entry point's push constant
                      block count will be stored here.
                      If pp_blocks is not NULL, *p_count must contain the
                      entry point's push constant block count.
 @param  pp_blocks    If NULL, the entry point's push constant block count
                      will be written to *p_count.
                      If non-NULL, pp_blocks must point to an array with
                      *p_count entries, where pointers to the entry point's
                      push constant blocks will be written. The caller must
                      not free the block variables written to this array.
 @return              If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                      Otherwise, the error code indicates the cause of the
                      failure.

*/
SpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  uint32_t*                     p_count,
  SpvReflectBlockVariable**     pp_blocks
);

/*! @fn spvReflectEnumerateSpecializationConstants
 @param  p_module      Pointer to an instance of SpvReflectShaderModule.
 @param  p_count       If pp_constants is NULL, the module's specialization
                       constant count will be stored here.
                       If pp_constants is not NULL, *p_count must contain the
                       module's specialization constant count.
 @param  pp_constants  If NULL, the module's specialization constant count
                       will be written to *p_count.
                       If non-NULL, pp_constants must point to an array with
                       *p_count entries, where pointers to the module's
                       specialization constants will be written. The caller
                       must not free the variables written to this array.
 @return               If successful, returns SPV_REFLECT_RESULT_SUCCESS.
                       Otherwise, the error code indicates the cause of the
                       failure.

*/
SpvReflectResult spvReflectEnumerateSpecializationConstants(
  const SpvReflectShaderModule*      p_module,
  uint32_t*                          p_count,
  SpvReflectSpecializationConstant** pp_constants
);

/*! @fn spvReflectGetDescriptorBinding

 @param  p_module        Pointer to an instance of SpvReflectShaderModule.
 @param  binding_number  The "binding" value of the requested descriptor
                         binding.
 @param  set_number      The "set" value of the requested descriptor
                         binding.
 @param  p_result        If successful, SPV_REFLECT_RESULT_SUCCESS will be
                         written to *p_result. Otherwise, an error code
                         indicating the cause of the failure will be stored
                         here.
 @return                 If the module contains a descriptor binding that
                         matches the provided [binding_number, set_number]
                         values, a pointer to that binding is returned. The
                         caller must not free this pointer.
                         If no match can be found, or if an unrelated error
                         occurs, the return value will be NULL. Detailed
                         error results are written to *pResult.
 @note                   If the module contains multiple descriptor bindings
                         with the same set and binding numbers, there are
                         no guarantees about which binding will be returned.

*/
const SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding(
  const SpvReflectShaderModule* p_module,
  uint32_t                      binding_number,
  uint32_t                      set_number,
  SpvReflectResult*             p_result
);

/*! @fn spvReflectGetEntryPointDescriptorBinding
 @brief  Get the descriptor binding with the given binding number and set
         number that is used in the static call tree of a certain entry
         point.
 @param  p_module        Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point     The entry point to get the binding from.
 @param  binding_number  The "binding" value of the requested descriptor
                         binding.
 @param  set_number      The "set" value of the requested descriptor
                         binding.
 @param  p_result        If successful, SPV_REFLECT_RESULT_SUCCESS will be
                         written to *p_result. Otherwise, an error code
                         indicating the cause of the failure will be stored
                         here.
 @return                 If the entry point contains a descriptor binding that
                         matches the provided [binding_number, set_number]
                         values, a pointer to that binding is returned. The
                         caller must not free this pointer.
                         If no match can be found, or if an unrelated error
                         occurs, the return value will be NULL. Detailed
                         error results are written to *pResult.
 @note                   If the entry point contains multiple descriptor
                         bindings with the same set and binding numbers, there
                         are no guarantees about which binding will be
                         returned.
*/ const SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result ); /*! @fn spvReflectGetDescriptorSet @param p_module Pointer to an instance of SpvReflectShaderModule. @param set_number The "set" value of the requested descriptor set. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the module contains a descriptor set with the provided set_number, a pointer to that set is returned. The caller must not free this pointer. If no match can be found, or if an unrelated error occurs, the return value will be NULL. Detailed error results are written to *pResult. */ const SpvReflectDescriptorSet* spvReflectGetDescriptorSet( const SpvReflectShaderModule* p_module, uint32_t set_number, SpvReflectResult* p_result ); /*! @fn spvReflectGetEntryPointDescriptorSet @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point The entry point to get the descriptor set from. @param set_number The "set" value of the requested descriptor set. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the entry point contains a descriptor set with the provided set_number, a pointer to that set is returned. The caller must not free this pointer. If no match can be found, or if an unrelated error occurs, the return value will be NULL. Detailed error results are written to *pResult. 
*/ const SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t set_number, SpvReflectResult* p_result ); /* @fn spvReflectGetInputVariableByLocation @param p_module Pointer to an instance of SpvReflectShaderModule. @param location The "location" value of the requested input variable. A location of 0xFFFFFFFF will always return NULL with *p_result == ELEMENT_NOT_FOUND. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the module contains an input interface variable with the provided location value, a pointer to that variable is returned. The caller must not free this pointer. If no match can be found, or if an unrelated error occurs, the return value will be NULL. Detailed error results are written to *pResult. @note */ const SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation( const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result ); SPV_REFLECT_DEPRECATED("renamed to spvReflectGetInputVariableByLocation") const SpvReflectInterfaceVariable* spvReflectGetInputVariable( const SpvReflectShaderModule* p_module, uint32_t location, SpvReflectResult* p_result ); /* @fn spvReflectGetEntryPointInputVariableByLocation @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point The entry point to get the input variable from. @param location The "location" value of the requested input variable. A location of 0xFFFFFFFF will always return NULL with *p_result == ELEMENT_NOT_FOUND. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the entry point contains an input interface variable with the provided location value, a pointer to that variable is returned. 
The caller must not free this pointer. If no match can be found, or if an unrelated error occurs, the return value will be NULL. Detailed error results are written to *pResult. @note */ const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation( const SpvReflectShaderModule* p_module, const char* entry_point, uint32_t location, SpvReflectResult* p_result ); /* @fn spvReflectGetInputVariableBySemantic @param p_module Pointer to an instance of SpvReflectShaderModule. @param semantic The "semantic" value of the requested input variable. A semantic of NULL will return NULL. A semantic of "" will always return NULL with *p_result == ELEMENT_NOT_FOUND. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the module contains an input interface variable with the provided semantic, a pointer to that variable is returned. The caller must not free this pointer. If no match can be found, or if an unrelated error occurs, the return value will be NULL. Detailed error results are written to *pResult. @note */ const SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic( const SpvReflectShaderModule* p_module, const char* semantic, SpvReflectResult* p_result ); /* @fn spvReflectGetEntryPointInputVariableBySemantic @param p_module Pointer to an instance of SpvReflectShaderModule. @param entry_point The entry point to get the input variable from. @param semantic The "semantic" value of the requested input variable. A semantic of NULL will return NULL. A semantic of "" will always return NULL with *p_result == ELEMENT_NOT_FOUND. @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be written to *p_result. Otherwise, a error code indicating the cause of the failure will be stored here. @return If the entry point contains an input interface variable with the provided semantic, a pointer to that variable is returned. 
The caller must not free this pointer.
   If no match can be found, or if an unrelated error occurs, the return
   value will be NULL. Detailed error results are written to *p_result.
 @note
*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  const char*                   semantic,
  SpvReflectResult*             p_result
);

/* @fn spvReflectGetOutputVariableByLocation

 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  location  The "location" value of the requested output variable.
                   A location of 0xFFFFFFFF will always return NULL with
                   *p_result == ELEMENT_NOT_FOUND.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be written
                   to *p_result. Otherwise, an error code indicating the
                   cause of the failure will be stored here.
 @return           If the module contains an output interface variable with
                   the provided location value, a pointer to that variable
                   is returned. The caller must not free this pointer.
                   If no match can be found, or if an unrelated error occurs,
                   the return value will be NULL. Detailed error results are
                   written to *p_result.
 @note
*/
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation(
  const SpvReflectShaderModule* p_module,
  uint32_t                      location,
  SpvReflectResult*             p_result
);
SPV_REFLECT_DEPRECATED("renamed to spvReflectGetOutputVariableByLocation")
const SpvReflectInterfaceVariable* spvReflectGetOutputVariable(
  const SpvReflectShaderModule* p_module,
  uint32_t                      location,
  SpvReflectResult*             p_result
);

/* @fn spvReflectGetEntryPointOutputVariableByLocation

 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the output variable from.
 @param  location     The "location" value of the requested output variable.
                      A location of 0xFFFFFFFF will always return NULL with
                      *p_result == ELEMENT_NOT_FOUND.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored here.
 @return              If the entry point contains an output interface
                      variable with the provided location value, a pointer to
                      that variable is returned. The caller must not free
                      this pointer. If no match can be found, or if an
                      unrelated error occurs, the return value will be NULL.
                      Detailed error results are written to *p_result.
 @note
*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  uint32_t                      location,
  SpvReflectResult*             p_result
);

/* @fn spvReflectGetOutputVariableBySemantic

 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  semantic  The "semantic" value of the requested output variable.
                   A semantic of NULL will return NULL.
                   A semantic of "" will always return NULL with
                   *p_result == ELEMENT_NOT_FOUND.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be written
                   to *p_result. Otherwise, an error code indicating the
                   cause of the failure will be stored here.
 @return           If the module contains an output interface variable with
                   the provided semantic, a pointer to that variable is
                   returned. The caller must not free this pointer.
                   If no match can be found, or if an unrelated error occurs,
                   the return value will be NULL. Detailed error results are
                   written to *p_result.
 @note
*/
const SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic(
  const SpvReflectShaderModule* p_module,
  const char*                   semantic,
  SpvReflectResult*             p_result
);

/* @fn spvReflectGetEntryPointOutputVariableBySemantic

 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the output variable from.
 @param  semantic     The "semantic" value of the requested output variable.
                      A semantic of NULL will return NULL.
                      A semantic of "" will always return NULL with
                      *p_result == ELEMENT_NOT_FOUND.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored here.
 @return              If the entry point contains an output interface
                      variable with the provided semantic, a pointer to that
                      variable is returned. The caller must not free this
                      pointer. If no match can be found, or if an unrelated
                      error occurs, the return value will be NULL. Detailed
                      error results are written to *p_result.
 @note
*/
const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  const char*                   semantic,
  SpvReflectResult*             p_result
);

/*! @fn spvReflectGetPushConstantBlock

 @param  p_module  Pointer to an instance of SpvReflectShaderModule.
 @param  index     The index of the desired block within the module's array
                   of push constant blocks.
 @param  p_result  If successful, SPV_REFLECT_RESULT_SUCCESS will be written
                   to *p_result. Otherwise, an error code indicating the
                   cause of the failure will be stored here.
 @return           If the provided index is within range, a pointer to the
                   corresponding push constant block is returned. The caller
                   must not free this pointer.
                   If no match can be found, or if an unrelated error occurs,
                   the return value will be NULL. Detailed error results are
                   written to *p_result.
*/
const SpvReflectBlockVariable* spvReflectGetPushConstantBlock(
  const SpvReflectShaderModule* p_module,
  uint32_t                      index,
  SpvReflectResult*             p_result
);
SPV_REFLECT_DEPRECATED("renamed to spvReflectGetPushConstantBlock")
const SpvReflectBlockVariable* spvReflectGetPushConstant(
  const SpvReflectShaderModule* p_module,
  uint32_t                      index,
  SpvReflectResult*             p_result
);

/*! @fn spvReflectGetEntryPointPushConstantBlock

 @brief  Get the push constant block corresponding to the given entry point.
         As by the Vulkan specification there can be no more than one push
         constant block used by a given entry point, so if there is one it
         will be returned, otherwise NULL will be returned.
 @param  p_module     Pointer to an instance of SpvReflectShaderModule.
 @param  entry_point  The entry point to get the push constant block from.
 @param  p_result     If successful, SPV_REFLECT_RESULT_SUCCESS will be
                      written to *p_result. Otherwise, an error code
                      indicating the cause of the failure will be stored here.
 @return              If the entry point uses a push constant block, a
                      pointer to it is returned. The caller must not free
                      this pointer. If no match can be found, or if an
                      unrelated error occurs, the return value will be NULL.
                      Detailed error results are written to *p_result.
*/
const SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock(
  const SpvReflectShaderModule* p_module,
  const char*                   entry_point,
  SpvReflectResult*             p_result
);

/*! @fn spvReflectChangeDescriptorBindingNumbers

 @brief  Assign new set and/or binding numbers to a descriptor binding.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode(). If the binding is used in multiple entry
         points within the module, it will be changed in all of them.
 @param  p_module            Pointer to an instance of SpvReflectShaderModule.
 @param  p_binding           Pointer to the descriptor binding to modify.
 @param  new_binding_number  The new binding number to assign to the provided
                             descriptor binding. To leave the binding number
                             unchanged, pass
                             SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE.
 @param  new_set_number      The new set number to assign to the provided
                             descriptor binding. Successfully changing a
                             descriptor binding's set number invalidates all
                             existing SpvReflectDescriptorBinding and
                             SpvReflectDescriptorSet pointers from this
                             module. To leave the set number unchanged, pass
                             SPV_REFLECT_SET_NUMBER_DONT_CHANGE.
 @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
         Otherwise, the error code indicates the cause of the failure.
*/
SpvReflectResult spvReflectChangeDescriptorBindingNumbers(
  SpvReflectShaderModule*            p_module,
  const SpvReflectDescriptorBinding* p_binding,
  uint32_t                           new_binding_number,
  uint32_t                           new_set_number
);
SPV_REFLECT_DEPRECATED("Renamed to spvReflectChangeDescriptorBindingNumbers")
SpvReflectResult spvReflectChangeDescriptorBindingNumber(
  SpvReflectShaderModule*            p_module,
  const SpvReflectDescriptorBinding* p_descriptor_binding,
  uint32_t                           new_binding_number,
  uint32_t                           optional_new_set_number
);

/*! @fn spvReflectChangeDescriptorSetNumber

 @brief  Assign a new set number to an entire descriptor set (including all
         descriptor bindings in that set).
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode(). If the descriptor set is used in multiple
         entry points within the module, it will be modified in all of them.
 @param  p_module        Pointer to an instance of SpvReflectShaderModule.
 @param  p_set           Pointer to the descriptor set to modify.
 @param  new_set_number  The new set number to assign to the provided
                         descriptor set, and all its descriptor bindings.
                         Successfully changing a descriptor binding's set
                         number invalidates all existing
                         SpvReflectDescriptorBinding and
                         SpvReflectDescriptorSet pointers from this module.
                         To leave the set number unchanged, pass
                         SPV_REFLECT_SET_NUMBER_DONT_CHANGE.
 @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
         Otherwise, the error code indicates the cause of the failure.
*/
SpvReflectResult spvReflectChangeDescriptorSetNumber(
  SpvReflectShaderModule*        p_module,
  const SpvReflectDescriptorSet* p_set,
  uint32_t                       new_set_number
);

/*! @fn spvReflectChangeInputVariableLocation

 @brief  Assign a new location to an input interface variable.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode(). It is the caller's responsibility to
         avoid assigning the same location to multiple input variables. If
         the input variable is used by multiple entry points in the module,
         it will be changed in all of them.
 @param  p_module          Pointer to an instance of SpvReflectShaderModule.
 @param  p_input_variable  Pointer to the input variable to update.
 @param  new_location      The new location to assign to p_input_variable.
 @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
         Otherwise, the error code indicates the cause of the failure.
*/
SpvReflectResult spvReflectChangeInputVariableLocation(
  SpvReflectShaderModule*            p_module,
  const SpvReflectInterfaceVariable* p_input_variable,
  uint32_t                           new_location
);

/*! @fn spvReflectChangeOutputVariableLocation

 @brief  Assign a new location to an output interface variable.
         In addition to updating the reflection data, this function modifies
         the underlying SPIR-V bytecode. The updated code can be retrieved
         with spvReflectGetCode(). It is the caller's responsibility to
         avoid assigning the same location to multiple output variables. If
         the output variable is used by multiple entry points in the module,
         it will be changed in all of them.
 @param  p_module           Pointer to an instance of SpvReflectShaderModule.
 @param  p_output_variable  Pointer to the output variable to update.
 @param  new_location       The new location to assign to p_output_variable.
 @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
         Otherwise, the error code indicates the cause of the failure.
*/
SpvReflectResult spvReflectChangeOutputVariableLocation(
  SpvReflectShaderModule*            p_module,
  const SpvReflectInterfaceVariable* p_output_variable,
  uint32_t                           new_location
);

/*! @fn spvReflectSourceLanguage

 @param  source_lang  The source language code.
 @return Returns string of source language specified in \a source_lang.
         The caller must not free the memory associated with this string.
*/ const char* spvReflectSourceLanguage(SpvSourceLanguage source_lang); /*! @fn spvReflectBlockVariableTypeName @param p_var Pointer to block variable. @return Returns string of block variable's type description type name or NULL if p_var is NULL. */ const char* spvReflectBlockVariableTypeName( const SpvReflectBlockVariable* p_var ); #if defined(__cplusplus) }; #endif #if defined(__cplusplus) && !defined(SPIRV_REFLECT_DISABLE_CPP_BINDINGS) #include #include #include namespace spv_reflect { /*! \class ShaderModule */ class ShaderModule { public: ShaderModule(); ShaderModule(size_t size, const void* p_code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE); ShaderModule(const std::vector& code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE); ShaderModule(const std::vector& code, SpvReflectModuleFlags flags = SPV_REFLECT_MODULE_FLAG_NONE); ~ShaderModule(); ShaderModule(ShaderModule&& other); ShaderModule& operator=(ShaderModule&& other); SpvReflectResult GetResult() const; const SpvReflectShaderModule& GetShaderModule() const; uint32_t GetCodeSize() const; const uint32_t* GetCode() const; const char* GetEntryPointName() const; const char* GetSourceFile() const; uint32_t GetEntryPointCount() const; const char* GetEntryPointName(uint32_t index) const; SpvReflectShaderStageFlagBits GetEntryPointShaderStage(uint32_t index) const; SpvReflectShaderStageFlagBits GetShaderStage() const; SPV_REFLECT_DEPRECATED("Renamed to GetShaderStage") SpvReflectShaderStageFlagBits GetVulkanShaderStage() const { return GetShaderStage(); } SpvReflectResult EnumerateDescriptorBindings(uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const; SpvReflectResult EnumerateEntryPointDescriptorBindings(const char* entry_point, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const; SpvReflectResult EnumerateDescriptorSets( uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const ; SpvReflectResult EnumerateEntryPointDescriptorSets(const char* 
entry_point, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const ; SpvReflectResult EnumerateInterfaceVariables(uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumerateEntryPointInterfaceVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumerateInputVariables(uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumerateEntryPointInputVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumerateOutputVariables(uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumerateEntryPointOutputVariables(const char* entry_point, uint32_t* p_count, SpvReflectInterfaceVariable** pp_variables) const; SpvReflectResult EnumeratePushConstantBlocks(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const; SpvReflectResult EnumerateEntryPointPushConstantBlocks(const char* entry_point, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const; SPV_REFLECT_DEPRECATED("Renamed to EnumeratePushConstantBlocks") SpvReflectResult EnumeratePushConstants(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const { return EnumeratePushConstantBlocks(p_count, pp_blocks); } SpvReflectResult EnumerateSpecializationConstants(uint32_t* p_count, SpvReflectSpecializationConstant** pp_constants) const; const SpvReflectDescriptorBinding* GetDescriptorBinding(uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const; const SpvReflectDescriptorBinding* GetEntryPointDescriptorBinding(const char* entry_point, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const; const SpvReflectDescriptorSet* GetDescriptorSet(uint32_t set_number, SpvReflectResult* p_result = nullptr) const; const SpvReflectDescriptorSet* GetEntryPointDescriptorSet(const char* entry_point, uint32_t 
set_number, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetInputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const; SPV_REFLECT_DEPRECATED("Renamed to GetInputVariableByLocation") const SpvReflectInterfaceVariable* GetInputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const { return GetInputVariableByLocation(location, p_result); } const SpvReflectInterfaceVariable* GetEntryPointInputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetInputVariableBySemantic(const char* semantic, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetEntryPointInputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetOutputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const; SPV_REFLECT_DEPRECATED("Renamed to GetOutputVariableByLocation") const SpvReflectInterfaceVariable* GetOutputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const { return GetOutputVariableByLocation(location, p_result); } const SpvReflectInterfaceVariable* GetEntryPointOutputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetOutputVariableBySemantic(const char* semantic, SpvReflectResult* p_result = nullptr) const; const SpvReflectInterfaceVariable* GetEntryPointOutputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const; const SpvReflectBlockVariable* GetPushConstantBlock(uint32_t index, SpvReflectResult* p_result = nullptr) const; SPV_REFLECT_DEPRECATED("Renamed to GetPushConstantBlock") const SpvReflectBlockVariable* GetPushConstant(uint32_t index, SpvReflectResult* p_result = nullptr) const { return 
GetPushConstantBlock(index, p_result); } const SpvReflectBlockVariable* GetEntryPointPushConstantBlock(const char* entry_point, SpvReflectResult* p_result = nullptr) const; SpvReflectResult ChangeDescriptorBindingNumbers(const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE, uint32_t optional_new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE); SPV_REFLECT_DEPRECATED("Renamed to ChangeDescriptorBindingNumbers") SpvReflectResult ChangeDescriptorBindingNumber(const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE, uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE) { return ChangeDescriptorBindingNumbers(p_binding, new_binding_number, new_set_number); } SpvReflectResult ChangeDescriptorSetNumber(const SpvReflectDescriptorSet* p_set, uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE); SpvReflectResult ChangeInputVariableLocation(const SpvReflectInterfaceVariable* p_input_variable, uint32_t new_location); SpvReflectResult ChangeOutputVariableLocation(const SpvReflectInterfaceVariable* p_output_variable, uint32_t new_location); private: // Make noncopyable ShaderModule(const ShaderModule&); ShaderModule& operator=(const ShaderModule&); private: mutable SpvReflectResult m_result = SPV_REFLECT_RESULT_NOT_READY; SpvReflectShaderModule m_module = {}; }; // ================================================================================================= // ShaderModule // ================================================================================================= /*! @fn ShaderModule */ inline ShaderModule::ShaderModule() {} /*! @fn ShaderModule @param size @param p_code */ inline ShaderModule::ShaderModule(size_t size, const void* p_code, SpvReflectModuleFlags flags) { m_result = spvReflectCreateShaderModule2( flags, size, p_code, &m_module); } /*! 
@fn ShaderModule @param code */ inline ShaderModule::ShaderModule(const std::vector& code, SpvReflectModuleFlags flags) { m_result = spvReflectCreateShaderModule2( flags, code.size(), code.data(), &m_module); } /*! @fn ShaderModule @param code */ inline ShaderModule::ShaderModule(const std::vector& code, SpvReflectModuleFlags flags) { m_result = spvReflectCreateShaderModule2( flags, code.size() * sizeof(uint32_t), code.data(), &m_module); } /*! @fn ~ShaderModule */ inline ShaderModule::~ShaderModule() { spvReflectDestroyShaderModule(&m_module); } inline ShaderModule::ShaderModule(ShaderModule&& other) { *this = std::move(other); } inline ShaderModule& ShaderModule::operator=(ShaderModule&& other) { m_result = std::move(other.m_result); m_module = std::move(other.m_module); other.m_module = {}; return *this; } /*! @fn GetResult @return */ inline SpvReflectResult ShaderModule::GetResult() const { return m_result; } /*! @fn GetShaderModule @return */ inline const SpvReflectShaderModule& ShaderModule::GetShaderModule() const { return m_module; } /*! @fn GetCodeSize @return */ inline uint32_t ShaderModule::GetCodeSize() const { return spvReflectGetCodeSize(&m_module); } /*! @fn GetCode @return */ inline const uint32_t* ShaderModule::GetCode() const { return spvReflectGetCode(&m_module); } /*! @fn GetEntryPoint @return Returns entry point */ inline const char* ShaderModule::GetEntryPointName() const { return this->GetEntryPointName(0); } /*! @fn GetEntryPoint @return Returns entry point */ inline const char* ShaderModule::GetSourceFile() const { return m_module.source_file; } /*! @fn GetEntryPointCount @param @return */ inline uint32_t ShaderModule::GetEntryPointCount() const { return m_module.entry_point_count; } /*! @fn GetEntryPointName @param index @return */ inline const char* ShaderModule::GetEntryPointName(uint32_t index) const { return m_module.entry_points[index].name; } /*! 
@fn GetEntryPointShaderStage

  @param  index  Entry point index; not bounds-checked here.
  @return Returns the shader stage for the entry point at \b index.
*/
inline SpvReflectShaderStageFlagBits ShaderModule::GetEntryPointShaderStage(uint32_t index) const {
  return m_module.entry_points[index].shader_stage;
}

/*! @fn GetShaderStage

  @return Returns the shader stage recorded on the module (first entry point).
*/
inline SpvReflectShaderStageFlagBits ShaderModule::GetShaderStage() const {
  return m_module.shader_stage;
}

/*! @fn EnumerateDescriptorBindings
    Forwards to spvReflectEnumerateDescriptorBindings and caches the
    returned code in m_result.

  @param  p_count
  @param  pp_bindings
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateDescriptorBindings(
  uint32_t*                     p_count,
  SpvReflectDescriptorBinding** pp_bindings
) const {
  m_result = spvReflectEnumerateDescriptorBindings(
    &m_module,
    p_count,
    pp_bindings);
  return m_result;
}

/*! @fn EnumerateEntryPointDescriptorBindings
    Forwards to spvReflectEnumerateEntryPointDescriptorBindings; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_bindings
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorBindings(
  const char*                   entry_point,
  uint32_t*                     p_count,
  SpvReflectDescriptorBinding** pp_bindings
) const {
  m_result = spvReflectEnumerateEntryPointDescriptorBindings(
    &m_module,
    entry_point,
    p_count,
    pp_bindings);
  return m_result;
}

/*! @fn EnumerateDescriptorSets
    Forwards to spvReflectEnumerateDescriptorSets; caches the returned code
    in m_result.

  @param  p_count
  @param  pp_sets
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateDescriptorSets(
  uint32_t*                 p_count,
  SpvReflectDescriptorSet** pp_sets
) const {
  m_result = spvReflectEnumerateDescriptorSets(
    &m_module,
    p_count,
    pp_sets);
  return m_result;
}

/*! @fn EnumerateEntryPointDescriptorSets
    Forwards to spvReflectEnumerateEntryPointDescriptorSets; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_sets
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorSets(
  const char*               entry_point,
  uint32_t*                 p_count,
  SpvReflectDescriptorSet** pp_sets
) const {
  m_result = spvReflectEnumerateEntryPointDescriptorSets(
    &m_module,
    entry_point,
    p_count,
    pp_sets);
  return m_result;
}

/*! @fn EnumerateInterfaceVariables
    Forwards to spvReflectEnumerateInterfaceVariables; caches the returned
    code in m_result.

  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateInterfaceVariables(
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateInterfaceVariables(
    &m_module,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumerateEntryPointInterfaceVariables
    Forwards to spvReflectEnumerateEntryPointInterfaceVariables; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointInterfaceVariables(
  const char*                   entry_point,
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateEntryPointInterfaceVariables(
    &m_module,
    entry_point,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumerateInputVariables
    Forwards to spvReflectEnumerateInputVariables; caches the returned code
    in m_result.

  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateInputVariables(
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateInputVariables(
    &m_module,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumerateEntryPointInputVariables
    Forwards to spvReflectEnumerateEntryPointInputVariables; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointInputVariables(
  const char*                   entry_point,
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateEntryPointInputVariables(
    &m_module,
    entry_point,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumerateOutputVariables
    Forwards to spvReflectEnumerateOutputVariables; caches the returned code
    in m_result.

  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateOutputVariables(
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateOutputVariables(
    &m_module,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumerateEntryPointOutputVariables
    Forwards to spvReflectEnumerateEntryPointOutputVariables; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_variables
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointOutputVariables(
  const char*                   entry_point,
  uint32_t*                     p_count,
  SpvReflectInterfaceVariable** pp_variables
) const {
  m_result = spvReflectEnumerateEntryPointOutputVariables(
    &m_module,
    entry_point,
    p_count,
    pp_variables);
  return m_result;
}

/*! @fn EnumeratePushConstantBlocks
    Forwards to spvReflectEnumeratePushConstantBlocks; caches the returned
    code in m_result.

  @param  p_count
  @param  pp_blocks
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumeratePushConstantBlocks(
  uint32_t*                 p_count,
  SpvReflectBlockVariable** pp_blocks
) const {
  m_result = spvReflectEnumeratePushConstantBlocks(
    &m_module,
    p_count,
    pp_blocks);
  return m_result;
}

/*! @fn EnumerateSpecializationConstants
    Forwards to spvReflectEnumerateSpecializationConstants; caches the
    returned code in m_result.

  @param  p_count
  @param  pp_constants
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateSpecializationConstants(
  uint32_t*                          p_count,
  SpvReflectSpecializationConstant** pp_constants
) const {
  m_result = spvReflectEnumerateSpecializationConstants(
    &m_module,
    p_count,
    pp_constants
  );
  return m_result;
}

/*! @fn EnumerateEntryPointPushConstantBlocks
    Forwards to spvReflectEnumerateEntryPointPushConstantBlocks; caches the
    returned code in m_result.

  @param  entry_point
  @param  p_count
  @param  pp_blocks
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::EnumerateEntryPointPushConstantBlocks(
  const char*               entry_point,
  uint32_t*                 p_count,
  SpvReflectBlockVariable** pp_blocks
) const {
  m_result = spvReflectEnumerateEntryPointPushConstantBlocks(
    &m_module,
    entry_point,
    p_count,
    pp_blocks);
  return m_result;
}

/*! @fn GetDescriptorBinding
    Forwards to spvReflectGetDescriptorBinding. Does not modify m_result.

  @param  binding_number
  @param  set_number
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectDescriptorBinding* ShaderModule::GetDescriptorBinding(
  uint32_t          binding_number,
  uint32_t          set_number,
  SpvReflectResult* p_result
) const {
  return spvReflectGetDescriptorBinding(
    &m_module,
    binding_number,
    set_number,
    p_result);
}

/*!
@fn GetEntryPointDescriptorBinding
    Forwards to spvReflectGetEntryPointDescriptorBinding. Does not modify
    m_result.

  @param  entry_point
  @param  binding_number
  @param  set_number
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectDescriptorBinding* ShaderModule::GetEntryPointDescriptorBinding(
  const char*       entry_point,
  uint32_t          binding_number,
  uint32_t          set_number,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointDescriptorBinding(
    &m_module,
    entry_point,
    binding_number,
    set_number,
    p_result);
}

/*! @fn GetDescriptorSet
    Forwards to spvReflectGetDescriptorSet.

  @param  set_number
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectDescriptorSet* ShaderModule::GetDescriptorSet(
  uint32_t          set_number,
  SpvReflectResult* p_result
) const {
  return spvReflectGetDescriptorSet(
    &m_module,
    set_number,
    p_result);
}

/*! @fn GetEntryPointDescriptorSet
    Forwards to spvReflectGetEntryPointDescriptorSet.

  @param  entry_point
  @param  set_number
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectDescriptorSet* ShaderModule::GetEntryPointDescriptorSet(
  const char*       entry_point,
  uint32_t          set_number,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointDescriptorSet(
    &m_module,
    entry_point,
    set_number,
    p_result);
}

/*! @fn GetInputVariableByLocation
    Forwards to spvReflectGetInputVariableByLocation.

  @param  location
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableByLocation(
  uint32_t          location,
  SpvReflectResult* p_result
) const {
  return spvReflectGetInputVariableByLocation(
    &m_module,
    location,
    p_result);
}

/*! @fn GetInputVariableBySemantic
    Forwards to spvReflectGetInputVariableBySemantic.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableBySemantic(
  const char*       semantic,
  SpvReflectResult* p_result
) const {
  return spvReflectGetInputVariableBySemantic(
    &m_module,
    semantic,
    p_result);
}

/*! @fn GetEntryPointInputVariableByLocation
    Forwards to spvReflectGetEntryPointInputVariableByLocation.

  @param  entry_point
  @param  location
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableByLocation(
  const char*       entry_point,
  uint32_t          location,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointInputVariableByLocation(
    &m_module,
    entry_point,
    location,
    p_result);
}

/*! @fn GetEntryPointInputVariableBySemantic
    Forwards to spvReflectGetEntryPointInputVariableBySemantic.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableBySemantic(
  const char*       entry_point,
  const char*       semantic,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointInputVariableBySemantic(
    &m_module,
    entry_point,
    semantic,
    p_result);
}

/*! @fn GetOutputVariableByLocation
    Forwards to spvReflectGetOutputVariableByLocation.

  @param  location
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableByLocation(
  uint32_t          location,
  SpvReflectResult* p_result
) const {
  return spvReflectGetOutputVariableByLocation(
    &m_module,
    location,
    p_result);
}

/*! @fn GetOutputVariableBySemantic
    Forwards to spvReflectGetOutputVariableBySemantic.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableBySemantic(
  const char*       semantic,
  SpvReflectResult* p_result
) const {
  return spvReflectGetOutputVariableBySemantic(&m_module, semantic, p_result);
}

/*! @fn GetEntryPointOutputVariableByLocation
    Forwards to spvReflectGetEntryPointOutputVariableByLocation.

  @param  entry_point
  @param  location
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableByLocation(
  const char*       entry_point,
  uint32_t          location,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointOutputVariableByLocation(
    &m_module,
    entry_point,
    location,
    p_result);
}

/*! @fn GetEntryPointOutputVariableBySemantic
    Forwards to spvReflectGetEntryPointOutputVariableBySemantic.
*/
inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableBySemantic(
  const char*       entry_point,
  const char*       semantic,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointOutputVariableBySemantic(
    &m_module,
    entry_point,
    semantic,
    p_result);
}

/*! @fn GetPushConstantBlock
    Forwards to spvReflectGetPushConstantBlock.

  @param  index
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectBlockVariable* ShaderModule::GetPushConstantBlock(
  uint32_t          index,
  SpvReflectResult* p_result
) const {
  return spvReflectGetPushConstantBlock(
    &m_module,
    index,
    p_result);
}

/*! @fn GetEntryPointPushConstantBlock
    Forwards to spvReflectGetEntryPointPushConstantBlock.

  @param  entry_point
  @param  p_result
  @return Pointer returned by the underlying C-API call.
*/
inline const SpvReflectBlockVariable* ShaderModule::GetEntryPointPushConstantBlock(
  const char*       entry_point,
  SpvReflectResult* p_result
) const {
  return spvReflectGetEntryPointPushConstantBlock(
    &m_module,
    entry_point,
    p_result);
}

/*! @fn ChangeDescriptorBindingNumbers
    Forwards to spvReflectChangeDescriptorBindingNumbers.

  @param  p_binding
  @param  new_binding_number
  @param  new_set_number
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::ChangeDescriptorBindingNumbers(
  const SpvReflectDescriptorBinding* p_binding,
  uint32_t                           new_binding_number,
  uint32_t                           new_set_number
) {
  return spvReflectChangeDescriptorBindingNumbers(
    &m_module,
    p_binding,
    new_binding_number,
    new_set_number);
}

/*! @fn ChangeDescriptorSetNumber
    Forwards to spvReflectChangeDescriptorSetNumber.

  @param  p_set
  @param  new_set_number
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::ChangeDescriptorSetNumber(
  const SpvReflectDescriptorSet* p_set,
  uint32_t                       new_set_number
) {
  return spvReflectChangeDescriptorSetNumber(
    &m_module,
    p_set,
    new_set_number);
}

/*! @fn ChangeInputVariableLocation
    Forwards to spvReflectChangeInputVariableLocation.

  @param  p_input_variable
  @param  new_location
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::ChangeInputVariableLocation(
  const SpvReflectInterfaceVariable* p_input_variable,
  uint32_t                           new_location) {
  return spvReflectChangeInputVariableLocation(
    &m_module,
    p_input_variable,
    new_location);
}

/*!
@fn ChangeOutputVariableLocation
    Forwards to spvReflectChangeOutputVariableLocation.

  @param  p_output_variable  Pointer to the output variable to update.
  @param  new_location       The new location to assign.
  @return Result of the underlying C-API call.
*/
inline SpvReflectResult ShaderModule::ChangeOutputVariableLocation(
  const SpvReflectInterfaceVariable* p_output_variable,
  uint32_t                           new_location) {
  return spvReflectChangeOutputVariableLocation(
    &m_module,
    p_output_variable,
    new_location);
}

} // namespace spv_reflect
#endif // defined(__cplusplus) && !defined(SPIRV_REFLECT_DISABLE_CPP_WRAPPER)
#endif // SPIRV_REFLECT_H
// clang-format on


================================================
FILE: deps/metal-cpp/MetalSingleHeader.hpp
================================================
//
// Metal.hpp
//
// Autogenerated on October 02, 2025.
//
// Copyright 2020-2024 Apple Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #pragma once #define _NS_WEAK_IMPORT __attribute__((weak_import)) #ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _NS_EXPORT __attribute__((visibility("hidden"))) #else #define _NS_EXPORT __attribute__((visibility("default"))) #endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _NS_EXTERN extern "C" _NS_EXPORT #define _NS_INLINE inline __attribute__((always_inline)) #define _NS_PACKED __attribute__((packed)) #define _NS_CONST(type, name) _NS_EXTERN type const name #define _NS_ENUM(type, name) enum name : type #define _NS_OPTIONS(type, name) \ using name = type; \ enum : name #define _NS_CAST_TO_UINT(value) static_cast(value) #define _NS_VALIDATE_SIZE(ns, name) static_assert(sizeof(ns::name) == sizeof(ns##name), "size mismatch " #ns "::" #name) #define _NS_VALIDATE_ENUM(ns, name) static_assert(_NS_CAST_TO_UINT(ns::name) == _NS_CAST_TO_UINT(ns##name), "value mismatch " #ns "::" #name) #include #define _NS_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol) #define _NS_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor) #if defined(NS_PRIVATE_IMPLEMENTATION) #include namespace NS::Private { template inline _Type const LoadSymbol(const char* pSymbol) { const _Type* pAddress = static_cast<_Type*>(dlsym(RTLD_DEFAULT, pSymbol)); return pAddress ? 
*pAddress : _Type(); } } // NS::Private #ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _NS_PRIVATE_VISIBILITY __attribute__((visibility("hidden"))) #else #define _NS_PRIVATE_VISIBILITY __attribute__((visibility("default"))) #endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _NS_PRIVATE_IMPORT __attribute__((weak_import)) #ifdef __OBJC__ #define _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol)) #define _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge void*)objc_getProtocol(#symbol)) #else #define _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol) #define _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol) #endif // __OBJC__ #define _NS_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _NS_PRIVATE_VISIBILITY = _NS_PRIVATE_OBJC_LOOKUP_CLASS(symbol) #define _NS_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _NS_PRIVATE_VISIBILITY = _NS_PRIVATE_OBJC_GET_PROTOCOL(symbol) #define _NS_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _NS_PRIVATE_VISIBILITY = sel_registerName(symbol) #if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0) #define _NS_PRIVATE_DEF_CONST(type, symbol) \ _NS_EXTERN type const NS##symbol _NS_PRIVATE_IMPORT; \ type const NS::symbol = (nullptr != &NS##symbol) ? 
NS##symbol : type() #else #define _NS_PRIVATE_DEF_CONST(type, symbol) \ _NS_EXTERN type const MTL##symbol _NS_PRIVATE_IMPORT; \ type const NS::symbol = Private::LoadSymbol("NS" #symbol) #endif #else #define _NS_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol #define _NS_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol #define _NS_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor #define _NS_PRIVATE_DEF_CONST(type, symbol) extern type const NS::symbol #endif // NS_PRIVATE_IMPLEMENTATION namespace NS { namespace Private { namespace Class { _NS_PRIVATE_DEF_CLS(NSArray); _NS_PRIVATE_DEF_CLS(NSAutoreleasePool); _NS_PRIVATE_DEF_CLS(NSBundle); _NS_PRIVATE_DEF_CLS(NSCondition); _NS_PRIVATE_DEF_CLS(NSDate); _NS_PRIVATE_DEF_CLS(NSDictionary); _NS_PRIVATE_DEF_CLS(NSError); _NS_PRIVATE_DEF_CLS(NSNotificationCenter); _NS_PRIVATE_DEF_CLS(NSNumber); _NS_PRIVATE_DEF_CLS(NSObject); _NS_PRIVATE_DEF_CLS(NSProcessInfo); _NS_PRIVATE_DEF_CLS(NSSet); _NS_PRIVATE_DEF_CLS(NSString); _NS_PRIVATE_DEF_CLS(NSURL); _NS_PRIVATE_DEF_CLS(NSValue); } // Class } // Private } // MTL namespace NS { namespace Private { namespace Protocol { } // Protocol } // Private } // NS namespace NS { namespace Private { namespace Selector { _NS_PRIVATE_DEF_SEL(addObject_, "addObject:"); _NS_PRIVATE_DEF_SEL(addObserverName_object_queue_block_, "addObserverForName:object:queue:usingBlock:"); _NS_PRIVATE_DEF_SEL(activeProcessorCount, "activeProcessorCount"); _NS_PRIVATE_DEF_SEL(allBundles, "allBundles"); _NS_PRIVATE_DEF_SEL(allFrameworks, "allFrameworks"); _NS_PRIVATE_DEF_SEL(allObjects, "allObjects"); _NS_PRIVATE_DEF_SEL(alloc, "alloc"); _NS_PRIVATE_DEF_SEL(appStoreReceiptURL, "appStoreReceiptURL"); _NS_PRIVATE_DEF_SEL(arguments, "arguments"); _NS_PRIVATE_DEF_SEL(array, "array"); _NS_PRIVATE_DEF_SEL(arrayWithObject_, "arrayWithObject:"); _NS_PRIVATE_DEF_SEL(arrayWithObjects_count_, "arrayWithObjects:count:"); _NS_PRIVATE_DEF_SEL(automaticTerminationSupportEnabled, "automaticTerminationSupportEnabled"); 
_NS_PRIVATE_DEF_SEL(autorelease, "autorelease"); _NS_PRIVATE_DEF_SEL(beginActivityWithOptions_reason_, "beginActivityWithOptions:reason:"); _NS_PRIVATE_DEF_SEL(boolValue, "boolValue"); _NS_PRIVATE_DEF_SEL(broadcast, "broadcast"); _NS_PRIVATE_DEF_SEL(builtInPlugInsPath, "builtInPlugInsPath"); _NS_PRIVATE_DEF_SEL(builtInPlugInsURL, "builtInPlugInsURL"); _NS_PRIVATE_DEF_SEL(bundleIdentifier, "bundleIdentifier"); _NS_PRIVATE_DEF_SEL(bundlePath, "bundlePath"); _NS_PRIVATE_DEF_SEL(bundleURL, "bundleURL"); _NS_PRIVATE_DEF_SEL(bundleWithPath_, "bundleWithPath:"); _NS_PRIVATE_DEF_SEL(bundleWithURL_, "bundleWithURL:"); _NS_PRIVATE_DEF_SEL(caseInsensitiveCompare_, "caseInsensitiveCompare:"); _NS_PRIVATE_DEF_SEL(characterAtIndex_, "characterAtIndex:"); _NS_PRIVATE_DEF_SEL(charValue, "charValue"); _NS_PRIVATE_DEF_SEL(countByEnumeratingWithState_objects_count_, "countByEnumeratingWithState:objects:count:"); _NS_PRIVATE_DEF_SEL(cStringUsingEncoding_, "cStringUsingEncoding:"); _NS_PRIVATE_DEF_SEL(code, "code"); _NS_PRIVATE_DEF_SEL(compare_, "compare:"); _NS_PRIVATE_DEF_SEL(copy, "copy"); _NS_PRIVATE_DEF_SEL(count, "count"); _NS_PRIVATE_DEF_SEL(dateWithTimeIntervalSinceNow_, "dateWithTimeIntervalSinceNow:"); _NS_PRIVATE_DEF_SEL(defaultCenter, "defaultCenter"); _NS_PRIVATE_DEF_SEL(descriptionWithLocale_, "descriptionWithLocale:"); _NS_PRIVATE_DEF_SEL(disableAutomaticTermination_, "disableAutomaticTermination:"); _NS_PRIVATE_DEF_SEL(disableSuddenTermination, "disableSuddenTermination"); _NS_PRIVATE_DEF_SEL(debugDescription, "debugDescription"); _NS_PRIVATE_DEF_SEL(description, "description"); _NS_PRIVATE_DEF_SEL(dictionary, "dictionary"); _NS_PRIVATE_DEF_SEL(dictionaryWithObject_forKey_, "dictionaryWithObject:forKey:"); _NS_PRIVATE_DEF_SEL(dictionaryWithObjects_forKeys_count_, "dictionaryWithObjects:forKeys:count:"); _NS_PRIVATE_DEF_SEL(domain, "domain"); _NS_PRIVATE_DEF_SEL(doubleValue, "doubleValue"); _NS_PRIVATE_DEF_SEL(drain, "drain"); 
_NS_PRIVATE_DEF_SEL(enableAutomaticTermination_, "enableAutomaticTermination:"); _NS_PRIVATE_DEF_SEL(enableSuddenTermination, "enableSuddenTermination"); _NS_PRIVATE_DEF_SEL(endActivity_, "endActivity:"); _NS_PRIVATE_DEF_SEL(environment, "environment"); _NS_PRIVATE_DEF_SEL(errorWithDomain_code_userInfo_, "errorWithDomain:code:userInfo:"); _NS_PRIVATE_DEF_SEL(executablePath, "executablePath"); _NS_PRIVATE_DEF_SEL(executableURL, "executableURL"); _NS_PRIVATE_DEF_SEL(fileSystemRepresentation, "fileSystemRepresentation"); _NS_PRIVATE_DEF_SEL(fileURLWithPath_, "fileURLWithPath:"); _NS_PRIVATE_DEF_SEL(floatValue, "floatValue"); _NS_PRIVATE_DEF_SEL(fullUserName, "fullUserName"); _NS_PRIVATE_DEF_SEL(getValue_size_, "getValue:size:"); _NS_PRIVATE_DEF_SEL(globallyUniqueString, "globallyUniqueString"); _NS_PRIVATE_DEF_SEL(hash, "hash"); _NS_PRIVATE_DEF_SEL(hasPerformanceProfile_, "hasPerformanceProfile:"); _NS_PRIVATE_DEF_SEL(hostName, "hostName"); _NS_PRIVATE_DEF_SEL(infoDictionary, "infoDictionary"); _NS_PRIVATE_DEF_SEL(init, "init"); _NS_PRIVATE_DEF_SEL(initFileURLWithPath_, "initFileURLWithPath:"); _NS_PRIVATE_DEF_SEL(initWithBool_, "initWithBool:"); _NS_PRIVATE_DEF_SEL(initWithBytes_objCType_, "initWithBytes:objCType:"); _NS_PRIVATE_DEF_SEL(initWithBytesNoCopy_length_encoding_freeWhenDone_, "initWithBytesNoCopy:length:encoding:freeWhenDone:"); _NS_PRIVATE_DEF_SEL(initWithChar_, "initWithChar:"); _NS_PRIVATE_DEF_SEL(initWithCoder_, "initWithCoder:"); _NS_PRIVATE_DEF_SEL(initWithCString_encoding_, "initWithCString:encoding:"); _NS_PRIVATE_DEF_SEL(initWithDomain_code_userInfo_, "initWithDomain:code:userInfo:"); _NS_PRIVATE_DEF_SEL(initWithDouble_, "initWithDouble:"); _NS_PRIVATE_DEF_SEL(initWithFloat_, "initWithFloat:"); _NS_PRIVATE_DEF_SEL(initWithInt_, "initWithInt:"); _NS_PRIVATE_DEF_SEL(initWithLong_, "initWithLong:"); _NS_PRIVATE_DEF_SEL(initWithLongLong_, "initWithLongLong:"); _NS_PRIVATE_DEF_SEL(initWithObjects_count_, "initWithObjects:count:"); 
_NS_PRIVATE_DEF_SEL(initWithObjects_forKeys_count_, "initWithObjects:forKeys:count:"); _NS_PRIVATE_DEF_SEL(initWithPath_, "initWithPath:"); _NS_PRIVATE_DEF_SEL(initWithShort_, "initWithShort:"); _NS_PRIVATE_DEF_SEL(initWithString_, "initWithString:"); _NS_PRIVATE_DEF_SEL(initWithUnsignedChar_, "initWithUnsignedChar:"); _NS_PRIVATE_DEF_SEL(initWithUnsignedInt_, "initWithUnsignedInt:"); _NS_PRIVATE_DEF_SEL(initWithUnsignedLong_, "initWithUnsignedLong:"); _NS_PRIVATE_DEF_SEL(initWithUnsignedLongLong_, "initWithUnsignedLongLong:"); _NS_PRIVATE_DEF_SEL(initWithUnsignedShort_, "initWithUnsignedShort:"); _NS_PRIVATE_DEF_SEL(initWithURL_, "initWithURL:"); _NS_PRIVATE_DEF_SEL(integerValue, "integerValue"); _NS_PRIVATE_DEF_SEL(intValue, "intValue"); _NS_PRIVATE_DEF_SEL(isDeviceCertified_, "isDeviceCertifiedFor:"); _NS_PRIVATE_DEF_SEL(isEqual_, "isEqual:"); _NS_PRIVATE_DEF_SEL(isEqualToNumber_, "isEqualToNumber:"); _NS_PRIVATE_DEF_SEL(isEqualToString_, "isEqualToString:"); _NS_PRIVATE_DEF_SEL(isEqualToValue_, "isEqualToValue:"); _NS_PRIVATE_DEF_SEL(isiOSAppOnMac, "isiOSAppOnMac"); _NS_PRIVATE_DEF_SEL(isLoaded, "isLoaded"); _NS_PRIVATE_DEF_SEL(isLowPowerModeEnabled, "isLowPowerModeEnabled"); _NS_PRIVATE_DEF_SEL(isMacCatalystApp, "isMacCatalystApp"); _NS_PRIVATE_DEF_SEL(isOperatingSystemAtLeastVersion_, "isOperatingSystemAtLeastVersion:"); _NS_PRIVATE_DEF_SEL(keyEnumerator, "keyEnumerator"); _NS_PRIVATE_DEF_SEL(length, "length"); _NS_PRIVATE_DEF_SEL(lengthOfBytesUsingEncoding_, "lengthOfBytesUsingEncoding:"); _NS_PRIVATE_DEF_SEL(load, "load"); _NS_PRIVATE_DEF_SEL(loadAndReturnError_, "loadAndReturnError:"); _NS_PRIVATE_DEF_SEL(localizedDescription, "localizedDescription"); _NS_PRIVATE_DEF_SEL(localizedFailureReason, "localizedFailureReason"); _NS_PRIVATE_DEF_SEL(localizedInfoDictionary, "localizedInfoDictionary"); _NS_PRIVATE_DEF_SEL(localizedRecoveryOptions, "localizedRecoveryOptions"); _NS_PRIVATE_DEF_SEL(localizedRecoverySuggestion, "localizedRecoverySuggestion"); 
_NS_PRIVATE_DEF_SEL(localizedStringForKey_value_table_, "localizedStringForKey:value:table:"); _NS_PRIVATE_DEF_SEL(lock, "lock"); _NS_PRIVATE_DEF_SEL(longValue, "longValue"); _NS_PRIVATE_DEF_SEL(longLongValue, "longLongValue"); _NS_PRIVATE_DEF_SEL(mainBundle, "mainBundle"); _NS_PRIVATE_DEF_SEL(maximumLengthOfBytesUsingEncoding_, "maximumLengthOfBytesUsingEncoding:"); _NS_PRIVATE_DEF_SEL(methodSignatureForSelector_, "methodSignatureForSelector:"); _NS_PRIVATE_DEF_SEL(mutableBytes, "mutableBytes"); _NS_PRIVATE_DEF_SEL(name, "name"); _NS_PRIVATE_DEF_SEL(nextObject, "nextObject"); _NS_PRIVATE_DEF_SEL(numberWithBool_, "numberWithBool:"); _NS_PRIVATE_DEF_SEL(numberWithChar_, "numberWithChar:"); _NS_PRIVATE_DEF_SEL(numberWithDouble_, "numberWithDouble:"); _NS_PRIVATE_DEF_SEL(numberWithFloat_, "numberWithFloat:"); _NS_PRIVATE_DEF_SEL(numberWithInt_, "numberWithInt:"); _NS_PRIVATE_DEF_SEL(numberWithLong_, "numberWithLong:"); _NS_PRIVATE_DEF_SEL(numberWithLongLong_, "numberWithLongLong:"); _NS_PRIVATE_DEF_SEL(numberWithShort_, "numberWithShort:"); _NS_PRIVATE_DEF_SEL(numberWithUnsignedChar_, "numberWithUnsignedChar:"); _NS_PRIVATE_DEF_SEL(numberWithUnsignedInt_, "numberWithUnsignedInt:"); _NS_PRIVATE_DEF_SEL(numberWithUnsignedLong_, "numberWithUnsignedLong:"); _NS_PRIVATE_DEF_SEL(numberWithUnsignedLongLong_, "numberWithUnsignedLongLong:"); _NS_PRIVATE_DEF_SEL(numberWithUnsignedShort_, "numberWithUnsignedShort:"); _NS_PRIVATE_DEF_SEL(objCType, "objCType"); _NS_PRIVATE_DEF_SEL(object, "object"); _NS_PRIVATE_DEF_SEL(objectAtIndex_, "objectAtIndex:"); _NS_PRIVATE_DEF_SEL(objectEnumerator, "objectEnumerator"); _NS_PRIVATE_DEF_SEL(objectForInfoDictionaryKey_, "objectForInfoDictionaryKey:"); _NS_PRIVATE_DEF_SEL(objectForKey_, "objectForKey:"); _NS_PRIVATE_DEF_SEL(operatingSystem, "operatingSystem"); _NS_PRIVATE_DEF_SEL(operatingSystemVersion, "operatingSystemVersion"); _NS_PRIVATE_DEF_SEL(operatingSystemVersionString, "operatingSystemVersionString"); 
_NS_PRIVATE_DEF_SEL(pathForAuxiliaryExecutable_, "pathForAuxiliaryExecutable:"); _NS_PRIVATE_DEF_SEL(performActivityWithOptions_reason_usingBlock_, "performActivityWithOptions:reason:usingBlock:"); _NS_PRIVATE_DEF_SEL(performExpiringActivityWithReason_usingBlock_, "performExpiringActivityWithReason:usingBlock:"); _NS_PRIVATE_DEF_SEL(physicalMemory, "physicalMemory"); _NS_PRIVATE_DEF_SEL(pointerValue, "pointerValue"); _NS_PRIVATE_DEF_SEL(preflightAndReturnError_, "preflightAndReturnError:"); _NS_PRIVATE_DEF_SEL(privateFrameworksPath, "privateFrameworksPath"); _NS_PRIVATE_DEF_SEL(privateFrameworksURL, "privateFrameworksURL"); _NS_PRIVATE_DEF_SEL(processIdentifier, "processIdentifier"); _NS_PRIVATE_DEF_SEL(processInfo, "processInfo"); _NS_PRIVATE_DEF_SEL(processName, "processName"); _NS_PRIVATE_DEF_SEL(processorCount, "processorCount"); _NS_PRIVATE_DEF_SEL(rangeOfString_options_, "rangeOfString:options:"); _NS_PRIVATE_DEF_SEL(release, "release"); _NS_PRIVATE_DEF_SEL(removeObserver_, "removeObserver:"); _NS_PRIVATE_DEF_SEL(resourcePath, "resourcePath"); _NS_PRIVATE_DEF_SEL(resourceURL, "resourceURL"); _NS_PRIVATE_DEF_SEL(respondsToSelector_, "respondsToSelector:"); _NS_PRIVATE_DEF_SEL(retain, "retain"); _NS_PRIVATE_DEF_SEL(retainCount, "retainCount"); _NS_PRIVATE_DEF_SEL(setAutomaticTerminationSupportEnabled_, "setAutomaticTerminationSupportEnabled:"); _NS_PRIVATE_DEF_SEL(setProcessName_, "setProcessName:"); _NS_PRIVATE_DEF_SEL(sharedFrameworksPath, "sharedFrameworksPath"); _NS_PRIVATE_DEF_SEL(sharedFrameworksURL, "sharedFrameworksURL"); _NS_PRIVATE_DEF_SEL(sharedSupportPath, "sharedSupportPath"); _NS_PRIVATE_DEF_SEL(sharedSupportURL, "sharedSupportURL"); _NS_PRIVATE_DEF_SEL(shortValue, "shortValue"); _NS_PRIVATE_DEF_SEL(showPools, "showPools"); _NS_PRIVATE_DEF_SEL(signal, "signal"); _NS_PRIVATE_DEF_SEL(string, "string"); _NS_PRIVATE_DEF_SEL(stringValue, "stringValue"); _NS_PRIVATE_DEF_SEL(stringWithString_, "stringWithString:"); 
_NS_PRIVATE_DEF_SEL(stringWithCString_encoding_, "stringWithCString:encoding:"); _NS_PRIVATE_DEF_SEL(stringByAppendingString_, "stringByAppendingString:"); _NS_PRIVATE_DEF_SEL(systemUptime, "systemUptime"); _NS_PRIVATE_DEF_SEL(thermalState, "thermalState"); _NS_PRIVATE_DEF_SEL(unload, "unload"); _NS_PRIVATE_DEF_SEL(unlock, "unlock"); _NS_PRIVATE_DEF_SEL(unsignedCharValue, "unsignedCharValue"); _NS_PRIVATE_DEF_SEL(unsignedIntegerValue, "unsignedIntegerValue"); _NS_PRIVATE_DEF_SEL(unsignedIntValue, "unsignedIntValue"); _NS_PRIVATE_DEF_SEL(unsignedLongValue, "unsignedLongValue"); _NS_PRIVATE_DEF_SEL(unsignedLongLongValue, "unsignedLongLongValue"); _NS_PRIVATE_DEF_SEL(unsignedShortValue, "unsignedShortValue"); _NS_PRIVATE_DEF_SEL(URLForAuxiliaryExecutable_, "URLForAuxiliaryExecutable:"); _NS_PRIVATE_DEF_SEL(userInfo, "userInfo"); _NS_PRIVATE_DEF_SEL(userName, "userName"); _NS_PRIVATE_DEF_SEL(UTF8String, "UTF8String"); _NS_PRIVATE_DEF_SEL(valueWithBytes_objCType_, "valueWithBytes:objCType:"); _NS_PRIVATE_DEF_SEL(valueWithPointer_, "valueWithPointer:"); _NS_PRIVATE_DEF_SEL(wait, "wait"); _NS_PRIVATE_DEF_SEL(waitUntilDate_, "waitUntilDate:"); } // Class } // Private } // MTL #include #include namespace NS { using TimeInterval = double; using Integer = std::intptr_t; using UInteger = std::uintptr_t; const Integer IntegerMax = INTPTR_MAX; const Integer IntegerMin = INTPTR_MIN; const UInteger UIntegerMax = UINTPTR_MAX; struct OperatingSystemVersion { Integer majorVersion; Integer minorVersion; Integer patchVersion; } _NS_PACKED; } #include #include #include namespace NS { template class _NS_EXPORT Referencing : public _Base { public: _Class* retain(); void release(); _Class* autorelease(); UInteger retainCount() const; }; template class Copying : public Referencing<_Class, _Base> { public: _Class* copy() const; }; template class SecureCoding : public Referencing<_Class, _Base> { }; class Object : public Referencing { public: UInteger hash() const; bool isEqual(const Object* 
pObject) const; class String* description() const; class String* debugDescription() const; protected: friend class Referencing; template static _Class* alloc(const char* pClassName); template static _Class* alloc(const void* pClass); template _Class* init(); template static _Dst bridgingCast(const void* pObj); static class MethodSignature* methodSignatureForSelector(const void* pObj, SEL selector); static bool respondsToSelector(const void* pObj, SEL selector); template static constexpr bool doesRequireMsgSendStret(); template static _Ret sendMessage(const void* pObj, SEL selector, _Args... args); template static _Ret sendMessageSafe(const void* pObj, SEL selector, _Args... args); private: Object() = delete; Object(const Object&) = delete; ~Object() = delete; Object& operator=(const Object&) = delete; }; } template _NS_INLINE _Class* NS::Referencing<_Class, _Base>::retain() { return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(retain)); } template _NS_INLINE void NS::Referencing<_Class, _Base>::release() { Object::sendMessage(this, _NS_PRIVATE_SEL(release)); } template _NS_INLINE _Class* NS::Referencing<_Class, _Base>::autorelease() { return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(autorelease)); } template _NS_INLINE NS::UInteger NS::Referencing<_Class, _Base>::retainCount() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(retainCount)); } template _NS_INLINE _Class* NS::Copying<_Class, _Base>::copy() const { return Object::sendMessage<_Class*>(this, _NS_PRIVATE_SEL(copy)); } template _NS_INLINE _Dst NS::Object::bridgingCast(const void* pObj) { #ifdef __OBJC__ return (__bridge _Dst)pObj; #else return (_Dst)pObj; #endif // __OBJC__ } template _NS_INLINE constexpr bool NS::Object::doesRequireMsgSendStret() { #if (defined(__i386__) || defined(__x86_64__)) constexpr size_t kStructLimit = (sizeof(std::uintptr_t) << 1); return sizeof(_Type) > kStructLimit; #elif defined(__arm64__) return false; #elif defined(__arm__) constexpr size_t kStructLimit 
= sizeof(std::uintptr_t); return std::is_class_v<_Type> && (sizeof(_Type) > kStructLimit); #else #error "Unsupported architecture!" #endif } template <> _NS_INLINE constexpr bool NS::Object::doesRequireMsgSendStret() { return false; } template _NS_INLINE _Ret NS::Object::sendMessage(const void* pObj, SEL selector, _Args... args) { #if (defined(__i386__) || defined(__x86_64__)) if constexpr (std::is_floating_point<_Ret>()) { using SendMessageProcFpret = _Ret (*)(const void*, SEL, _Args...); const SendMessageProcFpret pProc = reinterpret_cast(&objc_msgSend_fpret); return (*pProc)(pObj, selector, args...); } else #endif // ( defined( __i386__ ) || defined( __x86_64__ ) ) #if !defined(__arm64__) if constexpr (doesRequireMsgSendStret<_Ret>()) { using SendMessageProcStret = void (*)(_Ret*, const void*, SEL, _Args...); const SendMessageProcStret pProc = reinterpret_cast(&objc_msgSend_stret); _Ret ret; (*pProc)(&ret, pObj, selector, args...); return ret; } else #endif // !defined( __arm64__ ) { using SendMessageProc = _Ret (*)(const void*, SEL, _Args...); const SendMessageProc pProc = reinterpret_cast(&objc_msgSend); return (*pProc)(pObj, selector, args...); } } _NS_INLINE NS::MethodSignature* NS::Object::methodSignatureForSelector(const void* pObj, SEL selector) { return sendMessage(pObj, _NS_PRIVATE_SEL(methodSignatureForSelector_), selector); } _NS_INLINE bool NS::Object::respondsToSelector(const void* pObj, SEL selector) { return sendMessage(pObj, _NS_PRIVATE_SEL(respondsToSelector_), selector); } template _NS_INLINE _Ret NS::Object::sendMessageSafe(const void* pObj, SEL selector, _Args... 
args) { if ((respondsToSelector(pObj, selector)) || (nullptr != methodSignatureForSelector(pObj, selector))) { return sendMessage<_Ret>(pObj, selector, args...); } if constexpr (!std::is_void<_Ret>::value) { return _Ret(0); } } template _NS_INLINE _Class* NS::Object::alloc(const char* pClassName) { return sendMessage<_Class*>(objc_lookUpClass(pClassName), _NS_PRIVATE_SEL(alloc)); } template _NS_INLINE _Class* NS::Object::alloc(const void* pClass) { return sendMessage<_Class*>(pClass, _NS_PRIVATE_SEL(alloc)); } template _NS_INLINE _Class* NS::Object::init() { return sendMessage<_Class*>(this, _NS_PRIVATE_SEL(init)); } _NS_INLINE NS::UInteger NS::Object::hash() const { return sendMessage(this, _NS_PRIVATE_SEL(hash)); } _NS_INLINE bool NS::Object::isEqual(const Object* pObject) const { return sendMessage(this, _NS_PRIVATE_SEL(isEqual_), pObject); } _NS_INLINE NS::String* NS::Object::description() const { return sendMessage(this, _NS_PRIVATE_SEL(description)); } _NS_INLINE NS::String* NS::Object::debugDescription() const { return sendMessageSafe(this, _NS_PRIVATE_SEL(debugDescription)); } namespace NS { struct FastEnumerationState { unsigned long state; Object** itemsPtr; unsigned long* mutationsPtr; unsigned long extra[5]; } _NS_PACKED; class FastEnumeration : public Referencing { public: NS::UInteger countByEnumerating(FastEnumerationState* pState, Object** pBuffer, NS::UInteger len); }; template class Enumerator : public Referencing, FastEnumeration> { public: _ObjectType* nextObject(); class Array* allObjects(); }; } _NS_INLINE NS::UInteger NS::FastEnumeration::countByEnumerating(FastEnumerationState* pState, Object** pBuffer, NS::UInteger len) { return Object::sendMessage(this, _NS_PRIVATE_SEL(countByEnumeratingWithState_objects_count_), pState, pBuffer, len); } template _NS_INLINE _ObjectType* NS::Enumerator<_ObjectType>::nextObject() { return Object::sendMessage<_ObjectType*>(this, _NS_PRIVATE_SEL(nextObject)); } template _NS_INLINE NS::Array* 
NS::Enumerator<_ObjectType>::allObjects() { return Object::sendMessage(this, _NS_PRIVATE_SEL(allObjects)); } namespace NS { class Array : public Copying { public: static Array* array(); static Array* array(const Object* pObject); static Array* array(const Object* const* pObjects, UInteger count); static Array* alloc(); Array* init(); Array* init(const Object* const* pObjects, UInteger count); Array* init(const class Coder* pCoder); template _Object* object(UInteger index) const; UInteger count() const; Enumerator* objectEnumerator() const; }; } _NS_INLINE NS::Array* NS::Array::array() { return Object::sendMessage(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(array)); } _NS_INLINE NS::Array* NS::Array::array(const Object* pObject) { return Object::sendMessage(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(arrayWithObject_), pObject); } _NS_INLINE NS::Array* NS::Array::array(const Object* const* pObjects, UInteger count) { return Object::sendMessage(_NS_PRIVATE_CLS(NSArray), _NS_PRIVATE_SEL(arrayWithObjects_count_), pObjects, count); } _NS_INLINE NS::Array* NS::Array::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSArray)); } _NS_INLINE NS::Array* NS::Array::init() { return NS::Object::init(); } _NS_INLINE NS::Array* NS::Array::init(const Object* const* pObjects, UInteger count) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithObjects_count_), pObjects, count); } _NS_INLINE NS::Array* NS::Array::init(const class Coder* pCoder) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder); } _NS_INLINE NS::UInteger NS::Array::count() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(count)); } template _NS_INLINE _Object* NS::Array::object(UInteger index) const { return Object::sendMessage<_Object*>(this, _NS_PRIVATE_SEL(objectAtIndex_), index); } _NS_INLINE NS::Enumerator* NS::Array::objectEnumerator() const { return NS::Object::sendMessage*>(this, _NS_PRIVATE_SEL(objectEnumerator)); } namespace NS { class AutoreleasePool : public Object { 
public: static AutoreleasePool* alloc(); AutoreleasePool* init(); void drain(); void addObject(Object* pObject); static void showPools(); }; } _NS_INLINE NS::AutoreleasePool* NS::AutoreleasePool::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSAutoreleasePool)); } _NS_INLINE NS::AutoreleasePool* NS::AutoreleasePool::init() { return NS::Object::init(); } _NS_INLINE void NS::AutoreleasePool::drain() { Object::sendMessage(this, _NS_PRIVATE_SEL(drain)); } _NS_INLINE void NS::AutoreleasePool::addObject(Object* pObject) { Object::sendMessage(this, _NS_PRIVATE_SEL(addObject_), pObject); } _NS_INLINE void NS::AutoreleasePool::showPools() { Object::sendMessage(_NS_PRIVATE_CLS(NSAutoreleasePool), _NS_PRIVATE_SEL(showPools)); } namespace NS { class Dictionary : public NS::Copying { public: static Dictionary* dictionary(); static Dictionary* dictionary(const Object* pObject, const Object* pKey); static Dictionary* dictionary(const Object* const* pObjects, const Object* const* pKeys, UInteger count); static Dictionary* alloc(); Dictionary* init(); Dictionary* init(const Object* const* pObjects, const Object* const* pKeys, UInteger count); Dictionary* init(const class Coder* pCoder); template Enumerator<_KeyType>* keyEnumerator() const; template _Object* object(const Object* pKey) const; UInteger count() const; }; } _NS_INLINE NS::Dictionary* NS::Dictionary::dictionary() { return Object::sendMessage(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionary)); } _NS_INLINE NS::Dictionary* NS::Dictionary::dictionary(const Object* pObject, const Object* pKey) { return Object::sendMessage(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionaryWithObject_forKey_), pObject, pKey); } _NS_INLINE NS::Dictionary* NS::Dictionary::dictionary(const Object* const* pObjects, const Object* const* pKeys, UInteger count) { return Object::sendMessage(_NS_PRIVATE_CLS(NSDictionary), _NS_PRIVATE_SEL(dictionaryWithObjects_forKeys_count_), pObjects, pKeys, count); } _NS_INLINE NS::Dictionary* 
NS::Dictionary::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSDictionary)); } _NS_INLINE NS::Dictionary* NS::Dictionary::init() { return NS::Object::init(); } _NS_INLINE NS::Dictionary* NS::Dictionary::init(const Object* const* pObjects, const Object* const* pKeys, UInteger count) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithObjects_forKeys_count_), pObjects, pKeys, count); } _NS_INLINE NS::Dictionary* NS::Dictionary::init(const class Coder* pCoder) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder); } template _NS_INLINE NS::Enumerator<_KeyType>* NS::Dictionary::keyEnumerator() const { return Object::sendMessage*>(this, _NS_PRIVATE_SEL(keyEnumerator)); } template _NS_INLINE _Object* NS::Dictionary::object(const Object* pKey) const { return Object::sendMessage<_Object*>(this, _NS_PRIVATE_SEL(objectForKey_), pKey); } _NS_INLINE NS::UInteger NS::Dictionary::count() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(count)); } namespace NS { _NS_ENUM(Integer, ComparisonResult) { OrderedAscending = -1L, OrderedSame, OrderedDescending }; const Integer NotFound = IntegerMax; } namespace NS { struct Range { static Range Make(UInteger loc, UInteger len); Range(UInteger loc, UInteger len); bool Equal(const Range& range) const; bool LocationInRange(UInteger loc) const; UInteger Max() const; UInteger location; UInteger length; } _NS_PACKED; } _NS_INLINE NS::Range::Range(UInteger loc, UInteger len) : location(loc) , length(len) { } _NS_INLINE NS::Range NS::Range::Make(UInteger loc, UInteger len) { return Range(loc, len); } _NS_INLINE bool NS::Range::Equal(const Range& range) const { return (location == range.location) && (length == range.length); } _NS_INLINE bool NS::Range::LocationInRange(UInteger loc) const { return (!(loc < location)) && ((loc - location) < length); } _NS_INLINE NS::UInteger NS::Range::Max() const { return location + length; } namespace NS { _NS_ENUM(NS::UInteger, StringEncoding) { ASCIIStringEncoding = 
1, NEXTSTEPStringEncoding = 2, JapaneseEUCStringEncoding = 3, UTF8StringEncoding = 4, ISOLatin1StringEncoding = 5, SymbolStringEncoding = 6, NonLossyASCIIStringEncoding = 7, ShiftJISStringEncoding = 8, ISOLatin2StringEncoding = 9, UnicodeStringEncoding = 10, WindowsCP1251StringEncoding = 11, WindowsCP1252StringEncoding = 12, WindowsCP1253StringEncoding = 13, WindowsCP1254StringEncoding = 14, WindowsCP1250StringEncoding = 15, ISO2022JPStringEncoding = 21, MacOSRomanStringEncoding = 30, UTF16StringEncoding = UnicodeStringEncoding, UTF16BigEndianStringEncoding = 0x90000100, UTF16LittleEndianStringEncoding = 0x94000100, UTF32StringEncoding = 0x8c000100, UTF32BigEndianStringEncoding = 0x98000100, UTF32LittleEndianStringEncoding = 0x9c000100 }; _NS_OPTIONS(NS::UInteger, StringCompareOptions) { CaseInsensitiveSearch = 1, LiteralSearch = 2, BackwardsSearch = 4, AnchoredSearch = 8, NumericSearch = 64, DiacriticInsensitiveSearch = 128, WidthInsensitiveSearch = 256, ForcedOrderingSearch = 512, RegularExpressionSearch = 1024 }; using unichar = unsigned short; class String : public Copying { public: static String* string(); static String* string(const String* pString); static String* string(const char* pString, StringEncoding encoding); static String* alloc(); String* init(); String* init(const String* pString); String* init(const char* pString, StringEncoding encoding); String* init(void* pBytes, UInteger len, StringEncoding encoding, bool freeBuffer); unichar character(UInteger index) const; UInteger length() const; const char* cString(StringEncoding encoding) const; const char* utf8String() const; UInteger maximumLengthOfBytes(StringEncoding encoding) const; UInteger lengthOfBytes(StringEncoding encoding) const; bool isEqualToString(const String* pString) const; Range rangeOfString(const String* pString, StringCompareOptions options) const; const char* fileSystemRepresentation() const; String* stringByAppendingString(const String* pString) const; ComparisonResult 
// ---------------------------------------------------------------------------
// metal-cpp Foundation (NS::*) wrappers — vendored third-party header.
// NOTE(review): this text appears to be an extraction of Apple's metal-cpp
// single header in which angle-bracketed spans were stripped: template
// parameter/argument lists (e.g. `template [[deprecated...]]` referencing
// `_StringLen` with no parameter list, `reinterpret_cast(...)` and
// `Object::sendMessage(...)` with no type arguments) and `#include`
// directives with no header name. Restore from the upstream metal-cpp
// release rather than hand-repairing; code below is left byte-identical and
// only comments were added.
// ---------------------------------------------------------------------------
// Tail of the NS::String declaration, the MTLSTR compile-time constant-string
// macro, the deprecated MakeConstantString helper, and the inline definitions
// of NS::String's methods — each a sendMessage call bridging to the
// identically-named NSString selector.
caseInsensitiveCompare(const String* pString) const; }; #define MTLSTR(literal) (NS::String*)__builtin___CFStringMakeConstantString("" literal "") template [[deprecated("please use MTLSTR(str)")]] constexpr const String* MakeConstantString(const char (&str)[_StringLen]) { return reinterpret_cast(__CFStringMakeConstantString(str)); } } _NS_INLINE NS::String* NS::String::string() { return Object::sendMessage(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(string)); } _NS_INLINE NS::String* NS::String::string(const String* pString) { return Object::sendMessage(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(stringWithString_), pString); } _NS_INLINE NS::String* NS::String::string(const char* pString, StringEncoding encoding) { return Object::sendMessage(_NS_PRIVATE_CLS(NSString), _NS_PRIVATE_SEL(stringWithCString_encoding_), pString, encoding); } _NS_INLINE NS::String* NS::String::alloc() { return Object::alloc(_NS_PRIVATE_CLS(NSString)); } _NS_INLINE NS::String* NS::String::init() { return Object::init(); } _NS_INLINE NS::String* NS::String::init(const String* pString) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithString_), pString); } _NS_INLINE NS::String* NS::String::init(const char* pString, StringEncoding encoding) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCString_encoding_), pString, encoding); } _NS_INLINE NS::String* NS::String::init(void* pBytes, UInteger len, StringEncoding encoding, bool freeBuffer) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithBytesNoCopy_length_encoding_freeWhenDone_), pBytes, len, encoding, freeBuffer); } _NS_INLINE NS::unichar NS::String::character(UInteger index) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(characterAtIndex_), index); } _NS_INLINE NS::UInteger NS::String::length() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(length)); } _NS_INLINE const char* NS::String::cString(StringEncoding encoding) const { return Object::sendMessage(this,
// Remaining NS::String accessors, then the NSNotification wrappers:
// NS::Notification (name/object/userInfo) and NS::NotificationCenter, which
// offers both a block-based addObserver and a std::function-based overload.
// NOTE(review): the lone `#include` after caseInsensitiveCompare lost its
// header name (presumably <functional>, given the std::function usage that
// follows — confirm against upstream).
_NS_PRIVATE_SEL(cStringUsingEncoding_), encoding); } _NS_INLINE const char* NS::String::utf8String() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(UTF8String)); } _NS_INLINE NS::UInteger NS::String::maximumLengthOfBytes(StringEncoding encoding) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(maximumLengthOfBytesUsingEncoding_), encoding); } _NS_INLINE NS::UInteger NS::String::lengthOfBytes(StringEncoding encoding) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(lengthOfBytesUsingEncoding_), encoding); } _NS_INLINE bool NS::String::isEqualToString(const NS::String* pString) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(isEqualToString_), pString); } _NS_INLINE NS::Range NS::String::rangeOfString(const NS::String* pString, NS::StringCompareOptions options) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(rangeOfString_options_), pString, options); } _NS_INLINE const char* NS::String::fileSystemRepresentation() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(fileSystemRepresentation)); } _NS_INLINE NS::String* NS::String::stringByAppendingString(const String* pString) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(stringByAppendingString_), pString); } _NS_INLINE NS::ComparisonResult NS::String::caseInsensitiveCompare(const String* pString) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(caseInsensitiveCompare_), pString); } #include namespace NS { using NotificationName = class String*; class Notification : public NS::Referencing { public: NS::String* name() const; NS::Object* object() const; NS::Dictionary* userInfo() const; }; using ObserverBlock = void(^)(Notification*); using ObserverFunction = std::function; class NotificationCenter : public NS::Referencing { public: static class NotificationCenter* defaultCenter(); Object* addObserver(NotificationName name, Object* pObj, void* pQueue, ObserverBlock block); Object* addObserver(NotificationName name, Object* pObj, void* pQueue,
// NotificationCenter inline definitions. The std::function-based addObserver
// copies the handler into a __block variable and forwards through the
// block-based overload. Below that: NSBundle notification-name constants,
// the Localized* helper declarations, and the NS::Bundle class declaration.
ObserverFunction &handler); void removeObserver(Object* pObserver); }; } _NS_INLINE NS::String* NS::Notification::name() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(name)); } _NS_INLINE NS::Object* NS::Notification::object() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(object)); } _NS_INLINE NS::Dictionary* NS::Notification::userInfo() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(userInfo)); } _NS_INLINE NS::NotificationCenter* NS::NotificationCenter::defaultCenter() { return NS::Object::sendMessage(_NS_PRIVATE_CLS(NSNotificationCenter), _NS_PRIVATE_SEL(defaultCenter)); } _NS_INLINE NS::Object* NS::NotificationCenter::addObserver(NS::NotificationName name, Object* pObj, void* pQueue, NS::ObserverBlock block) { return NS::Object::sendMessage(this, _NS_PRIVATE_SEL(addObserverName_object_queue_block_), name, pObj, pQueue, block); } _NS_INLINE NS::Object* NS::NotificationCenter::addObserver(NS::NotificationName name, Object* pObj, void* pQueue, NS::ObserverFunction &handler) { __block ObserverFunction blockFunction = handler; return addObserver(name, pObj, pQueue, ^(NS::Notification* pNotif) {blockFunction(pNotif);}); } _NS_INLINE void NS::NotificationCenter::removeObserver(Object* pObserver) { return NS::Object::sendMessage(this, _NS_PRIVATE_SEL(removeObserver_), pObserver); } namespace NS { _NS_CONST(NotificationName, BundleDidLoadNotification); _NS_CONST(NotificationName, BundleResourceRequestLowDiskSpaceNotification); class String* LocalizedString(const String* pKey, const String*); class String* LocalizedStringFromTable(const String* pKey, const String* pTbl, const String*); class String* LocalizedStringFromTableInBundle(const String* pKey, const String* pTbl, const class Bundle* pBdle, const String*); class String* LocalizedStringWithDefaultValue(const String* pKey, const String* pTbl, const class Bundle* pBdle, const String* pVal, const String*); class Bundle : public Referencing { public: static Bundle* mainBundle(); static
Bundle* bundle(const class String* pPath); static Bundle* bundle(const class URL* pURL); static class Array* allBundles(); static class Array* allFrameworks(); static Bundle* alloc(); Bundle* init(const class String* pPath); Bundle* init(const class URL* pURL); bool load(); bool unload(); bool isLoaded() const; bool preflightAndReturnError(class Error** pError) const; bool loadAndReturnError(class Error** pError); class URL* bundleURL() const; class URL* resourceURL() const; class URL* executableURL() const; class URL* URLForAuxiliaryExecutable(const class String* pExecutableName) const; class URL* privateFrameworksURL() const; class URL* sharedFrameworksURL() const; class URL* sharedSupportURL() const; class URL* builtInPlugInsURL() const; class URL* appStoreReceiptURL() const; class String* bundlePath() const; class String* resourcePath() const; class String* executablePath() const; class String* pathForAuxiliaryExecutable(const class String* pExecutableName) const; class String* privateFrameworksPath() const; class String* sharedFrameworksPath() const; class String* sharedSupportPath() const; class String* builtInPlugInsPath() const; class String* bundleIdentifier() const; class Dictionary* infoDictionary() const; class Dictionary* localizedInfoDictionary() const; class Object* objectForInfoDictionaryKey(const class String* pKey); class String* localizedString(const class String* pKey, const class String* pValue = nullptr, const class String* pTableName = nullptr) const; }; } _NS_PRIVATE_DEF_CONST(NS::NotificationName, BundleDidLoadNotification); _NS_PRIVATE_DEF_CONST(NS::NotificationName, BundleResourceRequestLowDiskSpaceNotification); _NS_INLINE NS::String* NS::LocalizedString(const String* pKey, const String*) { return Bundle::mainBundle()->localizedString(pKey, nullptr, nullptr); } _NS_INLINE NS::String* NS::LocalizedStringFromTable(const String* pKey, const String* pTbl, const String*) { return Bundle::mainBundle()->localizedString(pKey, nullptr, pTbl); }
// Remaining localized-string convenience helpers (all forward to
// Bundle::localizedString; the trailing unnamed `const String*` parameter
// mirrors the NSLocalizedString* "comment" argument and is ignored), followed
// by the inline definitions of NS::Bundle's methods — each bridges to the
// identically-named NSBundle selector via sendMessage.
// NOTE(review): sendMessage's return-type template arguments were stripped
// here as elsewhere in this extraction; restore from upstream metal-cpp.
_NS_INLINE NS::String* NS::LocalizedStringFromTableInBundle(const String* pKey, const String* pTbl, const Bundle* pBdl, const String*) { return pBdl->localizedString(pKey, nullptr, pTbl); } _NS_INLINE NS::String* NS::LocalizedStringWithDefaultValue(const String* pKey, const String* pTbl, const Bundle* pBdl, const String* pVal, const String*) { return pBdl->localizedString(pKey, pVal, pTbl); } _NS_INLINE NS::Bundle* NS::Bundle::mainBundle() { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(mainBundle)); } _NS_INLINE NS::Bundle* NS::Bundle::bundle(const class String* pPath) { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(bundleWithPath_), pPath); } _NS_INLINE NS::Bundle* NS::Bundle::bundle(const class URL* pURL) { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(bundleWithURL_), pURL); } _NS_INLINE NS::Array* NS::Bundle::allBundles() { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(allBundles)); } _NS_INLINE NS::Array* NS::Bundle::allFrameworks() { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(allFrameworks)); } _NS_INLINE NS::Bundle* NS::Bundle::alloc() { return Object::sendMessage(_NS_PRIVATE_CLS(NSBundle), _NS_PRIVATE_SEL(alloc)); } _NS_INLINE NS::Bundle* NS::Bundle::init(const String* pPath) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithPath_), pPath); } _NS_INLINE NS::Bundle* NS::Bundle::init(const URL* pURL) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithURL_), pURL); } _NS_INLINE bool NS::Bundle::load() { return Object::sendMessage(this, _NS_PRIVATE_SEL(load)); } _NS_INLINE bool NS::Bundle::unload() { return Object::sendMessage(this, _NS_PRIVATE_SEL(unload)); } _NS_INLINE bool NS::Bundle::isLoaded() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(isLoaded)); } _NS_INLINE bool NS::Bundle::preflightAndReturnError(Error** pError) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(preflightAndReturnError_),
// Bundle URL accessors (bundleURL, resourceURL, executableURL, framework and
// plug-in locations, app-store receipt) and the corresponding path accessors.
pError); } _NS_INLINE bool NS::Bundle::loadAndReturnError(Error** pError) { return Object::sendMessage(this, _NS_PRIVATE_SEL(loadAndReturnError_), pError); } _NS_INLINE NS::URL* NS::Bundle::bundleURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(bundleURL)); } _NS_INLINE NS::URL* NS::Bundle::resourceURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(resourceURL)); } _NS_INLINE NS::URL* NS::Bundle::executableURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(executableURL)); } _NS_INLINE NS::URL* NS::Bundle::URLForAuxiliaryExecutable(const String* pExecutableName) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(URLForAuxiliaryExecutable_), pExecutableName); } _NS_INLINE NS::URL* NS::Bundle::privateFrameworksURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(privateFrameworksURL)); } _NS_INLINE NS::URL* NS::Bundle::sharedFrameworksURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(sharedFrameworksURL)); } _NS_INLINE NS::URL* NS::Bundle::sharedSupportURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(sharedSupportURL)); } _NS_INLINE NS::URL* NS::Bundle::builtInPlugInsURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(builtInPlugInsURL)); } _NS_INLINE NS::URL* NS::Bundle::appStoreReceiptURL() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(appStoreReceiptURL)); } _NS_INLINE NS::String* NS::Bundle::bundlePath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(bundlePath)); } _NS_INLINE NS::String* NS::Bundle::resourcePath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(resourcePath)); } _NS_INLINE NS::String* NS::Bundle::executablePath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(executablePath)); } _NS_INLINE NS::String* NS::Bundle::pathForAuxiliaryExecutable(const String* pExecutableName) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(pathForAuxiliaryExecutable_), pExecutableName); } _NS_INLINE NS::String*
// Remaining NS::Bundle dictionary/identifier accessors and localizedString
// (bridges to -localizedStringForKey:value:table:); then the minimal NS::Data
// wrapper (mutableBytes/length) and NS::Date (dateWithTimeIntervalSinceNow).
NS::Bundle::privateFrameworksPath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(privateFrameworksPath)); } _NS_INLINE NS::String* NS::Bundle::sharedFrameworksPath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(sharedFrameworksPath)); } _NS_INLINE NS::String* NS::Bundle::sharedSupportPath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(sharedSupportPath)); } _NS_INLINE NS::String* NS::Bundle::builtInPlugInsPath() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(builtInPlugInsPath)); } _NS_INLINE NS::String* NS::Bundle::bundleIdentifier() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(bundleIdentifier)); } _NS_INLINE NS::Dictionary* NS::Bundle::infoDictionary() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(infoDictionary)); } _NS_INLINE NS::Dictionary* NS::Bundle::localizedInfoDictionary() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(localizedInfoDictionary)); } _NS_INLINE NS::Object* NS::Bundle::objectForInfoDictionaryKey(const String* pKey) { return Object::sendMessage(this, _NS_PRIVATE_SEL(objectForInfoDictionaryKey_), pKey); } _NS_INLINE NS::String* NS::Bundle::localizedString(const String* pKey, const String* pValue /* = nullptr */, const String* pTableName /* = nullptr */) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(localizedStringForKey_value_table_), pKey, pValue, pTableName); } namespace NS { class Data : public Copying { public: void* mutableBytes() const; UInteger length() const; }; } _NS_INLINE void* NS::Data::mutableBytes() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(mutableBytes)); } _NS_INLINE NS::UInteger NS::Data::length() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(length)); } namespace NS { using TimeInterval = double; class Date : public Copying { public: static Date* dateWithTimeIntervalSinceNow(TimeInterval secs); }; } // NS _NS_INLINE NS::Date* NS::Date::dateWithTimeIntervalSinceNow(NS::TimeInterval secs) { return
// NSError wrappers: error-domain constants (Cocoa/POSIX/OSStatus/Mach),
// userInfo-key constants mirroring the NSError*Key globals, the NS::Error
// declaration, and the first _NS_PRIVATE_DEF_CONST definitions.
NS::Object::sendMessage(_NS_PRIVATE_CLS(NSDate), _NS_PRIVATE_SEL(dateWithTimeIntervalSinceNow_), secs); } //------------------------------------------------------------------------------------------------------------------------------------------------------------- namespace NS { using ErrorDomain = class String*; _NS_CONST(ErrorDomain, CocoaErrorDomain); _NS_CONST(ErrorDomain, POSIXErrorDomain); _NS_CONST(ErrorDomain, OSStatusErrorDomain); _NS_CONST(ErrorDomain, MachErrorDomain); using ErrorUserInfoKey = class String*; _NS_CONST(ErrorUserInfoKey, UnderlyingErrorKey); _NS_CONST(ErrorUserInfoKey, LocalizedDescriptionKey); _NS_CONST(ErrorUserInfoKey, LocalizedFailureReasonErrorKey); _NS_CONST(ErrorUserInfoKey, LocalizedRecoverySuggestionErrorKey); _NS_CONST(ErrorUserInfoKey, LocalizedRecoveryOptionsErrorKey); _NS_CONST(ErrorUserInfoKey, RecoveryAttempterErrorKey); _NS_CONST(ErrorUserInfoKey, HelpAnchorErrorKey); _NS_CONST(ErrorUserInfoKey, DebugDescriptionErrorKey); _NS_CONST(ErrorUserInfoKey, LocalizedFailureErrorKey); _NS_CONST(ErrorUserInfoKey, StringEncodingErrorKey); _NS_CONST(ErrorUserInfoKey, URLErrorKey); _NS_CONST(ErrorUserInfoKey, FilePathErrorKey); class Error : public Copying { public: static Error* error(ErrorDomain domain, Integer code, class Dictionary* pDictionary); static Error* alloc(); Error* init(); Error* init(ErrorDomain domain, Integer code, class Dictionary* pDictionary); Integer code() const; ErrorDomain domain() const; class Dictionary* userInfo() const; class String* localizedDescription() const; class Array* localizedRecoveryOptions() const; class String* localizedRecoverySuggestion() const; class String* localizedFailureReason() const; }; } _NS_PRIVATE_DEF_CONST(NS::ErrorDomain, CocoaErrorDomain); _NS_PRIVATE_DEF_CONST(NS::ErrorDomain, POSIXErrorDomain); _NS_PRIVATE_DEF_CONST(NS::ErrorDomain, OSStatusErrorDomain); _NS_PRIVATE_DEF_CONST(NS::ErrorDomain, MachErrorDomain); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, UnderlyingErrorKey);
// Definitions of the remaining NSError userInfo-key constants, followed by
// the inline NS::Error method definitions (each bridging to the matching
// NSError selector via sendMessage).
// NOTE(review): as elsewhere in this extracted chunk, the sendMessage
// return-type template arguments were stripped; restore from upstream
// metal-cpp before compiling.
_NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedDescriptionKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedFailureReasonErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedRecoverySuggestionErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedRecoveryOptionsErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, RecoveryAttempterErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, HelpAnchorErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, DebugDescriptionErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, LocalizedFailureErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, StringEncodingErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, URLErrorKey); _NS_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, FilePathErrorKey); _NS_INLINE NS::Error* NS::Error::error(ErrorDomain domain, Integer code, class Dictionary* pDictionary) { return Object::sendMessage(_NS_PRIVATE_CLS(NSError), _NS_PRIVATE_SEL(errorWithDomain_code_userInfo_), domain, code, pDictionary); } _NS_INLINE NS::Error* NS::Error::alloc() { return Object::alloc(_NS_PRIVATE_CLS(NSError)); } _NS_INLINE NS::Error* NS::Error::init() { return Object::init(); } _NS_INLINE NS::Error* NS::Error::init(ErrorDomain domain, Integer code, class Dictionary* pDictionary) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithDomain_code_userInfo_), domain, code, pDictionary); } _NS_INLINE NS::Integer NS::Error::code() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(code)); } _NS_INLINE NS::ErrorDomain NS::Error::domain() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(domain)); } _NS_INLINE NS::Dictionary* NS::Error::userInfo() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(userInfo)); } _NS_INLINE NS::String* NS::Error::localizedDescription() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(localizedDescription)); } _NS_INLINE NS::Array* NS::Error::localizedRecoveryOptions() const { return Object::sendMessage(this,
// Tail of the NS::Error accessors; then the NS::Locking mixin (lock/unlock
// forwarding to the NSLocking selectors — NOTE(review): its template
// parameter list `<class _Class, class _Base>` was stripped in extraction)
// and NS::Condition (wait/waitUntilDate/signal/broadcast). After that, the
// NS::Value declaration begins.
_NS_PRIVATE_SEL(localizedRecoveryOptions)); } _NS_INLINE NS::String* NS::Error::localizedRecoverySuggestion() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(localizedRecoverySuggestion)); } _NS_INLINE NS::String* NS::Error::localizedFailureReason() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(localizedFailureReason)); } namespace NS { template class Locking : public _Base { public: void lock(); void unlock(); }; class Condition : public Locking { public: static Condition* alloc(); Condition* init(); void wait(); bool waitUntilDate(Date* pLimit); void signal(); void broadcast(); }; } // NS template _NS_INLINE void NS::Locking<_Class, _Base>::lock() { NS::Object::sendMessage(this, _NS_PRIVATE_SEL(lock)); } template _NS_INLINE void NS::Locking<_Class, _Base>::unlock() { NS::Object::sendMessage(this, _NS_PRIVATE_SEL(unlock)); } _NS_INLINE NS::Condition* NS::Condition::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSCondition)); } _NS_INLINE NS::Condition* NS::Condition::init() { return NS::Object::init(); } _NS_INLINE void NS::Condition::wait() { NS::Object::sendMessage(this, _NS_PRIVATE_SEL(wait)); } _NS_INLINE bool NS::Condition::waitUntilDate(NS::Date* pLimit) { return NS::Object::sendMessage(this, _NS_PRIVATE_SEL(waitUntilDate_), pLimit); } _NS_INLINE void NS::Condition::signal() { NS::Object::sendMessage(this, _NS_PRIVATE_SEL(signal)); } _NS_INLINE void NS::Condition::broadcast() { NS::Object::sendMessage(this, _NS_PRIVATE_SEL(broadcast)); } //------------------------------------------------------------------------------------------------------------------------------------------------------------- namespace NS { class Value : public Copying { public: static Value* value(const void* pValue, const char* pType); static Value* value(const void* pPointer); static Value* alloc(); Value* init(const void* pValue, const char* pType); Value* init(const class Coder* pCoder); void getValue(void* pValue, UInteger size) const; const char* objCType()
// Tail of NS::Value, then the NS::Number declaration: one number()/init()
// factory per underlying C type plus the *Value() accessors, compare, and
// isEqualToNumber — mirroring NSNumber's API. The first NS::Value inline
// definition (value-with-bytes/objCType) follows the namespace.
const; bool isEqualToValue(Value* pValue) const; void* pointerValue() const; }; class Number : public Copying { public: static Number* number(char value); static Number* number(unsigned char value); static Number* number(short value); static Number* number(unsigned short value); static Number* number(int value); static Number* number(unsigned int value); static Number* number(long value); static Number* number(unsigned long value); static Number* number(long long value); static Number* number(unsigned long long value); static Number* number(float value); static Number* number(double value); static Number* number(bool value); static Number* alloc(); Number* init(const class Coder* pCoder); Number* init(char value); Number* init(unsigned char value); Number* init(short value); Number* init(unsigned short value); Number* init(int value); Number* init(unsigned int value); Number* init(long value); Number* init(unsigned long value); Number* init(long long value); Number* init(unsigned long long value); Number* init(float value); Number* init(double value); Number* init(bool value); char charValue() const; unsigned char unsignedCharValue() const; short shortValue() const; unsigned short unsignedShortValue() const; int intValue() const; unsigned int unsignedIntValue() const; long longValue() const; unsigned long unsignedLongValue() const; long long longLongValue() const; unsigned long long unsignedLongLongValue() const; float floatValue() const; double doubleValue() const; bool boolValue() const; Integer integerValue() const; UInteger unsignedIntegerValue() const; class String* stringValue() const; ComparisonResult compare(const Number* pOtherNumber) const; bool isEqualToNumber(const Number* pNumber) const; class String* descriptionWithLocale(const Object* pLocale) const; }; } _NS_INLINE NS::Value* NS::Value::value(const void* pValue, const char* pType) { return Object::sendMessage(_NS_PRIVATE_CLS(NSValue), _NS_PRIVATE_SEL(valueWithBytes_objCType_), pValue, pType); }
// Inline NS::Value definitions (pointer value, byte/objCType init, getValue,
// equality, pointerValue), then the NS::Number definitions: each number()
// factory and init() overload forwards to the NSNumber selector that matches
// its C type (numberWithChar_/initWithChar_, etc.).
// NOTE(review): sendMessage template arguments stripped in extraction, as
// throughout this chunk; restore from upstream metal-cpp.
_NS_INLINE NS::Value* NS::Value::value(const void* pPointer) { return Object::sendMessage(_NS_PRIVATE_CLS(NSValue), _NS_PRIVATE_SEL(valueWithPointer_), pPointer); } _NS_INLINE NS::Value* NS::Value::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSValue)); } _NS_INLINE NS::Value* NS::Value::init(const void* pValue, const char* pType) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithBytes_objCType_), pValue, pType); } _NS_INLINE NS::Value* NS::Value::init(const class Coder* pCoder) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder); } _NS_INLINE void NS::Value::getValue(void* pValue, UInteger size) const { Object::sendMessage(this, _NS_PRIVATE_SEL(getValue_size_), pValue, size); } _NS_INLINE const char* NS::Value::objCType() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(objCType)); } _NS_INLINE bool NS::Value::isEqualToValue(Value* pValue) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(isEqualToValue_), pValue); } _NS_INLINE void* NS::Value::pointerValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(pointerValue)); } _NS_INLINE NS::Number* NS::Number::number(char value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithChar_), value); } _NS_INLINE NS::Number* NS::Number::number(unsigned char value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedChar_), value); } _NS_INLINE NS::Number* NS::Number::number(short value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithShort_), value); } _NS_INLINE NS::Number* NS::Number::number(unsigned short value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedShort_), value); } _NS_INLINE NS::Number* NS::Number::number(int value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithInt_), value); } _NS_INLINE NS::Number* NS::Number::number(unsigned int value) { return
Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedInt_), value); } _NS_INLINE NS::Number* NS::Number::number(long value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithLong_), value); } _NS_INLINE NS::Number* NS::Number::number(unsigned long value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedLong_), value); } _NS_INLINE NS::Number* NS::Number::number(long long value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithLongLong_), value); } _NS_INLINE NS::Number* NS::Number::number(unsigned long long value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithUnsignedLongLong_), value); } _NS_INLINE NS::Number* NS::Number::number(float value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithFloat_), value); } _NS_INLINE NS::Number* NS::Number::number(double value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithDouble_), value); } _NS_INLINE NS::Number* NS::Number::number(bool value) { return Object::sendMessage(_NS_PRIVATE_CLS(NSNumber), _NS_PRIVATE_SEL(numberWithBool_), value); } _NS_INLINE NS::Number* NS::Number::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSNumber)); } _NS_INLINE NS::Number* NS::Number::init(const Coder* pCoder) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder); } _NS_INLINE NS::Number* NS::Number::init(char value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithChar_), value); } _NS_INLINE NS::Number* NS::Number::init(unsigned char value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithUnsignedChar_), value); } _NS_INLINE NS::Number* NS::Number::init(short value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithShort_), value); } _NS_INLINE NS::Number* NS::Number::init(unsigned short value) { return Object::sendMessage(this,
_NS_PRIVATE_SEL(initWithUnsignedShort_), value); } _NS_INLINE NS::Number* NS::Number::init(int value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithInt_), value); } _NS_INLINE NS::Number* NS::Number::init(unsigned int value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithUnsignedInt_), value); } _NS_INLINE NS::Number* NS::Number::init(long value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithLong_), value); } _NS_INLINE NS::Number* NS::Number::init(unsigned long value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithUnsignedLong_), value); } _NS_INLINE NS::Number* NS::Number::init(long long value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithLongLong_), value); } _NS_INLINE NS::Number* NS::Number::init(unsigned long long value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithUnsignedLongLong_), value); } _NS_INLINE NS::Number* NS::Number::init(float value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithFloat_), value); } _NS_INLINE NS::Number* NS::Number::init(double value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithDouble_), value); } _NS_INLINE NS::Number* NS::Number::init(bool value) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithBool_), value); } _NS_INLINE char NS::Number::charValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(charValue)); } _NS_INLINE unsigned char NS::Number::unsignedCharValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(unsignedCharValue)); } _NS_INLINE short NS::Number::shortValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(shortValue)); } _NS_INLINE unsigned short NS::Number::unsignedShortValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(unsignedShortValue)); } _NS_INLINE int NS::Number::intValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(intValue)); } _NS_INLINE unsigned int NS::Number::unsignedIntValue() const { return Object::sendMessage(this,
_NS_PRIVATE_SEL(unsignedIntValue)); } _NS_INLINE long NS::Number::longValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(longValue)); } _NS_INLINE unsigned long NS::Number::unsignedLongValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(unsignedLongValue)); } _NS_INLINE long long NS::Number::longLongValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(longLongValue)); } _NS_INLINE unsigned long long NS::Number::unsignedLongLongValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(unsignedLongLongValue)); } _NS_INLINE float NS::Number::floatValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(floatValue)); } _NS_INLINE double NS::Number::doubleValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(doubleValue)); } _NS_INLINE bool NS::Number::boolValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(boolValue)); } _NS_INLINE NS::Integer NS::Number::integerValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(integerValue)); } _NS_INLINE NS::UInteger NS::Number::unsignedIntegerValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(unsignedIntegerValue)); } _NS_INLINE NS::String* NS::Number::stringValue() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(stringValue)); } _NS_INLINE NS::ComparisonResult NS::Number::compare(const Number* pOtherNumber) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(compare_), pOtherNumber); } _NS_INLINE bool NS::Number::isEqualToNumber(const Number* pNumber) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(isEqualToNumber_), pNumber); } _NS_INLINE NS::String* NS::Number::descriptionWithLocale(const Object* pLocale) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(descriptionWithLocale_), pLocale); } #include namespace NS { _NS_CONST(NotificationName, ProcessInfoThermalStateDidChangeNotification); _NS_CONST(NotificationName, ProcessInfoPowerStateDidChangeNotification); _NS_CONST(NotificationName,
// NSProcessInfo wrappers: the ProcessInfoThermalState enum, ActivityOptions
// bit flags (matching the NSActivityOptions values), device-certification and
// performance-profile constants, and the NS::ProcessInfo class declaration.
// NOTE(review): the bare `#include` above lost its header name in extraction.
ProcessInfoPerformanceProfileDidChangeNotification); _NS_ENUM(NS::Integer, ProcessInfoThermalState) { ProcessInfoThermalStateNominal = 0, ProcessInfoThermalStateFair = 1, ProcessInfoThermalStateSerious = 2, ProcessInfoThermalStateCritical = 3 }; _NS_OPTIONS(std::uint64_t, ActivityOptions) { ActivityIdleDisplaySleepDisabled = (1ULL << 40), ActivityIdleSystemSleepDisabled = (1ULL << 20), ActivitySuddenTerminationDisabled = (1ULL << 14), ActivityAutomaticTerminationDisabled = (1ULL << 15), ActivityUserInitiated = (0x00FFFFFFULL | ActivityIdleSystemSleepDisabled), ActivityUserInitiatedAllowingIdleSystemSleep = (ActivityUserInitiated & ~ActivityIdleSystemSleepDisabled), ActivityBackground = 0x000000FFULL, ActivityLatencyCritical = 0xFF00000000ULL, }; typedef NS::Integer DeviceCertification; _NS_CONST(DeviceCertification, DeviceCertificationiPhonePerformanceGaming); typedef NS::Integer ProcessPerformanceProfile; _NS_CONST(ProcessPerformanceProfile, ProcessPerformanceProfileDefault); _NS_CONST(ProcessPerformanceProfile, ProcessPerformanceProfileSustained); class ProcessInfo : public Referencing { public: static ProcessInfo* processInfo(); class Array* arguments() const; class Dictionary* environment() const; class String* hostName() const; class String* processName() const; void setProcessName(const String* pString); int processIdentifier() const; class String* globallyUniqueString() const; class String* userName() const; class String* fullUserName() const; UInteger operatingSystem() const; OperatingSystemVersion operatingSystemVersion() const; class String* operatingSystemVersionString() const; bool isOperatingSystemAtLeastVersion(OperatingSystemVersion version) const; UInteger processorCount() const; UInteger activeProcessorCount() const; unsigned long long physicalMemory() const; TimeInterval systemUptime() const; void disableSuddenTermination(); void enableSuddenTermination(); void disableAutomaticTermination(const class String* pReason); void
enableAutomaticTermination(const class String* pReason); bool automaticTerminationSupportEnabled() const; void setAutomaticTerminationSupportEnabled(bool enabled); class Object* beginActivity(ActivityOptions options, const class String* pReason); void endActivity(class Object* pActivity); void performActivity(ActivityOptions options, const class String* pReason, void (^block)(void)); void performActivity(ActivityOptions options, const class String* pReason, const std::function& func); void performExpiringActivity(const class String* pReason, void (^block)(bool expired)); void performExpiringActivity(const class String* pReason, const std::function& func); ProcessInfoThermalState thermalState() const; bool isLowPowerModeEnabled() const; bool isiOSAppOnMac() const; bool isMacCatalystApp() const; bool isDeviceCertified(DeviceCertification performanceTier) const; bool hasPerformanceProfile(ProcessPerformanceProfile performanceProfile) const; }; } _NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoThermalStateDidChangeNotification); _NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoPowerStateDidChangeNotification); _NS_PRIVATE_DEF_CONST(NS::NotificationName, ProcessInfoPerformanceProfileDidChangeNotification); _NS_PRIVATE_DEF_CONST(NS::DeviceCertification, DeviceCertificationiPhonePerformanceGaming); _NS_PRIVATE_DEF_CONST(NS::ProcessPerformanceProfile, ProcessPerformanceProfileDefault); _NS_PRIVATE_DEF_CONST(NS::ProcessPerformanceProfile, ProcessPerformanceProfileSustained); _NS_INLINE NS::ProcessInfo* NS::ProcessInfo::processInfo() { return Object::sendMessage(_NS_PRIVATE_CLS(NSProcessInfo), _NS_PRIVATE_SEL(processInfo)); } _NS_INLINE NS::Array* NS::ProcessInfo::arguments() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(arguments)); } _NS_INLINE NS::Dictionary* NS::ProcessInfo::environment() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(environment)); } _NS_INLINE NS::String* NS::ProcessInfo::hostName() const { return
// Inline NS::ProcessInfo definitions. Note the mix of sendMessage and
// sendMessageSafe calls here — presumably sendMessageSafe guards selectors
// that are unavailable on some OS versions/platforms; confirm against the
// upstream metal-cpp NSPrivate implementation.
Object::sendMessage(this, _NS_PRIVATE_SEL(hostName)); } _NS_INLINE NS::String* NS::ProcessInfo::processName() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(processName)); } _NS_INLINE void NS::ProcessInfo::setProcessName(const String* pString) { Object::sendMessage(this, _NS_PRIVATE_SEL(setProcessName_), pString); } _NS_INLINE int NS::ProcessInfo::processIdentifier() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(processIdentifier)); } _NS_INLINE NS::String* NS::ProcessInfo::globallyUniqueString() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(globallyUniqueString)); } _NS_INLINE NS::String* NS::ProcessInfo::userName() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(userName)); } _NS_INLINE NS::String* NS::ProcessInfo::fullUserName() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(fullUserName)); } _NS_INLINE NS::UInteger NS::ProcessInfo::operatingSystem() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(operatingSystem)); } _NS_INLINE NS::OperatingSystemVersion NS::ProcessInfo::operatingSystemVersion() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(operatingSystemVersion)); } _NS_INLINE NS::String* NS::ProcessInfo::operatingSystemVersionString() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(operatingSystemVersionString)); } _NS_INLINE bool NS::ProcessInfo::isOperatingSystemAtLeastVersion(OperatingSystemVersion version) const { return Object::sendMessage(this, _NS_PRIVATE_SEL(isOperatingSystemAtLeastVersion_), version); } _NS_INLINE NS::UInteger NS::ProcessInfo::processorCount() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(processorCount)); } _NS_INLINE NS::UInteger NS::ProcessInfo::activeProcessorCount() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(activeProcessorCount)); } _NS_INLINE unsigned long long NS::ProcessInfo::physicalMemory() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(physicalMemory)); } _NS_INLINE NS::TimeInterval
NS::ProcessInfo::systemUptime() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(systemUptime)); } _NS_INLINE void NS::ProcessInfo::disableSuddenTermination() { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(disableSuddenTermination)); } _NS_INLINE void NS::ProcessInfo::enableSuddenTermination() { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(enableSuddenTermination)); } _NS_INLINE void NS::ProcessInfo::disableAutomaticTermination(const String* pReason) { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(disableAutomaticTermination_), pReason); } _NS_INLINE void NS::ProcessInfo::enableAutomaticTermination(const String* pReason) { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(enableAutomaticTermination_), pReason); } _NS_INLINE bool NS::ProcessInfo::automaticTerminationSupportEnabled() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(automaticTerminationSupportEnabled)); } _NS_INLINE void NS::ProcessInfo::setAutomaticTerminationSupportEnabled(bool enabled) { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(setAutomaticTerminationSupportEnabled_), enabled); } _NS_INLINE NS::Object* NS::ProcessInfo::beginActivity(ActivityOptions options, const String* pReason) { return Object::sendMessage(this, _NS_PRIVATE_SEL(beginActivityWithOptions_reason_), options, pReason); } _NS_INLINE void NS::ProcessInfo::endActivity(Object* pActivity) { Object::sendMessage(this, _NS_PRIVATE_SEL(endActivity_), pActivity); } _NS_INLINE void NS::ProcessInfo::performActivity(ActivityOptions options, const String* pReason, void (^block)(void)) { Object::sendMessage(this, _NS_PRIVATE_SEL(performActivityWithOptions_reason_usingBlock_), options, pReason, block); } _NS_INLINE void NS::ProcessInfo::performActivity(ActivityOptions options, const String* pReason, const std::function& function) { __block std::function blockFunction = function; performActivity(options, pReason, ^() { blockFunction(); }); } _NS_INLINE void NS::ProcessInfo::performExpiringActivity(const String* pReason, void
// Tail of ProcessInfo (the std::function-based performExpiringActivity also
// copies its handler into a __block variable before forwarding), then the
// immutable NS::Set wrapper. The NS::Set::init() definition is cut off at the
// end of this extracted chunk.
(^block)(bool expired)) { Object::sendMessageSafe(this, _NS_PRIVATE_SEL(performExpiringActivityWithReason_usingBlock_), pReason, block); } _NS_INLINE void NS::ProcessInfo::performExpiringActivity(const String* pReason, const std::function& function) { __block std::function blockFunction = function; performExpiringActivity(pReason, ^(bool expired) { blockFunction(expired); }); } _NS_INLINE NS::ProcessInfoThermalState NS::ProcessInfo::thermalState() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(thermalState)); } _NS_INLINE bool NS::ProcessInfo::isLowPowerModeEnabled() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(isLowPowerModeEnabled)); } _NS_INLINE bool NS::ProcessInfo::isiOSAppOnMac() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(isiOSAppOnMac)); } _NS_INLINE bool NS::ProcessInfo::isMacCatalystApp() const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(isMacCatalystApp)); } _NS_INLINE bool NS::ProcessInfo::isDeviceCertified(DeviceCertification performanceTier) const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(isDeviceCertified_), performanceTier); } _NS_INLINE bool NS::ProcessInfo::hasPerformanceProfile(ProcessPerformanceProfile performanceProfile) const { return Object::sendMessageSafe(this, _NS_PRIVATE_SEL(hasPerformanceProfile_), performanceProfile); } /*****Immutable Set*******/ namespace NS { class Set : public NS::Copying { public: UInteger count() const; Enumerator* objectEnumerator() const; static Set* alloc(); Set* init(); Set* init(const Object* const* pObjects, UInteger count); Set* init(const class Coder* pCoder); }; } _NS_INLINE NS::UInteger NS::Set::count() const { return NS::Object::sendMessage(this, _NS_PRIVATE_SEL(count)); } _NS_INLINE NS::Enumerator* NS::Set::objectEnumerator() const { return NS::Object::sendMessage*>(this, _NS_PRIVATE_SEL(objectEnumerator)); } _NS_INLINE NS::Set* NS::Set::alloc() { return NS::Object::alloc(_NS_PRIVATE_CLS(NSSet)); } _NS_INLINE NS::Set* NS::Set::init() {
return NS::Object::init(); } _NS_INLINE NS::Set* NS::Set::init(const Object* const* pObjects, NS::UInteger count) { return NS::Object::sendMessage(this, _NS_PRIVATE_SEL(initWithObjects_count_), pObjects, count); } _NS_INLINE NS::Set* NS::Set::init(const class Coder* pCoder) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithCoder_), pCoder); } #pragma once #include namespace NS { template class SharedPtr { public: /** * Create a new null pointer. */ SharedPtr(); /** * Destroy this SharedPtr, decreasing the reference count. */ ~SharedPtr(); /** * Create a new null pointer. */ SharedPtr(std::nullptr_t) noexcept; /** * SharedPtr copy constructor. */ SharedPtr(const SharedPtr<_Class>& other) noexcept; /** * Construction from another pointee type. */ template SharedPtr(const SharedPtr<_OtherClass>& other, typename std::enable_if_t> * = nullptr) noexcept; /** * SharedPtr move constructor. */ SharedPtr(SharedPtr<_Class>&& other) noexcept; /** * Move from another pointee type. */ template SharedPtr(SharedPtr<_OtherClass>&& other, typename std::enable_if_t> * = nullptr) noexcept; /** * Copy assignment operator. * Copying increases reference count. Only releases previous pointee if objects are different. */ SharedPtr& operator=(const SharedPtr<_Class>& other); /** * Copy-assignment from different pointee. * Copying increases reference count. Only releases previous pointee if objects are different. */ template typename std::enable_if_t, SharedPtr &> operator=(const SharedPtr<_OtherClass>& other); /** * Move assignment operator. * Move without affecting reference counts, unless pointees are equal. Moved-from object is reset to nullptr. */ SharedPtr& operator=(SharedPtr<_Class>&& other); /** * Move-asignment from different pointee. * Move without affecting reference counts, unless pointees are equal. Moved-from object is reset to nullptr. */ template typename std::enable_if_t, SharedPtr &> operator=(SharedPtr<_OtherClass>&& other); /** * Access raw pointee. 
* @warning Avoid wrapping the returned value again, as it may lead double frees unless this object becomes detached. */ _Class* get() const; /** * Call operations directly on the pointee. */ _Class* operator->() const; /** * Implicit cast to bool. */ explicit operator bool() const; /** * Reset this SharedPtr to null, decreasing the reference count. */ void reset(); /** * Detach the SharedPtr from the pointee, without decreasing the reference count. */ void detach(); template friend SharedPtr<_OtherClass> RetainPtr(_OtherClass* ptr); template friend SharedPtr<_OtherClass> TransferPtr(_OtherClass* ptr); private: _Class* m_pObject; }; /** * Create a SharedPtr by retaining an existing raw pointer. * Increases the reference count of the passed-in object. * If the passed-in object was in an AutoreleasePool, it will be removed from it. */ template _NS_INLINE NS::SharedPtr<_Class> RetainPtr(_Class* pObject) { NS::SharedPtr<_Class> ret; ret.m_pObject = pObject->retain(); return ret; } /* * Create a SharedPtr by transfering the ownership of an existing raw pointer to SharedPtr. * Does not increase the reference count of the passed-in pointer, it is assumed to be >= 1. * This method does not remove objects from an AutoreleasePool. 
*/ template _NS_INLINE NS::SharedPtr<_Class> TransferPtr(_Class* pObject) { NS::SharedPtr<_Class> ret; ret.m_pObject = pObject; return ret; } } template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr() : m_pObject(nullptr) { } template _NS_INLINE NS::SharedPtr<_Class>::~SharedPtr<_Class>() __attribute__((no_sanitize("undefined"))) { m_pObject->release(); } template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr(std::nullptr_t) noexcept : m_pObject(nullptr) { } template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr(const SharedPtr<_Class>& other) noexcept : m_pObject(other.m_pObject->retain()) { } template template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr(const SharedPtr<_OtherClass>& other, typename std::enable_if_t> *) noexcept : m_pObject(reinterpret_cast<_Class*>(other.get()->retain())) { } template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr(SharedPtr<_Class>&& other) noexcept : m_pObject(other.m_pObject) { other.m_pObject = nullptr; } template template _NS_INLINE NS::SharedPtr<_Class>::SharedPtr(SharedPtr<_OtherClass>&& other, typename std::enable_if_t> *) noexcept : m_pObject(reinterpret_cast<_Class*>(other.get())) { other.detach(); } template _NS_INLINE _Class* NS::SharedPtr<_Class>::get() const { return m_pObject; } template _NS_INLINE _Class* NS::SharedPtr<_Class>::operator->() const { return m_pObject; } template _NS_INLINE NS::SharedPtr<_Class>::operator bool() const { return nullptr != m_pObject; } template _NS_INLINE void NS::SharedPtr<_Class>::reset() __attribute__((no_sanitize("undefined"))) { m_pObject->release(); m_pObject = nullptr; } template _NS_INLINE void NS::SharedPtr<_Class>::detach() { m_pObject = nullptr; } template _NS_INLINE NS::SharedPtr<_Class>& NS::SharedPtr<_Class>::operator=(const SharedPtr<_Class>& other) __attribute__((no_sanitize("undefined"))) { _Class* pOldObject = m_pObject; m_pObject = other.m_pObject->retain(); pOldObject->release(); return *this; } template template typename std::enable_if_t, NS::SharedPtr<_Class> &> _NS_INLINE 
NS::SharedPtr<_Class>::operator=(const SharedPtr<_OtherClass>& other) __attribute__((no_sanitize("undefined"))) { _Class* pOldObject = m_pObject; m_pObject = reinterpret_cast<_Class*>(other.get()->retain()); pOldObject->release(); return *this; } template _NS_INLINE NS::SharedPtr<_Class>& NS::SharedPtr<_Class>::operator=(SharedPtr<_Class>&& other) __attribute__((no_sanitize("undefined"))) { if (m_pObject != other.m_pObject) { m_pObject->release(); m_pObject = other.m_pObject; } else { m_pObject = other.m_pObject; other.m_pObject->release(); } other.m_pObject = nullptr; return *this; } template template typename std::enable_if_t, NS::SharedPtr<_Class> &> _NS_INLINE NS::SharedPtr<_Class>::operator=(SharedPtr<_OtherClass>&& other) __attribute__((no_sanitize("undefined"))) { if (m_pObject != other.get()) { m_pObject->release(); m_pObject = reinterpret_cast<_Class*>(other.get()); other.detach(); } else { m_pObject = other.get(); other.reset(); } return *this; } template _NS_INLINE bool operator==(const NS::SharedPtr<_ClassLhs>& lhs, const NS::SharedPtr<_ClassRhs>& rhs) { return lhs.get() == rhs.get(); } template _NS_INLINE bool operator!=(const NS::SharedPtr<_ClassLhs>& lhs, const NS::SharedPtr<_ClassRhs>& rhs) { return lhs.get() != rhs.get(); } namespace NS { class URL : public Copying { public: static URL* fileURLWithPath(const class String* pPath); static URL* alloc(); URL* init(); URL* init(const class String* pString); URL* initFileURLWithPath(const class String* pPath); const char* fileSystemRepresentation() const; }; } _NS_INLINE NS::URL* NS::URL::fileURLWithPath(const String* pPath) { return Object::sendMessage(_NS_PRIVATE_CLS(NSURL), _NS_PRIVATE_SEL(fileURLWithPath_), pPath); } _NS_INLINE NS::URL* NS::URL::alloc() { return Object::alloc(_NS_PRIVATE_CLS(NSURL)); } _NS_INLINE NS::URL* NS::URL::init() { return Object::init(); } _NS_INLINE NS::URL* NS::URL::init(const String* pString) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initWithString_), pString); } 
_NS_INLINE NS::URL* NS::URL::initFileURLWithPath(const String* pPath) { return Object::sendMessage(this, _NS_PRIVATE_SEL(initFileURLWithPath_), pPath); } _NS_INLINE const char* NS::URL::fileSystemRepresentation() const { return Object::sendMessage(this, _NS_PRIVATE_SEL(fileSystemRepresentation)); } #pragma once #define _MTL_EXPORT _NS_EXPORT #define _MTL_EXTERN _NS_EXTERN #define _MTL_INLINE _NS_INLINE #define _MTL_PACKED _NS_PACKED #define _MTL_CONST(type, name) _NS_CONST(type, name) #define _MTL_ENUM(type, name) _NS_ENUM(type, name) #define _MTL_OPTIONS(type, name) _NS_OPTIONS(type, name) #define _MTL_VALIDATE_SIZE(ns, name) _NS_VALIDATE_SIZE(ns, name) #define _MTL_VALIDATE_ENUM(ns, name) _NS_VALIDATE_ENUM(ns, name) #pragma once #include #define _MTL_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol) #define _MTL_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor) #if defined(MTL_PRIVATE_IMPLEMENTATION) #ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _MTL_PRIVATE_VISIBILITY __attribute__((visibility("hidden"))) #else #define _MTL_PRIVATE_VISIBILITY __attribute__((visibility("default"))) #endif // METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _MTL_PRIVATE_IMPORT __attribute__((weak_import)) #ifdef __OBJC__ #define _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol)) #define _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge void*)objc_getProtocol(#symbol)) #else #define _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol) #define _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol) #endif // __OBJC__ #define _MTL_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _MTL_PRIVATE_VISIBILITY = _MTL_PRIVATE_OBJC_LOOKUP_CLASS(symbol) #define _MTL_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _MTL_PRIVATE_VISIBILITY = _MTL_PRIVATE_OBJC_GET_PROTOCOL(symbol) #define _MTL_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _MTL_PRIVATE_VISIBILITY = sel_registerName(symbol) #include #define MTL_DEF_FUNC( name, signature ) \ using 
Fn##name = signature; \ Fn##name name = reinterpret_cast< Fn##name >( dlsym( RTLD_DEFAULT, #name ) ) namespace MTL::Private { template inline _Type const LoadSymbol(const char* pSymbol) { const _Type* pAddress = static_cast<_Type*>(dlsym(RTLD_DEFAULT, pSymbol)); return pAddress ? *pAddress : nullptr; } } // MTL::Private #if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0) #define _MTL_PRIVATE_DEF_STR(type, symbol) \ _MTL_EXTERN type const MTL##symbol _MTL_PRIVATE_IMPORT; \ type const MTL::symbol = (nullptr != &MTL##symbol) ? MTL##symbol : nullptr #define _MTL_PRIVATE_DEF_CONST(type, symbol) \ _MTL_EXTERN type const MTL##symbol _MTL_PRIVATE_IMPORT; \ type const MTL::symbol = (nullptr != &MTL##symbol) ? MTL##symbol : nullptr #define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) \ _MTL_EXTERN type const MTL##symbol; \ type const MTL::symbol = Private::LoadSymbol("MTL" #symbol) #else #define _MTL_PRIVATE_DEF_STR(type, symbol) \ _MTL_EXTERN type const MTL##symbol; \ type const MTL::symbol = Private::LoadSymbol("MTL" #symbol) #define _MTL_PRIVATE_DEF_CONST(type, symbol) \ _MTL_EXTERN type const MTL##symbol; \ type const MTL::symbol = Private::LoadSymbol("MTL" #symbol) #define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) _MTL_PRIVATE_DEF_CONST(type, symbol) #endif // defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0) #else #define _MTL_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol #define _MTL_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol #define _MTL_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor #define _MTL_PRIVATE_DEF_STR(type, symbol) extern type const MTL::symbol #define _MTL_PRIVATE_DEF_CONST(type, symbol) extern type const MTL::symbol #define _MTL_PRIVATE_DEF_WEAK_CONST(type, symbol) extern type const MTL::symbol #endif // MTL_PRIVATE_IMPLEMENTATION namespace MTL { namespace Private { namespace Class { } // Class } // Private } // MTL namespace MTL { namespace Private { namespace Protocol { } // Protocol } // 
Private } // MTL

// Selector handles for MTLCaptureScope (beginScope/endScope).
namespace MTL
{
namespace Private
{
namespace Selector
{
_MTL_PRIVATE_DEF_SEL(beginScope, "beginScope");
_MTL_PRIVATE_DEF_SEL(endScope, "endScope");
} // Selector
} // Private
} // MTL

// Generated table: Objective-C class handles for every concrete Metal class
// the bindings instantiate. Expands to definitions in the implementation TU
// (MTL_PRIVATE_IMPLEMENTATION) and to extern declarations elsewhere.
// Do not edit by hand; regenerated from the Metal framework headers.
namespace MTL::Private::Class {
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureCurveGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLAccelerationStructureTriangleGeometryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLArchitecture);
_MTL_PRIVATE_DEF_CLS(MTLArgument);
_MTL_PRIVATE_DEF_CLS(MTLArgumentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLArrayType);
_MTL_PRIVATE_DEF_CLS(MTLAttribute);
_MTL_PRIVATE_DEF_CLS(MTLAttributeDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLAttributeDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLBinaryArchiveDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLBlitPassDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLBlitPassSampleBufferAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLBlitPassSampleBufferAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLBufferLayoutDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLBufferLayoutDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLCaptureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLCaptureManager);
_MTL_PRIVATE_DEF_CLS(MTLCommandBufferDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLCommandQueueDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLCompileOptions);
_MTL_PRIVATE_DEF_CLS(MTLComputePassDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLComputePassSampleBufferAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLComputePassSampleBufferAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLComputePipelineDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLComputePipelineReflection);
_MTL_PRIVATE_DEF_CLS(MTLCounterSampleBufferDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLDepthStencilDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLFunctionConstant);
_MTL_PRIVATE_DEF_CLS(MTLFunctionConstantValues);
_MTL_PRIVATE_DEF_CLS(MTLFunctionDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingAttributeAlwaysInline);
_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingFunctionNode);
_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingGraph);
_MTL_PRIVATE_DEF_CLS(MTLFunctionStitchingInputNode);
_MTL_PRIVATE_DEF_CLS(MTLHeapDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLIOCommandQueueDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLIndirectCommandBufferDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLIndirectInstanceAccelerationStructureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLInstanceAccelerationStructureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLIntersectionFunctionDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLIntersectionFunctionTableDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLLinkedFunctions);
_MTL_PRIVATE_DEF_CLS(MTLLogStateDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLMeshRenderPipelineDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLMotionKeyframeData);
_MTL_PRIVATE_DEF_CLS(MTLPipelineBufferDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLPipelineBufferDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLPointerType);
_MTL_PRIVATE_DEF_CLS(MTLPrimitiveAccelerationStructureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateLayerArray);
_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateLayerDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateMapDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRasterizationRateSampleArray);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassColorAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassColorAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassDepthAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassSampleBufferAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassSampleBufferAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLRenderPassStencilAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineColorAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineColorAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineFunctionsDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLRenderPipelineReflection);
_MTL_PRIVATE_DEF_CLS(MTLResidencySetDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLSamplerDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLSharedEventHandle);
_MTL_PRIVATE_DEF_CLS(MTLSharedEventListener);
_MTL_PRIVATE_DEF_CLS(MTLSharedTextureHandle);
_MTL_PRIVATE_DEF_CLS(MTLStageInputOutputDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLStencilDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLStitchedLibraryDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLStructMember);
_MTL_PRIVATE_DEF_CLS(MTLStructType);
_MTL_PRIVATE_DEF_CLS(MTLTextureDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLTextureReferenceType);
_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineColorAttachmentDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineColorAttachmentDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLTileRenderPipelineDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLType);
_MTL_PRIVATE_DEF_CLS(MTLVertexAttribute);
_MTL_PRIVATE_DEF_CLS(MTLVertexAttributeDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLVertexAttributeDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLVertexBufferLayoutDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLVertexBufferLayoutDescriptorArray);
_MTL_PRIVATE_DEF_CLS(MTLVertexDescriptor);
_MTL_PRIVATE_DEF_CLS(MTLVisibleFunctionTableDescriptor);
}

// Generated table: Objective-C protocol handles for the Metal protocols the
// bindings message. Same expansion scheme as the class table above.
// Do not edit by hand; regenerated from the Metal framework headers.
namespace MTL::Private::Protocol {
_MTL_PRIVATE_DEF_PRO(MTLAccelerationStructure);
_MTL_PRIVATE_DEF_PRO(MTLAccelerationStructureCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLAllocation);
_MTL_PRIVATE_DEF_PRO(MTLArgumentEncoder);
_MTL_PRIVATE_DEF_PRO(MTLBinaryArchive);
_MTL_PRIVATE_DEF_PRO(MTLBinding);
_MTL_PRIVATE_DEF_PRO(MTLBlitCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLBuffer);
_MTL_PRIVATE_DEF_PRO(MTLBufferBinding);
_MTL_PRIVATE_DEF_PRO(MTLCommandBuffer);
_MTL_PRIVATE_DEF_PRO(MTLCommandBufferEncoderInfo);
_MTL_PRIVATE_DEF_PRO(MTLCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLCommandQueue);
_MTL_PRIVATE_DEF_PRO(MTLComputeCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLComputePipelineState);
_MTL_PRIVATE_DEF_PRO(MTLCounter);
_MTL_PRIVATE_DEF_PRO(MTLCounterSampleBuffer);
_MTL_PRIVATE_DEF_PRO(MTLCounterSet);
_MTL_PRIVATE_DEF_PRO(MTLDepthStencilState);
_MTL_PRIVATE_DEF_PRO(MTLDevice);
_MTL_PRIVATE_DEF_PRO(MTLDrawable);
_MTL_PRIVATE_DEF_PRO(MTLDynamicLibrary);
_MTL_PRIVATE_DEF_PRO(MTLEvent);
_MTL_PRIVATE_DEF_PRO(MTLFence);
_MTL_PRIVATE_DEF_PRO(MTLFunction);
_MTL_PRIVATE_DEF_PRO(MTLFunctionHandle);
_MTL_PRIVATE_DEF_PRO(MTLFunctionLog);
_MTL_PRIVATE_DEF_PRO(MTLFunctionLogDebugLocation);
_MTL_PRIVATE_DEF_PRO(MTLFunctionStitchingAttribute);
_MTL_PRIVATE_DEF_PRO(MTLFunctionStitchingNode);
_MTL_PRIVATE_DEF_PRO(MTLHeap);
_MTL_PRIVATE_DEF_PRO(MTLIOCommandBuffer);
_MTL_PRIVATE_DEF_PRO(MTLIOCommandQueue);
_MTL_PRIVATE_DEF_PRO(MTLIOFileHandle);
_MTL_PRIVATE_DEF_PRO(MTLIOScratchBuffer);
_MTL_PRIVATE_DEF_PRO(MTLIOScratchBufferAllocator);
_MTL_PRIVATE_DEF_PRO(MTLIndirectCommandBuffer);
_MTL_PRIVATE_DEF_PRO(MTLIndirectComputeCommand);
_MTL_PRIVATE_DEF_PRO(MTLIndirectRenderCommand);
_MTL_PRIVATE_DEF_PRO(MTLIntersectionFunctionTable);
_MTL_PRIVATE_DEF_PRO(MTLLibrary);
_MTL_PRIVATE_DEF_PRO(MTLLogContainer);
_MTL_PRIVATE_DEF_PRO(MTLLogState);
_MTL_PRIVATE_DEF_PRO(MTLObjectPayloadBinding);
_MTL_PRIVATE_DEF_PRO(MTLParallelRenderCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLRasterizationRateMap);
_MTL_PRIVATE_DEF_PRO(MTLRenderCommandEncoder);
_MTL_PRIVATE_DEF_PRO(MTLRenderPipelineState);
_MTL_PRIVATE_DEF_PRO(MTLResidencySet); _MTL_PRIVATE_DEF_PRO(MTLResource); _MTL_PRIVATE_DEF_PRO(MTLResourceStateCommandEncoder); _MTL_PRIVATE_DEF_PRO(MTLSamplerState); _MTL_PRIVATE_DEF_PRO(MTLSharedEvent); _MTL_PRIVATE_DEF_PRO(MTLTexture); _MTL_PRIVATE_DEF_PRO(MTLTextureBinding); _MTL_PRIVATE_DEF_PRO(MTLThreadgroupBinding); _MTL_PRIVATE_DEF_PRO(MTLVisibleFunctionTable); } namespace MTL::Private::Selector { _MTL_PRIVATE_DEF_SEL(GPUEndTime, "GPUEndTime"); _MTL_PRIVATE_DEF_SEL(GPUStartTime, "GPUStartTime"); _MTL_PRIVATE_DEF_SEL(URL, "URL"); _MTL_PRIVATE_DEF_SEL(accelerationStructureCommandEncoder, "accelerationStructureCommandEncoder"); _MTL_PRIVATE_DEF_SEL(accelerationStructureCommandEncoderWithDescriptor_, "accelerationStructureCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(accelerationStructurePassDescriptor, "accelerationStructurePassDescriptor"); _MTL_PRIVATE_DEF_SEL(accelerationStructureSizesWithDescriptor_, "accelerationStructureSizesWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(access, "access"); _MTL_PRIVATE_DEF_SEL(addAllocation_, "addAllocation:"); _MTL_PRIVATE_DEF_SEL(addAllocations_count_, "addAllocations:count:"); _MTL_PRIVATE_DEF_SEL(addBarrier, "addBarrier"); _MTL_PRIVATE_DEF_SEL(addCompletedHandler_, "addCompletedHandler:"); _MTL_PRIVATE_DEF_SEL(addComputePipelineFunctionsWithDescriptor_error_, "addComputePipelineFunctionsWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(addDebugMarker_range_, "addDebugMarker:range:"); _MTL_PRIVATE_DEF_SEL(addFunctionWithDescriptor_library_error_, "addFunctionWithDescriptor:library:error:"); _MTL_PRIVATE_DEF_SEL(addLibraryWithDescriptor_error_, "addLibraryWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(addLogHandler_, "addLogHandler:"); _MTL_PRIVATE_DEF_SEL(addMeshRenderPipelineFunctionsWithDescriptor_error_, "addMeshRenderPipelineFunctionsWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(addPresentedHandler_, "addPresentedHandler:"); _MTL_PRIVATE_DEF_SEL(addRenderPipelineFunctionsWithDescriptor_error_, 
"addRenderPipelineFunctionsWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(addResidencySet_, "addResidencySet:"); _MTL_PRIVATE_DEF_SEL(addResidencySets_count_, "addResidencySets:count:"); _MTL_PRIVATE_DEF_SEL(addScheduledHandler_, "addScheduledHandler:"); _MTL_PRIVATE_DEF_SEL(addTileRenderPipelineFunctionsWithDescriptor_error_, "addTileRenderPipelineFunctionsWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(alignment, "alignment"); _MTL_PRIVATE_DEF_SEL(allAllocations, "allAllocations"); _MTL_PRIVATE_DEF_SEL(allocatedSize, "allocatedSize"); _MTL_PRIVATE_DEF_SEL(allocationCount, "allocationCount"); _MTL_PRIVATE_DEF_SEL(allowDuplicateIntersectionFunctionInvocation, "allowDuplicateIntersectionFunctionInvocation"); _MTL_PRIVATE_DEF_SEL(allowGPUOptimizedContents, "allowGPUOptimizedContents"); _MTL_PRIVATE_DEF_SEL(allowReferencingUndefinedSymbols, "allowReferencingUndefinedSymbols"); _MTL_PRIVATE_DEF_SEL(alphaBlendOperation, "alphaBlendOperation"); _MTL_PRIVATE_DEF_SEL(architecture, "architecture"); _MTL_PRIVATE_DEF_SEL(areBarycentricCoordsSupported, "areBarycentricCoordsSupported"); _MTL_PRIVATE_DEF_SEL(areProgrammableSamplePositionsSupported, "areProgrammableSamplePositionsSupported"); _MTL_PRIVATE_DEF_SEL(areRasterOrderGroupsSupported, "areRasterOrderGroupsSupported"); _MTL_PRIVATE_DEF_SEL(argumentBuffersSupport, "argumentBuffersSupport"); _MTL_PRIVATE_DEF_SEL(argumentDescriptor, "argumentDescriptor"); _MTL_PRIVATE_DEF_SEL(argumentIndex, "argumentIndex"); _MTL_PRIVATE_DEF_SEL(argumentIndexStride, "argumentIndexStride"); _MTL_PRIVATE_DEF_SEL(arguments, "arguments"); _MTL_PRIVATE_DEF_SEL(arrayLength, "arrayLength"); _MTL_PRIVATE_DEF_SEL(arrayType, "arrayType"); _MTL_PRIVATE_DEF_SEL(attributeIndex, "attributeIndex"); _MTL_PRIVATE_DEF_SEL(attributeType, "attributeType"); _MTL_PRIVATE_DEF_SEL(attributes, "attributes"); _MTL_PRIVATE_DEF_SEL(backFaceStencil, "backFaceStencil"); _MTL_PRIVATE_DEF_SEL(binaryArchives, "binaryArchives"); _MTL_PRIVATE_DEF_SEL(binaryFunctions, 
"binaryFunctions"); _MTL_PRIVATE_DEF_SEL(bindings, "bindings"); _MTL_PRIVATE_DEF_SEL(blitCommandEncoder, "blitCommandEncoder"); _MTL_PRIVATE_DEF_SEL(blitCommandEncoderWithDescriptor_, "blitCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(blitPassDescriptor, "blitPassDescriptor"); _MTL_PRIVATE_DEF_SEL(borderColor, "borderColor"); _MTL_PRIVATE_DEF_SEL(boundingBoxBuffer, "boundingBoxBuffer"); _MTL_PRIVATE_DEF_SEL(boundingBoxBufferOffset, "boundingBoxBufferOffset"); _MTL_PRIVATE_DEF_SEL(boundingBoxBuffers, "boundingBoxBuffers"); _MTL_PRIVATE_DEF_SEL(boundingBoxCount, "boundingBoxCount"); _MTL_PRIVATE_DEF_SEL(boundingBoxStride, "boundingBoxStride"); _MTL_PRIVATE_DEF_SEL(buffer, "buffer"); _MTL_PRIVATE_DEF_SEL(bufferAlignment, "bufferAlignment"); _MTL_PRIVATE_DEF_SEL(bufferBytesPerRow, "bufferBytesPerRow"); _MTL_PRIVATE_DEF_SEL(bufferDataSize, "bufferDataSize"); _MTL_PRIVATE_DEF_SEL(bufferDataType, "bufferDataType"); _MTL_PRIVATE_DEF_SEL(bufferIndex, "bufferIndex"); _MTL_PRIVATE_DEF_SEL(bufferOffset, "bufferOffset"); _MTL_PRIVATE_DEF_SEL(bufferPointerType, "bufferPointerType"); _MTL_PRIVATE_DEF_SEL(bufferSize, "bufferSize"); _MTL_PRIVATE_DEF_SEL(bufferStructType, "bufferStructType"); _MTL_PRIVATE_DEF_SEL(buffers, "buffers"); _MTL_PRIVATE_DEF_SEL(buildAccelerationStructure_descriptor_scratchBuffer_scratchBufferOffset_, "buildAccelerationStructure:descriptor:scratchBuffer:scratchBufferOffset:"); _MTL_PRIVATE_DEF_SEL(captureObject, "captureObject"); _MTL_PRIVATE_DEF_SEL(clearBarrier, "clearBarrier"); _MTL_PRIVATE_DEF_SEL(clearColor, "clearColor"); _MTL_PRIVATE_DEF_SEL(clearDepth, "clearDepth"); _MTL_PRIVATE_DEF_SEL(clearStencil, "clearStencil"); _MTL_PRIVATE_DEF_SEL(colorAttachments, "colorAttachments"); _MTL_PRIVATE_DEF_SEL(column, "column"); _MTL_PRIVATE_DEF_SEL(commandBuffer, "commandBuffer"); _MTL_PRIVATE_DEF_SEL(commandBufferWithDescriptor_, "commandBufferWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(commandBufferWithUnretainedReferences, 
"commandBufferWithUnretainedReferences"); _MTL_PRIVATE_DEF_SEL(commandQueue, "commandQueue"); _MTL_PRIVATE_DEF_SEL(commandTypes, "commandTypes"); _MTL_PRIVATE_DEF_SEL(commit, "commit"); _MTL_PRIVATE_DEF_SEL(compareFunction, "compareFunction"); _MTL_PRIVATE_DEF_SEL(compileSymbolVisibility, "compileSymbolVisibility"); _MTL_PRIVATE_DEF_SEL(compressionType, "compressionType"); _MTL_PRIVATE_DEF_SEL(computeCommandEncoder, "computeCommandEncoder"); _MTL_PRIVATE_DEF_SEL(computeCommandEncoderWithDescriptor_, "computeCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(computeCommandEncoderWithDispatchType_, "computeCommandEncoderWithDispatchType:"); _MTL_PRIVATE_DEF_SEL(computeFunction, "computeFunction"); _MTL_PRIVATE_DEF_SEL(computePassDescriptor, "computePassDescriptor"); _MTL_PRIVATE_DEF_SEL(concurrentDispatchThreadgroups_threadsPerThreadgroup_, "concurrentDispatchThreadgroups:threadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(concurrentDispatchThreads_threadsPerThreadgroup_, "concurrentDispatchThreads:threadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(constantBlockAlignment, "constantBlockAlignment"); _MTL_PRIVATE_DEF_SEL(constantDataAtIndex_, "constantDataAtIndex:"); _MTL_PRIVATE_DEF_SEL(constantValues, "constantValues"); _MTL_PRIVATE_DEF_SEL(containsAllocation_, "containsAllocation:"); _MTL_PRIVATE_DEF_SEL(contents, "contents"); _MTL_PRIVATE_DEF_SEL(controlDependencies, "controlDependencies"); _MTL_PRIVATE_DEF_SEL(controlPointBuffer, "controlPointBuffer"); _MTL_PRIVATE_DEF_SEL(controlPointBufferOffset, "controlPointBufferOffset"); _MTL_PRIVATE_DEF_SEL(controlPointBuffers, "controlPointBuffers"); _MTL_PRIVATE_DEF_SEL(controlPointCount, "controlPointCount"); _MTL_PRIVATE_DEF_SEL(controlPointFormat, "controlPointFormat"); _MTL_PRIVATE_DEF_SEL(controlPointStride, "controlPointStride"); _MTL_PRIVATE_DEF_SEL(convertSparsePixelRegions_toTileRegions_withTileSize_alignmentMode_numRegions_, "convertSparsePixelRegions:toTileRegions:withTileSize:alignmentMode:numRegions:"); 
_MTL_PRIVATE_DEF_SEL(convertSparseTileRegions_toPixelRegions_withTileSize_numRegions_, "convertSparseTileRegions:toPixelRegions:withTileSize:numRegions:"); _MTL_PRIVATE_DEF_SEL(copyAccelerationStructure_toAccelerationStructure_, "copyAccelerationStructure:toAccelerationStructure:"); _MTL_PRIVATE_DEF_SEL(copyAndCompactAccelerationStructure_toAccelerationStructure_, "copyAndCompactAccelerationStructure:toAccelerationStructure:"); _MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_, "copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:"); _MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_options_, "copyFromBuffer:sourceOffset:sourceBytesPerRow:sourceBytesPerImage:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:options:"); _MTL_PRIVATE_DEF_SEL(copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size_, "copyFromBuffer:sourceOffset:toBuffer:destinationOffset:size:"); _MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_, "copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:"); _MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_options_, "copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toBuffer:destinationOffset:destinationBytesPerRow:destinationBytesPerImage:options:"); _MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_, 
"copyFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:"); _MTL_PRIVATE_DEF_SEL(copyFromTexture_sourceSlice_sourceLevel_toTexture_destinationSlice_destinationLevel_sliceCount_levelCount_, "copyFromTexture:sourceSlice:sourceLevel:toTexture:destinationSlice:destinationLevel:sliceCount:levelCount:"); _MTL_PRIVATE_DEF_SEL(copyFromTexture_toTexture_, "copyFromTexture:toTexture:"); _MTL_PRIVATE_DEF_SEL(copyIndirectCommandBuffer_sourceRange_destination_destinationIndex_, "copyIndirectCommandBuffer:sourceRange:destination:destinationIndex:"); _MTL_PRIVATE_DEF_SEL(copyParameterDataToBuffer_offset_, "copyParameterDataToBuffer:offset:"); _MTL_PRIVATE_DEF_SEL(copyStatusToBuffer_offset_, "copyStatusToBuffer:offset:"); _MTL_PRIVATE_DEF_SEL(counterSet, "counterSet"); _MTL_PRIVATE_DEF_SEL(counterSets, "counterSets"); _MTL_PRIVATE_DEF_SEL(counters, "counters"); _MTL_PRIVATE_DEF_SEL(cpuCacheMode, "cpuCacheMode"); _MTL_PRIVATE_DEF_SEL(currentAllocatedSize, "currentAllocatedSize"); _MTL_PRIVATE_DEF_SEL(curveBasis, "curveBasis"); _MTL_PRIVATE_DEF_SEL(curveEndCaps, "curveEndCaps"); _MTL_PRIVATE_DEF_SEL(curveType, "curveType"); _MTL_PRIVATE_DEF_SEL(data, "data"); _MTL_PRIVATE_DEF_SEL(dataSize, "dataSize"); _MTL_PRIVATE_DEF_SEL(dataType, "dataType"); _MTL_PRIVATE_DEF_SEL(dealloc, "dealloc"); _MTL_PRIVATE_DEF_SEL(debugLocation, "debugLocation"); _MTL_PRIVATE_DEF_SEL(debugSignposts, "debugSignposts"); _MTL_PRIVATE_DEF_SEL(defaultCaptureScope, "defaultCaptureScope"); _MTL_PRIVATE_DEF_SEL(defaultRasterSampleCount, "defaultRasterSampleCount"); _MTL_PRIVATE_DEF_SEL(depth, "depth"); _MTL_PRIVATE_DEF_SEL(depthAttachment, "depthAttachment"); _MTL_PRIVATE_DEF_SEL(depthAttachmentPixelFormat, "depthAttachmentPixelFormat"); _MTL_PRIVATE_DEF_SEL(depthCompareFunction, "depthCompareFunction"); _MTL_PRIVATE_DEF_SEL(depthFailureOperation, "depthFailureOperation"); _MTL_PRIVATE_DEF_SEL(depthPlane, "depthPlane"); 
_MTL_PRIVATE_DEF_SEL(depthResolveFilter, "depthResolveFilter"); _MTL_PRIVATE_DEF_SEL(depthStencilPassOperation, "depthStencilPassOperation"); _MTL_PRIVATE_DEF_SEL(descriptor, "descriptor"); _MTL_PRIVATE_DEF_SEL(destination, "destination"); _MTL_PRIVATE_DEF_SEL(destinationAlphaBlendFactor, "destinationAlphaBlendFactor"); _MTL_PRIVATE_DEF_SEL(destinationRGBBlendFactor, "destinationRGBBlendFactor"); _MTL_PRIVATE_DEF_SEL(device, "device"); _MTL_PRIVATE_DEF_SEL(didModifyRange_, "didModifyRange:"); _MTL_PRIVATE_DEF_SEL(dispatchQueue, "dispatchQueue"); _MTL_PRIVATE_DEF_SEL(dispatchThreadgroups_threadsPerThreadgroup_, "dispatchThreadgroups:threadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(dispatchThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerThreadgroup_, "dispatchThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(dispatchThreads_threadsPerThreadgroup_, "dispatchThreads:threadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(dispatchThreadsPerTile_, "dispatchThreadsPerTile:"); _MTL_PRIVATE_DEF_SEL(dispatchType, "dispatchType"); _MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_indirectBuffer_indirectBufferOffset_, "drawIndexedPatches:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_, "drawIndexedPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:instanceCount:baseInstance:"); 
_MTL_PRIVATE_DEF_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_, "drawIndexedPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:controlPointIndexBuffer:controlPointIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:"); _MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_, "drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:"); _MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_, "drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:"); _MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_, "drawIndexedPrimitives:indexCount:indexType:indexBuffer:indexBufferOffset:instanceCount:baseVertex:baseInstance:"); _MTL_PRIVATE_DEF_SEL(drawIndexedPrimitives_indexType_indexBuffer_indexBufferOffset_indirectBuffer_indirectBufferOffset_, "drawIndexedPrimitives:indexType:indexBuffer:indexBufferOffset:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_, "drawMeshThreadgroups:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:"); _MTL_PRIVATE_DEF_SEL(drawMeshThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_, "drawMeshThreadgroupsWithIndirectBuffer:indirectBufferOffset:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:"); _MTL_PRIVATE_DEF_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_, "drawMeshThreads:threadsPerObjectThreadgroup:threadsPerMeshThreadgroup:"); 
_MTL_PRIVATE_DEF_SEL(drawPatches_patchIndexBuffer_patchIndexBufferOffset_indirectBuffer_indirectBufferOffset_, "drawPatches:patchIndexBuffer:patchIndexBufferOffset:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_, "drawPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:instanceCount:baseInstance:"); _MTL_PRIVATE_DEF_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_, "drawPatches:patchStart:patchCount:patchIndexBuffer:patchIndexBufferOffset:instanceCount:baseInstance:tessellationFactorBuffer:tessellationFactorBufferOffset:tessellationFactorBufferInstanceStride:"); _MTL_PRIVATE_DEF_SEL(drawPrimitives_indirectBuffer_indirectBufferOffset_, "drawPrimitives:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_, "drawPrimitives:vertexStart:vertexCount:"); _MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_, "drawPrimitives:vertexStart:vertexCount:instanceCount:"); _MTL_PRIVATE_DEF_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_, "drawPrimitives:vertexStart:vertexCount:instanceCount:baseInstance:"); _MTL_PRIVATE_DEF_SEL(drawableID, "drawableID"); _MTL_PRIVATE_DEF_SEL(elementArrayType, "elementArrayType"); _MTL_PRIVATE_DEF_SEL(elementIsArgumentBuffer, "elementIsArgumentBuffer"); _MTL_PRIVATE_DEF_SEL(elementPointerType, "elementPointerType"); _MTL_PRIVATE_DEF_SEL(elementStructType, "elementStructType"); _MTL_PRIVATE_DEF_SEL(elementTextureReferenceType, "elementTextureReferenceType"); _MTL_PRIVATE_DEF_SEL(elementType, "elementType"); _MTL_PRIVATE_DEF_SEL(enableLogging, "enableLogging"); _MTL_PRIVATE_DEF_SEL(encodeSignalEvent_value_, "encodeSignalEvent:value:"); 
_MTL_PRIVATE_DEF_SEL(encodeWaitForEvent_value_, "encodeWaitForEvent:value:"); _MTL_PRIVATE_DEF_SEL(encodedLength, "encodedLength"); _MTL_PRIVATE_DEF_SEL(encoderLabel, "encoderLabel"); _MTL_PRIVATE_DEF_SEL(endEncoding, "endEncoding"); _MTL_PRIVATE_DEF_SEL(endOfEncoderSampleIndex, "endOfEncoderSampleIndex"); _MTL_PRIVATE_DEF_SEL(endOfFragmentSampleIndex, "endOfFragmentSampleIndex"); _MTL_PRIVATE_DEF_SEL(endOfVertexSampleIndex, "endOfVertexSampleIndex"); _MTL_PRIVATE_DEF_SEL(endResidency, "endResidency"); _MTL_PRIVATE_DEF_SEL(enqueue, "enqueue"); _MTL_PRIVATE_DEF_SEL(enqueueBarrier, "enqueueBarrier"); _MTL_PRIVATE_DEF_SEL(error, "error"); _MTL_PRIVATE_DEF_SEL(errorOptions, "errorOptions"); _MTL_PRIVATE_DEF_SEL(errorState, "errorState"); _MTL_PRIVATE_DEF_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_, "executeCommandsInBuffer:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(executeCommandsInBuffer_withRange_, "executeCommandsInBuffer:withRange:"); _MTL_PRIVATE_DEF_SEL(fastMathEnabled, "fastMathEnabled"); _MTL_PRIVATE_DEF_SEL(fillBuffer_range_value_, "fillBuffer:range:value:"); _MTL_PRIVATE_DEF_SEL(firstMipmapInTail, "firstMipmapInTail"); _MTL_PRIVATE_DEF_SEL(format, "format"); _MTL_PRIVATE_DEF_SEL(fragmentAdditionalBinaryFunctions, "fragmentAdditionalBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(fragmentArguments, "fragmentArguments"); _MTL_PRIVATE_DEF_SEL(fragmentBindings, "fragmentBindings"); _MTL_PRIVATE_DEF_SEL(fragmentBuffers, "fragmentBuffers"); _MTL_PRIVATE_DEF_SEL(fragmentFunction, "fragmentFunction"); _MTL_PRIVATE_DEF_SEL(fragmentLinkedFunctions, "fragmentLinkedFunctions"); _MTL_PRIVATE_DEF_SEL(fragmentPreloadedLibraries, "fragmentPreloadedLibraries"); _MTL_PRIVATE_DEF_SEL(frontFaceStencil, "frontFaceStencil"); _MTL_PRIVATE_DEF_SEL(function, "function"); _MTL_PRIVATE_DEF_SEL(functionConstantsDictionary, "functionConstantsDictionary"); _MTL_PRIVATE_DEF_SEL(functionCount, "functionCount"); _MTL_PRIVATE_DEF_SEL(functionDescriptor, 
"functionDescriptor"); _MTL_PRIVATE_DEF_SEL(functionGraphs, "functionGraphs"); _MTL_PRIVATE_DEF_SEL(functionHandleWithFunction_, "functionHandleWithFunction:"); _MTL_PRIVATE_DEF_SEL(functionHandleWithFunction_stage_, "functionHandleWithFunction:stage:"); _MTL_PRIVATE_DEF_SEL(functionName, "functionName"); _MTL_PRIVATE_DEF_SEL(functionNames, "functionNames"); _MTL_PRIVATE_DEF_SEL(functionType, "functionType"); _MTL_PRIVATE_DEF_SEL(functions, "functions"); _MTL_PRIVATE_DEF_SEL(generateMipmapsForTexture_, "generateMipmapsForTexture:"); _MTL_PRIVATE_DEF_SEL(geometryDescriptors, "geometryDescriptors"); _MTL_PRIVATE_DEF_SEL(getBytes_bytesPerRow_bytesPerImage_fromRegion_mipmapLevel_slice_, "getBytes:bytesPerRow:bytesPerImage:fromRegion:mipmapLevel:slice:"); _MTL_PRIVATE_DEF_SEL(getBytes_bytesPerRow_fromRegion_mipmapLevel_, "getBytes:bytesPerRow:fromRegion:mipmapLevel:"); _MTL_PRIVATE_DEF_SEL(getDefaultSamplePositions_count_, "getDefaultSamplePositions:count:"); _MTL_PRIVATE_DEF_SEL(getSamplePositions_count_, "getSamplePositions:count:"); _MTL_PRIVATE_DEF_SEL(getTextureAccessCounters_region_mipLevel_slice_resetCounters_countersBuffer_countersBufferOffset_, "getTextureAccessCounters:region:mipLevel:slice:resetCounters:countersBuffer:countersBufferOffset:"); _MTL_PRIVATE_DEF_SEL(gpuAddress, "gpuAddress"); _MTL_PRIVATE_DEF_SEL(gpuResourceID, "gpuResourceID"); _MTL_PRIVATE_DEF_SEL(groups, "groups"); _MTL_PRIVATE_DEF_SEL(hasUnifiedMemory, "hasUnifiedMemory"); _MTL_PRIVATE_DEF_SEL(hazardTrackingMode, "hazardTrackingMode"); _MTL_PRIVATE_DEF_SEL(heap, "heap"); _MTL_PRIVATE_DEF_SEL(heapAccelerationStructureSizeAndAlignWithDescriptor_, "heapAccelerationStructureSizeAndAlignWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(heapAccelerationStructureSizeAndAlignWithSize_, "heapAccelerationStructureSizeAndAlignWithSize:"); _MTL_PRIVATE_DEF_SEL(heapBufferSizeAndAlignWithLength_options_, "heapBufferSizeAndAlignWithLength:options:"); _MTL_PRIVATE_DEF_SEL(heapOffset, "heapOffset"); 
_MTL_PRIVATE_DEF_SEL(heapTextureSizeAndAlignWithDescriptor_, "heapTextureSizeAndAlignWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(height, "height"); _MTL_PRIVATE_DEF_SEL(horizontal, "horizontal"); _MTL_PRIVATE_DEF_SEL(horizontalSampleStorage, "horizontalSampleStorage"); _MTL_PRIVATE_DEF_SEL(imageblockMemoryLengthForDimensions_, "imageblockMemoryLengthForDimensions:"); _MTL_PRIVATE_DEF_SEL(imageblockSampleLength, "imageblockSampleLength"); _MTL_PRIVATE_DEF_SEL(index, "index"); _MTL_PRIVATE_DEF_SEL(indexBuffer, "indexBuffer"); _MTL_PRIVATE_DEF_SEL(indexBufferIndex, "indexBufferIndex"); _MTL_PRIVATE_DEF_SEL(indexBufferOffset, "indexBufferOffset"); _MTL_PRIVATE_DEF_SEL(indexType, "indexType"); _MTL_PRIVATE_DEF_SEL(indirectComputeCommandAtIndex_, "indirectComputeCommandAtIndex:"); _MTL_PRIVATE_DEF_SEL(indirectRenderCommandAtIndex_, "indirectRenderCommandAtIndex:"); _MTL_PRIVATE_DEF_SEL(inheritBuffers, "inheritBuffers"); _MTL_PRIVATE_DEF_SEL(inheritPipelineState, "inheritPipelineState"); _MTL_PRIVATE_DEF_SEL(init, "init"); _MTL_PRIVATE_DEF_SEL(initWithArgumentIndex_, "initWithArgumentIndex:"); _MTL_PRIVATE_DEF_SEL(initWithDispatchQueue_, "initWithDispatchQueue:"); _MTL_PRIVATE_DEF_SEL(initWithFunctionName_nodes_outputNode_attributes_, "initWithFunctionName:nodes:outputNode:attributes:"); _MTL_PRIVATE_DEF_SEL(initWithName_arguments_controlDependencies_, "initWithName:arguments:controlDependencies:"); _MTL_PRIVATE_DEF_SEL(initWithSampleCount_, "initWithSampleCount:"); _MTL_PRIVATE_DEF_SEL(initWithSampleCount_horizontal_vertical_, "initWithSampleCount:horizontal:vertical:"); _MTL_PRIVATE_DEF_SEL(initialCapacity, "initialCapacity"); _MTL_PRIVATE_DEF_SEL(inputPrimitiveTopology, "inputPrimitiveTopology"); _MTL_PRIVATE_DEF_SEL(insertDebugCaptureBoundary, "insertDebugCaptureBoundary"); _MTL_PRIVATE_DEF_SEL(insertDebugSignpost_, "insertDebugSignpost:"); _MTL_PRIVATE_DEF_SEL(insertLibraries, "insertLibraries"); _MTL_PRIVATE_DEF_SEL(installName, "installName"); 
_MTL_PRIVATE_DEF_SEL(instanceCount, "instanceCount"); _MTL_PRIVATE_DEF_SEL(instanceCountBuffer, "instanceCountBuffer"); _MTL_PRIVATE_DEF_SEL(instanceCountBufferOffset, "instanceCountBufferOffset"); _MTL_PRIVATE_DEF_SEL(instanceDescriptorBuffer, "instanceDescriptorBuffer"); _MTL_PRIVATE_DEF_SEL(instanceDescriptorBufferOffset, "instanceDescriptorBufferOffset"); _MTL_PRIVATE_DEF_SEL(instanceDescriptorStride, "instanceDescriptorStride"); _MTL_PRIVATE_DEF_SEL(instanceDescriptorType, "instanceDescriptorType"); _MTL_PRIVATE_DEF_SEL(instanceTransformationMatrixLayout, "instanceTransformationMatrixLayout"); _MTL_PRIVATE_DEF_SEL(instancedAccelerationStructures, "instancedAccelerationStructures"); _MTL_PRIVATE_DEF_SEL(intersectionFunctionTableDescriptor, "intersectionFunctionTableDescriptor"); _MTL_PRIVATE_DEF_SEL(intersectionFunctionTableOffset, "intersectionFunctionTableOffset"); _MTL_PRIVATE_DEF_SEL(iosurface, "iosurface"); _MTL_PRIVATE_DEF_SEL(iosurfacePlane, "iosurfacePlane"); _MTL_PRIVATE_DEF_SEL(isActive, "isActive"); _MTL_PRIVATE_DEF_SEL(isAliasable, "isAliasable"); _MTL_PRIVATE_DEF_SEL(isAlphaToCoverageEnabled, "isAlphaToCoverageEnabled"); _MTL_PRIVATE_DEF_SEL(isAlphaToOneEnabled, "isAlphaToOneEnabled"); _MTL_PRIVATE_DEF_SEL(isArgument, "isArgument"); _MTL_PRIVATE_DEF_SEL(isBlendingEnabled, "isBlendingEnabled"); _MTL_PRIVATE_DEF_SEL(isCapturing, "isCapturing"); _MTL_PRIVATE_DEF_SEL(isDepth24Stencil8PixelFormatSupported, "isDepth24Stencil8PixelFormatSupported"); _MTL_PRIVATE_DEF_SEL(isDepthTexture, "isDepthTexture"); _MTL_PRIVATE_DEF_SEL(isDepthWriteEnabled, "isDepthWriteEnabled"); _MTL_PRIVATE_DEF_SEL(isFramebufferOnly, "isFramebufferOnly"); _MTL_PRIVATE_DEF_SEL(isHeadless, "isHeadless"); _MTL_PRIVATE_DEF_SEL(isLowPower, "isLowPower"); _MTL_PRIVATE_DEF_SEL(isPatchControlPointData, "isPatchControlPointData"); _MTL_PRIVATE_DEF_SEL(isPatchData, "isPatchData"); _MTL_PRIVATE_DEF_SEL(isRasterizationEnabled, "isRasterizationEnabled"); _MTL_PRIVATE_DEF_SEL(isRemovable, 
"isRemovable"); _MTL_PRIVATE_DEF_SEL(isShareable, "isShareable"); _MTL_PRIVATE_DEF_SEL(isSparse, "isSparse"); _MTL_PRIVATE_DEF_SEL(isTessellationFactorScaleEnabled, "isTessellationFactorScaleEnabled"); _MTL_PRIVATE_DEF_SEL(isUsed, "isUsed"); _MTL_PRIVATE_DEF_SEL(kernelEndTime, "kernelEndTime"); _MTL_PRIVATE_DEF_SEL(kernelStartTime, "kernelStartTime"); _MTL_PRIVATE_DEF_SEL(label, "label"); _MTL_PRIVATE_DEF_SEL(languageVersion, "languageVersion"); _MTL_PRIVATE_DEF_SEL(layerAtIndex_, "layerAtIndex:"); _MTL_PRIVATE_DEF_SEL(layerCount, "layerCount"); _MTL_PRIVATE_DEF_SEL(layers, "layers"); _MTL_PRIVATE_DEF_SEL(layouts, "layouts"); _MTL_PRIVATE_DEF_SEL(length, "length"); _MTL_PRIVATE_DEF_SEL(level, "level"); _MTL_PRIVATE_DEF_SEL(libraries, "libraries"); _MTL_PRIVATE_DEF_SEL(libraryType, "libraryType"); _MTL_PRIVATE_DEF_SEL(line, "line"); _MTL_PRIVATE_DEF_SEL(linkedFunctions, "linkedFunctions"); _MTL_PRIVATE_DEF_SEL(loadAction, "loadAction"); _MTL_PRIVATE_DEF_SEL(loadBuffer_offset_size_sourceHandle_sourceHandleOffset_, "loadBuffer:offset:size:sourceHandle:sourceHandleOffset:"); _MTL_PRIVATE_DEF_SEL(loadBytes_size_sourceHandle_sourceHandleOffset_, "loadBytes:size:sourceHandle:sourceHandleOffset:"); _MTL_PRIVATE_DEF_SEL(loadTexture_slice_level_size_sourceBytesPerRow_sourceBytesPerImage_destinationOrigin_sourceHandle_sourceHandleOffset_, "loadTexture:slice:level:size:sourceBytesPerRow:sourceBytesPerImage:destinationOrigin:sourceHandle:sourceHandleOffset:"); _MTL_PRIVATE_DEF_SEL(location, "location"); _MTL_PRIVATE_DEF_SEL(locationNumber, "locationNumber"); _MTL_PRIVATE_DEF_SEL(lodAverage, "lodAverage"); _MTL_PRIVATE_DEF_SEL(lodMaxClamp, "lodMaxClamp"); _MTL_PRIVATE_DEF_SEL(lodMinClamp, "lodMinClamp"); _MTL_PRIVATE_DEF_SEL(logState, "logState"); _MTL_PRIVATE_DEF_SEL(logs, "logs"); _MTL_PRIVATE_DEF_SEL(magFilter, "magFilter"); _MTL_PRIVATE_DEF_SEL(makeAliasable, "makeAliasable"); _MTL_PRIVATE_DEF_SEL(mapPhysicalToScreenCoordinates_forLayer_, 
"mapPhysicalToScreenCoordinates:forLayer:"); _MTL_PRIVATE_DEF_SEL(mapScreenToPhysicalCoordinates_forLayer_, "mapScreenToPhysicalCoordinates:forLayer:"); _MTL_PRIVATE_DEF_SEL(mathFloatingPointFunctions, "mathFloatingPointFunctions"); _MTL_PRIVATE_DEF_SEL(mathMode, "mathMode"); _MTL_PRIVATE_DEF_SEL(maxAnisotropy, "maxAnisotropy"); _MTL_PRIVATE_DEF_SEL(maxArgumentBufferSamplerCount, "maxArgumentBufferSamplerCount"); _MTL_PRIVATE_DEF_SEL(maxAvailableSizeWithAlignment_, "maxAvailableSizeWithAlignment:"); _MTL_PRIVATE_DEF_SEL(maxBufferLength, "maxBufferLength"); _MTL_PRIVATE_DEF_SEL(maxCallStackDepth, "maxCallStackDepth"); _MTL_PRIVATE_DEF_SEL(maxCommandBufferCount, "maxCommandBufferCount"); _MTL_PRIVATE_DEF_SEL(maxCommandsInFlight, "maxCommandsInFlight"); _MTL_PRIVATE_DEF_SEL(maxFragmentBufferBindCount, "maxFragmentBufferBindCount"); _MTL_PRIVATE_DEF_SEL(maxFragmentCallStackDepth, "maxFragmentCallStackDepth"); _MTL_PRIVATE_DEF_SEL(maxInstanceCount, "maxInstanceCount"); _MTL_PRIVATE_DEF_SEL(maxKernelBufferBindCount, "maxKernelBufferBindCount"); _MTL_PRIVATE_DEF_SEL(maxKernelThreadgroupMemoryBindCount, "maxKernelThreadgroupMemoryBindCount"); _MTL_PRIVATE_DEF_SEL(maxMeshBufferBindCount, "maxMeshBufferBindCount"); _MTL_PRIVATE_DEF_SEL(maxMotionTransformCount, "maxMotionTransformCount"); _MTL_PRIVATE_DEF_SEL(maxObjectBufferBindCount, "maxObjectBufferBindCount"); _MTL_PRIVATE_DEF_SEL(maxObjectThreadgroupMemoryBindCount, "maxObjectThreadgroupMemoryBindCount"); _MTL_PRIVATE_DEF_SEL(maxSampleCount, "maxSampleCount"); _MTL_PRIVATE_DEF_SEL(maxTessellationFactor, "maxTessellationFactor"); _MTL_PRIVATE_DEF_SEL(maxThreadgroupMemoryLength, "maxThreadgroupMemoryLength"); _MTL_PRIVATE_DEF_SEL(maxThreadsPerThreadgroup, "maxThreadsPerThreadgroup"); _MTL_PRIVATE_DEF_SEL(maxTotalThreadgroupsPerMeshGrid, "maxTotalThreadgroupsPerMeshGrid"); _MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerMeshThreadgroup, "maxTotalThreadsPerMeshThreadgroup"); _MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerObjectThreadgroup, 
"maxTotalThreadsPerObjectThreadgroup"); _MTL_PRIVATE_DEF_SEL(maxTotalThreadsPerThreadgroup, "maxTotalThreadsPerThreadgroup"); _MTL_PRIVATE_DEF_SEL(maxTransferRate, "maxTransferRate"); _MTL_PRIVATE_DEF_SEL(maxVertexAmplificationCount, "maxVertexAmplificationCount"); _MTL_PRIVATE_DEF_SEL(maxVertexBufferBindCount, "maxVertexBufferBindCount"); _MTL_PRIVATE_DEF_SEL(maxVertexCallStackDepth, "maxVertexCallStackDepth"); _MTL_PRIVATE_DEF_SEL(maximumConcurrentCompilationTaskCount, "maximumConcurrentCompilationTaskCount"); _MTL_PRIVATE_DEF_SEL(memberByName_, "memberByName:"); _MTL_PRIVATE_DEF_SEL(members, "members"); _MTL_PRIVATE_DEF_SEL(memoryBarrierWithResources_count_, "memoryBarrierWithResources:count:"); _MTL_PRIVATE_DEF_SEL(memoryBarrierWithResources_count_afterStages_beforeStages_, "memoryBarrierWithResources:count:afterStages:beforeStages:"); _MTL_PRIVATE_DEF_SEL(memoryBarrierWithScope_, "memoryBarrierWithScope:"); _MTL_PRIVATE_DEF_SEL(memoryBarrierWithScope_afterStages_beforeStages_, "memoryBarrierWithScope:afterStages:beforeStages:"); _MTL_PRIVATE_DEF_SEL(meshBindings, "meshBindings"); _MTL_PRIVATE_DEF_SEL(meshBuffers, "meshBuffers"); _MTL_PRIVATE_DEF_SEL(meshFunction, "meshFunction"); _MTL_PRIVATE_DEF_SEL(meshLinkedFunctions, "meshLinkedFunctions"); _MTL_PRIVATE_DEF_SEL(meshThreadExecutionWidth, "meshThreadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(meshThreadgroupSizeIsMultipleOfThreadExecutionWidth, "meshThreadgroupSizeIsMultipleOfThreadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(minFilter, "minFilter"); _MTL_PRIVATE_DEF_SEL(minimumLinearTextureAlignmentForPixelFormat_, "minimumLinearTextureAlignmentForPixelFormat:"); _MTL_PRIVATE_DEF_SEL(minimumTextureBufferAlignmentForPixelFormat_, "minimumTextureBufferAlignmentForPixelFormat:"); _MTL_PRIVATE_DEF_SEL(mipFilter, "mipFilter"); _MTL_PRIVATE_DEF_SEL(mipmapLevelCount, "mipmapLevelCount"); _MTL_PRIVATE_DEF_SEL(motionEndBorderMode, "motionEndBorderMode"); _MTL_PRIVATE_DEF_SEL(motionEndTime, "motionEndTime"); 
_MTL_PRIVATE_DEF_SEL(motionKeyframeCount, "motionKeyframeCount"); _MTL_PRIVATE_DEF_SEL(motionStartBorderMode, "motionStartBorderMode"); _MTL_PRIVATE_DEF_SEL(motionStartTime, "motionStartTime"); _MTL_PRIVATE_DEF_SEL(motionTransformBuffer, "motionTransformBuffer"); _MTL_PRIVATE_DEF_SEL(motionTransformBufferOffset, "motionTransformBufferOffset"); _MTL_PRIVATE_DEF_SEL(motionTransformCount, "motionTransformCount"); _MTL_PRIVATE_DEF_SEL(motionTransformCountBuffer, "motionTransformCountBuffer"); _MTL_PRIVATE_DEF_SEL(motionTransformCountBufferOffset, "motionTransformCountBufferOffset"); _MTL_PRIVATE_DEF_SEL(motionTransformStride, "motionTransformStride"); _MTL_PRIVATE_DEF_SEL(motionTransformType, "motionTransformType"); _MTL_PRIVATE_DEF_SEL(moveTextureMappingsFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_, "moveTextureMappingsFromTexture:sourceSlice:sourceLevel:sourceOrigin:sourceSize:toTexture:destinationSlice:destinationLevel:destinationOrigin:"); _MTL_PRIVATE_DEF_SEL(mutability, "mutability"); _MTL_PRIVATE_DEF_SEL(name, "name"); _MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithDescriptor_, "newAccelerationStructureWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithDescriptor_offset_, "newAccelerationStructureWithDescriptor:offset:"); _MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithSize_, "newAccelerationStructureWithSize:"); _MTL_PRIVATE_DEF_SEL(newAccelerationStructureWithSize_offset_, "newAccelerationStructureWithSize:offset:"); _MTL_PRIVATE_DEF_SEL(newArgumentEncoderForBufferAtIndex_, "newArgumentEncoderForBufferAtIndex:"); _MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithArguments_, "newArgumentEncoderWithArguments:"); _MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferBinding_, "newArgumentEncoderWithBufferBinding:"); _MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferIndex_, "newArgumentEncoderWithBufferIndex:"); _MTL_PRIVATE_DEF_SEL(newArgumentEncoderWithBufferIndex_reflection_, 
"newArgumentEncoderWithBufferIndex:reflection:"); _MTL_PRIVATE_DEF_SEL(newBinaryArchiveWithDescriptor_error_, "newBinaryArchiveWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newBufferWithBytes_length_options_, "newBufferWithBytes:length:options:"); _MTL_PRIVATE_DEF_SEL(newBufferWithBytesNoCopy_length_options_deallocator_, "newBufferWithBytesNoCopy:length:options:deallocator:"); _MTL_PRIVATE_DEF_SEL(newBufferWithLength_options_, "newBufferWithLength:options:"); _MTL_PRIVATE_DEF_SEL(newBufferWithLength_options_offset_, "newBufferWithLength:options:offset:"); _MTL_PRIVATE_DEF_SEL(newCaptureScopeWithCommandQueue_, "newCaptureScopeWithCommandQueue:"); _MTL_PRIVATE_DEF_SEL(newCaptureScopeWithDevice_, "newCaptureScopeWithDevice:"); _MTL_PRIVATE_DEF_SEL(newCommandQueue, "newCommandQueue"); _MTL_PRIVATE_DEF_SEL(newCommandQueueWithDescriptor_, "newCommandQueueWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newCommandQueueWithMaxCommandBufferCount_, "newCommandQueueWithMaxCommandBufferCount:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithAdditionalBinaryFunctions_error_, "newComputePipelineStateWithAdditionalBinaryFunctions:error:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithDescriptor_options_completionHandler_, "newComputePipelineStateWithDescriptor:options:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithDescriptor_options_reflection_error_, "newComputePipelineStateWithDescriptor:options:reflection:error:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_completionHandler_, "newComputePipelineStateWithFunction:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_error_, "newComputePipelineStateWithFunction:error:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_options_completionHandler_, "newComputePipelineStateWithFunction:options:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newComputePipelineStateWithFunction_options_reflection_error_, "newComputePipelineStateWithFunction:options:reflection:error:"); 
_MTL_PRIVATE_DEF_SEL(newCounterSampleBufferWithDescriptor_error_, "newCounterSampleBufferWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newDefaultLibrary, "newDefaultLibrary"); _MTL_PRIVATE_DEF_SEL(newDefaultLibraryWithBundle_error_, "newDefaultLibraryWithBundle:error:"); _MTL_PRIVATE_DEF_SEL(newDepthStencilStateWithDescriptor_, "newDepthStencilStateWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newDynamicLibrary_error_, "newDynamicLibrary:error:"); _MTL_PRIVATE_DEF_SEL(newDynamicLibraryWithURL_error_, "newDynamicLibraryWithURL:error:"); _MTL_PRIVATE_DEF_SEL(newEvent, "newEvent"); _MTL_PRIVATE_DEF_SEL(newFence, "newFence"); _MTL_PRIVATE_DEF_SEL(newFunctionWithDescriptor_completionHandler_, "newFunctionWithDescriptor:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newFunctionWithDescriptor_error_, "newFunctionWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newFunctionWithName_, "newFunctionWithName:"); _MTL_PRIVATE_DEF_SEL(newFunctionWithName_constantValues_completionHandler_, "newFunctionWithName:constantValues:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newFunctionWithName_constantValues_error_, "newFunctionWithName:constantValues:error:"); _MTL_PRIVATE_DEF_SEL(newHeapWithDescriptor_, "newHeapWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newIOCommandQueueWithDescriptor_error_, "newIOCommandQueueWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newIOFileHandleWithURL_compressionMethod_error_, "newIOFileHandleWithURL:compressionMethod:error:"); _MTL_PRIVATE_DEF_SEL(newIOFileHandleWithURL_error_, "newIOFileHandleWithURL:error:"); _MTL_PRIVATE_DEF_SEL(newIOHandleWithURL_compressionMethod_error_, "newIOHandleWithURL:compressionMethod:error:"); _MTL_PRIVATE_DEF_SEL(newIOHandleWithURL_error_, "newIOHandleWithURL:error:"); _MTL_PRIVATE_DEF_SEL(newIndirectCommandBufferWithDescriptor_maxCommandCount_options_, "newIndirectCommandBufferWithDescriptor:maxCommandCount:options:"); _MTL_PRIVATE_DEF_SEL(newIntersectionFunctionTableWithDescriptor_, "newIntersectionFunctionTableWithDescriptor:"); 
_MTL_PRIVATE_DEF_SEL(newIntersectionFunctionTableWithDescriptor_stage_, "newIntersectionFunctionTableWithDescriptor:stage:"); _MTL_PRIVATE_DEF_SEL(newIntersectionFunctionWithDescriptor_completionHandler_, "newIntersectionFunctionWithDescriptor:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newIntersectionFunctionWithDescriptor_error_, "newIntersectionFunctionWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithData_error_, "newLibraryWithData:error:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithFile_error_, "newLibraryWithFile:error:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithSource_options_completionHandler_, "newLibraryWithSource:options:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithSource_options_error_, "newLibraryWithSource:options:error:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithStitchedDescriptor_completionHandler_, "newLibraryWithStitchedDescriptor:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithStitchedDescriptor_error_, "newLibraryWithStitchedDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newLibraryWithURL_error_, "newLibraryWithURL:error:"); _MTL_PRIVATE_DEF_SEL(newLogStateWithDescriptor_error_, "newLogStateWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newRasterizationRateMapWithDescriptor_, "newRasterizationRateMapWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newRemoteBufferViewForDevice_, "newRemoteBufferViewForDevice:"); _MTL_PRIVATE_DEF_SEL(newRemoteTextureViewForDevice_, "newRemoteTextureViewForDevice:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithAdditionalBinaryFunctions_error_, "newRenderPipelineStateWithAdditionalBinaryFunctions:error:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_completionHandler_, "newRenderPipelineStateWithDescriptor:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_error_, "newRenderPipelineStateWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_options_completionHandler_, "newRenderPipelineStateWithDescriptor:options:completionHandler:"); 
_MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithDescriptor_options_reflection_error_, "newRenderPipelineStateWithDescriptor:options:reflection:error:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithMeshDescriptor_options_completionHandler_, "newRenderPipelineStateWithMeshDescriptor:options:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithMeshDescriptor_options_reflection_error_, "newRenderPipelineStateWithMeshDescriptor:options:reflection:error:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithTileDescriptor_options_completionHandler_, "newRenderPipelineStateWithTileDescriptor:options:completionHandler:"); _MTL_PRIVATE_DEF_SEL(newRenderPipelineStateWithTileDescriptor_options_reflection_error_, "newRenderPipelineStateWithTileDescriptor:options:reflection:error:"); _MTL_PRIVATE_DEF_SEL(newResidencySetWithDescriptor_error_, "newResidencySetWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(newSamplerStateWithDescriptor_, "newSamplerStateWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newScratchBufferWithMinimumSize_, "newScratchBufferWithMinimumSize:"); _MTL_PRIVATE_DEF_SEL(newSharedEvent, "newSharedEvent"); _MTL_PRIVATE_DEF_SEL(newSharedEventHandle, "newSharedEventHandle"); _MTL_PRIVATE_DEF_SEL(newSharedEventWithHandle_, "newSharedEventWithHandle:"); _MTL_PRIVATE_DEF_SEL(newSharedTextureHandle, "newSharedTextureHandle"); _MTL_PRIVATE_DEF_SEL(newSharedTextureWithDescriptor_, "newSharedTextureWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newSharedTextureWithHandle_, "newSharedTextureWithHandle:"); _MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_, "newTextureViewWithPixelFormat:"); _MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_, "newTextureViewWithPixelFormat:textureType:levels:slices:"); _MTL_PRIVATE_DEF_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_swizzle_, "newTextureViewWithPixelFormat:textureType:levels:slices:swizzle:"); _MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_, "newTextureWithDescriptor:"); 
_MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_iosurface_plane_, "newTextureWithDescriptor:iosurface:plane:"); _MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_offset_, "newTextureWithDescriptor:offset:"); _MTL_PRIVATE_DEF_SEL(newTextureWithDescriptor_offset_bytesPerRow_, "newTextureWithDescriptor:offset:bytesPerRow:"); _MTL_PRIVATE_DEF_SEL(newVisibleFunctionTableWithDescriptor_, "newVisibleFunctionTableWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(newVisibleFunctionTableWithDescriptor_stage_, "newVisibleFunctionTableWithDescriptor:stage:"); _MTL_PRIVATE_DEF_SEL(nodes, "nodes"); _MTL_PRIVATE_DEF_SEL(normalizedCoordinates, "normalizedCoordinates"); _MTL_PRIVATE_DEF_SEL(notifyListener_atValue_block_, "notifyListener:atValue:block:"); _MTL_PRIVATE_DEF_SEL(objectAtIndexedSubscript_, "objectAtIndexedSubscript:"); _MTL_PRIVATE_DEF_SEL(objectBindings, "objectBindings"); _MTL_PRIVATE_DEF_SEL(objectBuffers, "objectBuffers"); _MTL_PRIVATE_DEF_SEL(objectFunction, "objectFunction"); _MTL_PRIVATE_DEF_SEL(objectLinkedFunctions, "objectLinkedFunctions"); _MTL_PRIVATE_DEF_SEL(objectPayloadAlignment, "objectPayloadAlignment"); _MTL_PRIVATE_DEF_SEL(objectPayloadDataSize, "objectPayloadDataSize"); _MTL_PRIVATE_DEF_SEL(objectThreadExecutionWidth, "objectThreadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(objectThreadgroupSizeIsMultipleOfThreadExecutionWidth, "objectThreadgroupSizeIsMultipleOfThreadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(offset, "offset"); _MTL_PRIVATE_DEF_SEL(opaque, "opaque"); _MTL_PRIVATE_DEF_SEL(optimizationLevel, "optimizationLevel"); _MTL_PRIVATE_DEF_SEL(optimizeContentsForCPUAccess_, "optimizeContentsForCPUAccess:"); _MTL_PRIVATE_DEF_SEL(optimizeContentsForCPUAccess_slice_level_, "optimizeContentsForCPUAccess:slice:level:"); _MTL_PRIVATE_DEF_SEL(optimizeContentsForGPUAccess_, "optimizeContentsForGPUAccess:"); _MTL_PRIVATE_DEF_SEL(optimizeContentsForGPUAccess_slice_level_, "optimizeContentsForGPUAccess:slice:level:"); _MTL_PRIVATE_DEF_SEL(optimizeIndirectCommandBuffer_withRange_, 
"optimizeIndirectCommandBuffer:withRange:"); _MTL_PRIVATE_DEF_SEL(options, "options"); _MTL_PRIVATE_DEF_SEL(outputNode, "outputNode"); _MTL_PRIVATE_DEF_SEL(outputURL, "outputURL"); _MTL_PRIVATE_DEF_SEL(parallelRenderCommandEncoderWithDescriptor_, "parallelRenderCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(parameterBufferSizeAndAlign, "parameterBufferSizeAndAlign"); _MTL_PRIVATE_DEF_SEL(parentRelativeLevel, "parentRelativeLevel"); _MTL_PRIVATE_DEF_SEL(parentRelativeSlice, "parentRelativeSlice"); _MTL_PRIVATE_DEF_SEL(parentTexture, "parentTexture"); _MTL_PRIVATE_DEF_SEL(patchControlPointCount, "patchControlPointCount"); _MTL_PRIVATE_DEF_SEL(patchType, "patchType"); _MTL_PRIVATE_DEF_SEL(payloadMemoryLength, "payloadMemoryLength"); _MTL_PRIVATE_DEF_SEL(peerCount, "peerCount"); _MTL_PRIVATE_DEF_SEL(peerGroupID, "peerGroupID"); _MTL_PRIVATE_DEF_SEL(peerIndex, "peerIndex"); _MTL_PRIVATE_DEF_SEL(physicalGranularity, "physicalGranularity"); _MTL_PRIVATE_DEF_SEL(physicalSizeForLayer_, "physicalSizeForLayer:"); _MTL_PRIVATE_DEF_SEL(pixelFormat, "pixelFormat"); _MTL_PRIVATE_DEF_SEL(pointerType, "pointerType"); _MTL_PRIVATE_DEF_SEL(popDebugGroup, "popDebugGroup"); _MTL_PRIVATE_DEF_SEL(preloadedLibraries, "preloadedLibraries"); _MTL_PRIVATE_DEF_SEL(preprocessorMacros, "preprocessorMacros"); _MTL_PRIVATE_DEF_SEL(present, "present"); _MTL_PRIVATE_DEF_SEL(presentAfterMinimumDuration_, "presentAfterMinimumDuration:"); _MTL_PRIVATE_DEF_SEL(presentAtTime_, "presentAtTime:"); _MTL_PRIVATE_DEF_SEL(presentDrawable_, "presentDrawable:"); _MTL_PRIVATE_DEF_SEL(presentDrawable_afterMinimumDuration_, "presentDrawable:afterMinimumDuration:"); _MTL_PRIVATE_DEF_SEL(presentDrawable_atTime_, "presentDrawable:atTime:"); _MTL_PRIVATE_DEF_SEL(presentedTime, "presentedTime"); _MTL_PRIVATE_DEF_SEL(preserveInvariance, "preserveInvariance"); _MTL_PRIVATE_DEF_SEL(primitiveDataBuffer, "primitiveDataBuffer"); _MTL_PRIVATE_DEF_SEL(primitiveDataBufferOffset, "primitiveDataBufferOffset"); 
_MTL_PRIVATE_DEF_SEL(primitiveDataElementSize, "primitiveDataElementSize"); _MTL_PRIVATE_DEF_SEL(primitiveDataStride, "primitiveDataStride"); _MTL_PRIVATE_DEF_SEL(priority, "priority"); _MTL_PRIVATE_DEF_SEL(privateFunctions, "privateFunctions"); _MTL_PRIVATE_DEF_SEL(pushDebugGroup_, "pushDebugGroup:"); _MTL_PRIVATE_DEF_SEL(rAddressMode, "rAddressMode"); _MTL_PRIVATE_DEF_SEL(radiusBuffer, "radiusBuffer"); _MTL_PRIVATE_DEF_SEL(radiusBufferOffset, "radiusBufferOffset"); _MTL_PRIVATE_DEF_SEL(radiusBuffers, "radiusBuffers"); _MTL_PRIVATE_DEF_SEL(radiusFormat, "radiusFormat"); _MTL_PRIVATE_DEF_SEL(radiusStride, "radiusStride"); _MTL_PRIVATE_DEF_SEL(rasterSampleCount, "rasterSampleCount"); _MTL_PRIVATE_DEF_SEL(rasterizationRateMap, "rasterizationRateMap"); _MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_, "rasterizationRateMapDescriptorWithScreenSize:"); _MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_layer_, "rasterizationRateMapDescriptorWithScreenSize:layer:"); _MTL_PRIVATE_DEF_SEL(rasterizationRateMapDescriptorWithScreenSize_layerCount_layers_, "rasterizationRateMapDescriptorWithScreenSize:layerCount:layers:"); _MTL_PRIVATE_DEF_SEL(readMask, "readMask"); _MTL_PRIVATE_DEF_SEL(readWriteTextureSupport, "readWriteTextureSupport"); _MTL_PRIVATE_DEF_SEL(recommendedMaxWorkingSetSize, "recommendedMaxWorkingSetSize"); _MTL_PRIVATE_DEF_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_, "refitAccelerationStructure:descriptor:destination:scratchBuffer:scratchBufferOffset:"); _MTL_PRIVATE_DEF_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_options_, "refitAccelerationStructure:descriptor:destination:scratchBuffer:scratchBufferOffset:options:"); _MTL_PRIVATE_DEF_SEL(registryID, "registryID"); _MTL_PRIVATE_DEF_SEL(remoteStorageBuffer, "remoteStorageBuffer"); _MTL_PRIVATE_DEF_SEL(remoteStorageTexture, "remoteStorageTexture"); _MTL_PRIVATE_DEF_SEL(removeAllAllocations, 
"removeAllAllocations"); _MTL_PRIVATE_DEF_SEL(removeAllDebugMarkers, "removeAllDebugMarkers"); _MTL_PRIVATE_DEF_SEL(removeAllocation_, "removeAllocation:"); _MTL_PRIVATE_DEF_SEL(removeAllocations_count_, "removeAllocations:count:"); _MTL_PRIVATE_DEF_SEL(removeResidencySet_, "removeResidencySet:"); _MTL_PRIVATE_DEF_SEL(removeResidencySets_count_, "removeResidencySets:count:"); _MTL_PRIVATE_DEF_SEL(renderCommandEncoder, "renderCommandEncoder"); _MTL_PRIVATE_DEF_SEL(renderCommandEncoderWithDescriptor_, "renderCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(renderPassDescriptor, "renderPassDescriptor"); _MTL_PRIVATE_DEF_SEL(renderTargetArrayLength, "renderTargetArrayLength"); _MTL_PRIVATE_DEF_SEL(renderTargetHeight, "renderTargetHeight"); _MTL_PRIVATE_DEF_SEL(renderTargetWidth, "renderTargetWidth"); _MTL_PRIVATE_DEF_SEL(replaceRegion_mipmapLevel_slice_withBytes_bytesPerRow_bytesPerImage_, "replaceRegion:mipmapLevel:slice:withBytes:bytesPerRow:bytesPerImage:"); _MTL_PRIVATE_DEF_SEL(replaceRegion_mipmapLevel_withBytes_bytesPerRow_, "replaceRegion:mipmapLevel:withBytes:bytesPerRow:"); _MTL_PRIVATE_DEF_SEL(requestResidency, "requestResidency"); _MTL_PRIVATE_DEF_SEL(required, "required"); _MTL_PRIVATE_DEF_SEL(reset, "reset"); _MTL_PRIVATE_DEF_SEL(resetCommandsInBuffer_withRange_, "resetCommandsInBuffer:withRange:"); _MTL_PRIVATE_DEF_SEL(resetTextureAccessCounters_region_mipLevel_slice_, "resetTextureAccessCounters:region:mipLevel:slice:"); _MTL_PRIVATE_DEF_SEL(resetWithRange_, "resetWithRange:"); _MTL_PRIVATE_DEF_SEL(resolveCounterRange_, "resolveCounterRange:"); _MTL_PRIVATE_DEF_SEL(resolveCounters_inRange_destinationBuffer_destinationOffset_, "resolveCounters:inRange:destinationBuffer:destinationOffset:"); _MTL_PRIVATE_DEF_SEL(resolveDepthPlane, "resolveDepthPlane"); _MTL_PRIVATE_DEF_SEL(resolveLevel, "resolveLevel"); _MTL_PRIVATE_DEF_SEL(resolveSlice, "resolveSlice"); _MTL_PRIVATE_DEF_SEL(resolveTexture, "resolveTexture"); _MTL_PRIVATE_DEF_SEL(resourceOptions, 
"resourceOptions"); _MTL_PRIVATE_DEF_SEL(resourceStateCommandEncoder, "resourceStateCommandEncoder"); _MTL_PRIVATE_DEF_SEL(resourceStateCommandEncoderWithDescriptor_, "resourceStateCommandEncoderWithDescriptor:"); _MTL_PRIVATE_DEF_SEL(resourceStatePassDescriptor, "resourceStatePassDescriptor"); _MTL_PRIVATE_DEF_SEL(retainedReferences, "retainedReferences"); _MTL_PRIVATE_DEF_SEL(rgbBlendOperation, "rgbBlendOperation"); _MTL_PRIVATE_DEF_SEL(rootResource, "rootResource"); _MTL_PRIVATE_DEF_SEL(sAddressMode, "sAddressMode"); _MTL_PRIVATE_DEF_SEL(sampleBuffer, "sampleBuffer"); _MTL_PRIVATE_DEF_SEL(sampleBufferAttachments, "sampleBufferAttachments"); _MTL_PRIVATE_DEF_SEL(sampleCount, "sampleCount"); _MTL_PRIVATE_DEF_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_, "sampleCountersInBuffer:atSampleIndex:withBarrier:"); _MTL_PRIVATE_DEF_SEL(sampleTimestamps_gpuTimestamp_, "sampleTimestamps:gpuTimestamp:"); _MTL_PRIVATE_DEF_SEL(scratchBufferAllocator, "scratchBufferAllocator"); _MTL_PRIVATE_DEF_SEL(screenSize, "screenSize"); _MTL_PRIVATE_DEF_SEL(segmentControlPointCount, "segmentControlPointCount"); _MTL_PRIVATE_DEF_SEL(segmentCount, "segmentCount"); _MTL_PRIVATE_DEF_SEL(serializeToURL_error_, "serializeToURL:error:"); _MTL_PRIVATE_DEF_SEL(setAccelerationStructure_atBufferIndex_, "setAccelerationStructure:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setAccelerationStructure_atIndex_, "setAccelerationStructure:atIndex:"); _MTL_PRIVATE_DEF_SEL(setAccess_, "setAccess:"); _MTL_PRIVATE_DEF_SEL(setAllowDuplicateIntersectionFunctionInvocation_, "setAllowDuplicateIntersectionFunctionInvocation:"); _MTL_PRIVATE_DEF_SEL(setAllowGPUOptimizedContents_, "setAllowGPUOptimizedContents:"); _MTL_PRIVATE_DEF_SEL(setAllowReferencingUndefinedSymbols_, "setAllowReferencingUndefinedSymbols:"); _MTL_PRIVATE_DEF_SEL(setAlphaBlendOperation_, "setAlphaBlendOperation:"); _MTL_PRIVATE_DEF_SEL(setAlphaToCoverageEnabled_, "setAlphaToCoverageEnabled:"); _MTL_PRIVATE_DEF_SEL(setAlphaToOneEnabled_, 
"setAlphaToOneEnabled:"); _MTL_PRIVATE_DEF_SEL(setArgumentBuffer_offset_, "setArgumentBuffer:offset:"); _MTL_PRIVATE_DEF_SEL(setArgumentBuffer_startOffset_arrayElement_, "setArgumentBuffer:startOffset:arrayElement:"); _MTL_PRIVATE_DEF_SEL(setArgumentIndex_, "setArgumentIndex:"); _MTL_PRIVATE_DEF_SEL(setArguments_, "setArguments:"); _MTL_PRIVATE_DEF_SEL(setArrayLength_, "setArrayLength:"); _MTL_PRIVATE_DEF_SEL(setAttributes_, "setAttributes:"); _MTL_PRIVATE_DEF_SEL(setBackFaceStencil_, "setBackFaceStencil:"); _MTL_PRIVATE_DEF_SEL(setBarrier, "setBarrier"); _MTL_PRIVATE_DEF_SEL(setBinaryArchives_, "setBinaryArchives:"); _MTL_PRIVATE_DEF_SEL(setBinaryFunctions_, "setBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setBlendColorRed_green_blue_alpha_, "setBlendColorRed:green:blue:alpha:"); _MTL_PRIVATE_DEF_SEL(setBlendingEnabled_, "setBlendingEnabled:"); _MTL_PRIVATE_DEF_SEL(setBorderColor_, "setBorderColor:"); _MTL_PRIVATE_DEF_SEL(setBoundingBoxBuffer_, "setBoundingBoxBuffer:"); _MTL_PRIVATE_DEF_SEL(setBoundingBoxBufferOffset_, "setBoundingBoxBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setBoundingBoxBuffers_, "setBoundingBoxBuffers:"); _MTL_PRIVATE_DEF_SEL(setBoundingBoxCount_, "setBoundingBoxCount:"); _MTL_PRIVATE_DEF_SEL(setBoundingBoxStride_, "setBoundingBoxStride:"); _MTL_PRIVATE_DEF_SEL(setBuffer_, "setBuffer:"); _MTL_PRIVATE_DEF_SEL(setBuffer_offset_atIndex_, "setBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setBuffer_offset_attributeStride_atIndex_, "setBuffer:offset:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setBufferIndex_, "setBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setBufferOffset_atIndex_, "setBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setBufferOffset_attributeStride_atIndex_, "setBufferOffset:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setBufferSize_, "setBufferSize:"); _MTL_PRIVATE_DEF_SEL(setBuffers_offsets_attributeStrides_withRange_, "setBuffers:offsets:attributeStrides:withRange:"); _MTL_PRIVATE_DEF_SEL(setBuffers_offsets_withRange_, 
"setBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setBytes_length_atIndex_, "setBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setBytes_length_attributeStride_atIndex_, "setBytes:length:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setCaptureObject_, "setCaptureObject:"); _MTL_PRIVATE_DEF_SEL(setClearColor_, "setClearColor:"); _MTL_PRIVATE_DEF_SEL(setClearDepth_, "setClearDepth:"); _MTL_PRIVATE_DEF_SEL(setClearStencil_, "setClearStencil:"); _MTL_PRIVATE_DEF_SEL(setColorStoreAction_atIndex_, "setColorStoreAction:atIndex:"); _MTL_PRIVATE_DEF_SEL(setColorStoreActionOptions_atIndex_, "setColorStoreActionOptions:atIndex:"); _MTL_PRIVATE_DEF_SEL(setCommandTypes_, "setCommandTypes:"); _MTL_PRIVATE_DEF_SEL(setCompareFunction_, "setCompareFunction:"); _MTL_PRIVATE_DEF_SEL(setCompileSymbolVisibility_, "setCompileSymbolVisibility:"); _MTL_PRIVATE_DEF_SEL(setCompressionType_, "setCompressionType:"); _MTL_PRIVATE_DEF_SEL(setComputeFunction_, "setComputeFunction:"); _MTL_PRIVATE_DEF_SEL(setComputePipelineState_, "setComputePipelineState:"); _MTL_PRIVATE_DEF_SEL(setComputePipelineState_atIndex_, "setComputePipelineState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setComputePipelineStates_withRange_, "setComputePipelineStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setConstantBlockAlignment_, "setConstantBlockAlignment:"); _MTL_PRIVATE_DEF_SEL(setConstantValue_type_atIndex_, "setConstantValue:type:atIndex:"); _MTL_PRIVATE_DEF_SEL(setConstantValue_type_withName_, "setConstantValue:type:withName:"); _MTL_PRIVATE_DEF_SEL(setConstantValues_, "setConstantValues:"); _MTL_PRIVATE_DEF_SEL(setConstantValues_type_withRange_, "setConstantValues:type:withRange:"); _MTL_PRIVATE_DEF_SEL(setControlDependencies_, "setControlDependencies:"); _MTL_PRIVATE_DEF_SEL(setControlPointBuffer_, "setControlPointBuffer:"); _MTL_PRIVATE_DEF_SEL(setControlPointBufferOffset_, "setControlPointBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setControlPointBuffers_, "setControlPointBuffers:"); 
_MTL_PRIVATE_DEF_SEL(setControlPointCount_, "setControlPointCount:"); _MTL_PRIVATE_DEF_SEL(setControlPointFormat_, "setControlPointFormat:"); _MTL_PRIVATE_DEF_SEL(setControlPointStride_, "setControlPointStride:"); _MTL_PRIVATE_DEF_SEL(setCounterSet_, "setCounterSet:"); _MTL_PRIVATE_DEF_SEL(setCpuCacheMode_, "setCpuCacheMode:"); _MTL_PRIVATE_DEF_SEL(setCullMode_, "setCullMode:"); _MTL_PRIVATE_DEF_SEL(setCurveBasis_, "setCurveBasis:"); _MTL_PRIVATE_DEF_SEL(setCurveEndCaps_, "setCurveEndCaps:"); _MTL_PRIVATE_DEF_SEL(setCurveType_, "setCurveType:"); _MTL_PRIVATE_DEF_SEL(setDataType_, "setDataType:"); _MTL_PRIVATE_DEF_SEL(setDefaultCaptureScope_, "setDefaultCaptureScope:"); _MTL_PRIVATE_DEF_SEL(setDefaultRasterSampleCount_, "setDefaultRasterSampleCount:"); _MTL_PRIVATE_DEF_SEL(setDepth_, "setDepth:"); _MTL_PRIVATE_DEF_SEL(setDepthAttachment_, "setDepthAttachment:"); _MTL_PRIVATE_DEF_SEL(setDepthAttachmentPixelFormat_, "setDepthAttachmentPixelFormat:"); _MTL_PRIVATE_DEF_SEL(setDepthBias_slopeScale_clamp_, "setDepthBias:slopeScale:clamp:"); _MTL_PRIVATE_DEF_SEL(setDepthClipMode_, "setDepthClipMode:"); _MTL_PRIVATE_DEF_SEL(setDepthCompareFunction_, "setDepthCompareFunction:"); _MTL_PRIVATE_DEF_SEL(setDepthFailureOperation_, "setDepthFailureOperation:"); _MTL_PRIVATE_DEF_SEL(setDepthPlane_, "setDepthPlane:"); _MTL_PRIVATE_DEF_SEL(setDepthResolveFilter_, "setDepthResolveFilter:"); _MTL_PRIVATE_DEF_SEL(setDepthStencilPassOperation_, "setDepthStencilPassOperation:"); _MTL_PRIVATE_DEF_SEL(setDepthStencilState_, "setDepthStencilState:"); _MTL_PRIVATE_DEF_SEL(setDepthStoreAction_, "setDepthStoreAction:"); _MTL_PRIVATE_DEF_SEL(setDepthStoreActionOptions_, "setDepthStoreActionOptions:"); _MTL_PRIVATE_DEF_SEL(setDepthWriteEnabled_, "setDepthWriteEnabled:"); _MTL_PRIVATE_DEF_SEL(setDestination_, "setDestination:"); _MTL_PRIVATE_DEF_SEL(setDestinationAlphaBlendFactor_, "setDestinationAlphaBlendFactor:"); _MTL_PRIVATE_DEF_SEL(setDestinationRGBBlendFactor_, 
"setDestinationRGBBlendFactor:"); _MTL_PRIVATE_DEF_SEL(setDispatchType_, "setDispatchType:"); _MTL_PRIVATE_DEF_SEL(setEnableLogging_, "setEnableLogging:"); _MTL_PRIVATE_DEF_SEL(setEndOfEncoderSampleIndex_, "setEndOfEncoderSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setEndOfFragmentSampleIndex_, "setEndOfFragmentSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setEndOfVertexSampleIndex_, "setEndOfVertexSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setErrorOptions_, "setErrorOptions:"); _MTL_PRIVATE_DEF_SEL(setFastMathEnabled_, "setFastMathEnabled:"); _MTL_PRIVATE_DEF_SEL(setFormat_, "setFormat:"); _MTL_PRIVATE_DEF_SEL(setFragmentAccelerationStructure_atBufferIndex_, "setFragmentAccelerationStructure:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentAdditionalBinaryFunctions_, "setFragmentAdditionalBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setFragmentBuffer_offset_atIndex_, "setFragmentBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentBufferOffset_atIndex_, "setFragmentBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentBuffers_offsets_withRange_, "setFragmentBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setFragmentBytes_length_atIndex_, "setFragmentBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentFunction_, "setFragmentFunction:"); _MTL_PRIVATE_DEF_SEL(setFragmentIntersectionFunctionTable_atBufferIndex_, "setFragmentIntersectionFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentIntersectionFunctionTables_withBufferRange_, "setFragmentIntersectionFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setFragmentLinkedFunctions_, "setFragmentLinkedFunctions:"); _MTL_PRIVATE_DEF_SEL(setFragmentPreloadedLibraries_, "setFragmentPreloadedLibraries:"); _MTL_PRIVATE_DEF_SEL(setFragmentSamplerState_atIndex_, "setFragmentSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentSamplerState_lodMinClamp_lodMaxClamp_atIndex_, "setFragmentSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentSamplerStates_lodMinClamps_lodMaxClamps_withRange_, 
"setFragmentSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setFragmentSamplerStates_withRange_, "setFragmentSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setFragmentTexture_atIndex_, "setFragmentTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentTextures_withRange_, "setFragmentTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setFragmentVisibleFunctionTable_atBufferIndex_, "setFragmentVisibleFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setFragmentVisibleFunctionTables_withBufferRange_, "setFragmentVisibleFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setFrontFaceStencil_, "setFrontFaceStencil:"); _MTL_PRIVATE_DEF_SEL(setFrontFacingWinding_, "setFrontFacingWinding:"); _MTL_PRIVATE_DEF_SEL(setFunction_atIndex_, "setFunction:atIndex:"); _MTL_PRIVATE_DEF_SEL(setFunctionCount_, "setFunctionCount:"); _MTL_PRIVATE_DEF_SEL(setFunctionGraphs_, "setFunctionGraphs:"); _MTL_PRIVATE_DEF_SEL(setFunctionName_, "setFunctionName:"); _MTL_PRIVATE_DEF_SEL(setFunctions_, "setFunctions:"); _MTL_PRIVATE_DEF_SEL(setFunctions_withRange_, "setFunctions:withRange:"); _MTL_PRIVATE_DEF_SEL(setGeometryDescriptors_, "setGeometryDescriptors:"); _MTL_PRIVATE_DEF_SEL(setGroups_, "setGroups:"); _MTL_PRIVATE_DEF_SEL(setHazardTrackingMode_, "setHazardTrackingMode:"); _MTL_PRIVATE_DEF_SEL(setHeight_, "setHeight:"); _MTL_PRIVATE_DEF_SEL(setImageblockSampleLength_, "setImageblockSampleLength:"); _MTL_PRIVATE_DEF_SEL(setImageblockWidth_height_, "setImageblockWidth:height:"); _MTL_PRIVATE_DEF_SEL(setIndex_, "setIndex:"); _MTL_PRIVATE_DEF_SEL(setIndexBuffer_, "setIndexBuffer:"); _MTL_PRIVATE_DEF_SEL(setIndexBufferIndex_, "setIndexBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setIndexBufferOffset_, "setIndexBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setIndexType_, "setIndexType:"); _MTL_PRIVATE_DEF_SEL(setIndirectCommandBuffer_atIndex_, "setIndirectCommandBuffer:atIndex:"); _MTL_PRIVATE_DEF_SEL(setIndirectCommandBuffers_withRange_, "setIndirectCommandBuffers:withRange:"); 
_MTL_PRIVATE_DEF_SEL(setInheritBuffers_, "setInheritBuffers:"); _MTL_PRIVATE_DEF_SEL(setInheritPipelineState_, "setInheritPipelineState:"); _MTL_PRIVATE_DEF_SEL(setInitialCapacity_, "setInitialCapacity:"); _MTL_PRIVATE_DEF_SEL(setInputPrimitiveTopology_, "setInputPrimitiveTopology:"); _MTL_PRIVATE_DEF_SEL(setInsertLibraries_, "setInsertLibraries:"); _MTL_PRIVATE_DEF_SEL(setInstallName_, "setInstallName:"); _MTL_PRIVATE_DEF_SEL(setInstanceCount_, "setInstanceCount:"); _MTL_PRIVATE_DEF_SEL(setInstanceCountBuffer_, "setInstanceCountBuffer:"); _MTL_PRIVATE_DEF_SEL(setInstanceCountBufferOffset_, "setInstanceCountBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setInstanceDescriptorBuffer_, "setInstanceDescriptorBuffer:"); _MTL_PRIVATE_DEF_SEL(setInstanceDescriptorBufferOffset_, "setInstanceDescriptorBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setInstanceDescriptorStride_, "setInstanceDescriptorStride:"); _MTL_PRIVATE_DEF_SEL(setInstanceDescriptorType_, "setInstanceDescriptorType:"); _MTL_PRIVATE_DEF_SEL(setInstanceTransformationMatrixLayout_, "setInstanceTransformationMatrixLayout:"); _MTL_PRIVATE_DEF_SEL(setInstancedAccelerationStructures_, "setInstancedAccelerationStructures:"); _MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTable_atBufferIndex_, "setIntersectionFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTable_atIndex_, "setIntersectionFunctionTable:atIndex:"); _MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTableOffset_, "setIntersectionFunctionTableOffset:"); _MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTables_withBufferRange_, "setIntersectionFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setIntersectionFunctionTables_withRange_, "setIntersectionFunctionTables:withRange:"); _MTL_PRIVATE_DEF_SEL(setKernelBuffer_offset_atIndex_, "setKernelBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setKernelBuffer_offset_attributeStride_atIndex_, "setKernelBuffer:offset:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setLabel_, "setLabel:"); 
_MTL_PRIVATE_DEF_SEL(setLanguageVersion_, "setLanguageVersion:"); _MTL_PRIVATE_DEF_SEL(setLayer_atIndex_, "setLayer:atIndex:"); _MTL_PRIVATE_DEF_SEL(setLevel_, "setLevel:"); _MTL_PRIVATE_DEF_SEL(setLibraries_, "setLibraries:"); _MTL_PRIVATE_DEF_SEL(setLibraryType_, "setLibraryType:"); _MTL_PRIVATE_DEF_SEL(setLinkedFunctions_, "setLinkedFunctions:"); _MTL_PRIVATE_DEF_SEL(setLoadAction_, "setLoadAction:"); _MTL_PRIVATE_DEF_SEL(setLodAverage_, "setLodAverage:"); _MTL_PRIVATE_DEF_SEL(setLodMaxClamp_, "setLodMaxClamp:"); _MTL_PRIVATE_DEF_SEL(setLodMinClamp_, "setLodMinClamp:"); _MTL_PRIVATE_DEF_SEL(setLogState_, "setLogState:"); _MTL_PRIVATE_DEF_SEL(setMagFilter_, "setMagFilter:"); _MTL_PRIVATE_DEF_SEL(setMathFloatingPointFunctions_, "setMathFloatingPointFunctions:"); _MTL_PRIVATE_DEF_SEL(setMathMode_, "setMathMode:"); _MTL_PRIVATE_DEF_SEL(setMaxAnisotropy_, "setMaxAnisotropy:"); _MTL_PRIVATE_DEF_SEL(setMaxCallStackDepth_, "setMaxCallStackDepth:"); _MTL_PRIVATE_DEF_SEL(setMaxCommandBufferCount_, "setMaxCommandBufferCount:"); _MTL_PRIVATE_DEF_SEL(setMaxCommandsInFlight_, "setMaxCommandsInFlight:"); _MTL_PRIVATE_DEF_SEL(setMaxFragmentBufferBindCount_, "setMaxFragmentBufferBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxFragmentCallStackDepth_, "setMaxFragmentCallStackDepth:"); _MTL_PRIVATE_DEF_SEL(setMaxInstanceCount_, "setMaxInstanceCount:"); _MTL_PRIVATE_DEF_SEL(setMaxKernelBufferBindCount_, "setMaxKernelBufferBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxKernelThreadgroupMemoryBindCount_, "setMaxKernelThreadgroupMemoryBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxMeshBufferBindCount_, "setMaxMeshBufferBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxMotionTransformCount_, "setMaxMotionTransformCount:"); _MTL_PRIVATE_DEF_SEL(setMaxObjectBufferBindCount_, "setMaxObjectBufferBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxObjectThreadgroupMemoryBindCount_, "setMaxObjectThreadgroupMemoryBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxTessellationFactor_, "setMaxTessellationFactor:"); 
_MTL_PRIVATE_DEF_SEL(setMaxTotalThreadgroupsPerMeshGrid_, "setMaxTotalThreadgroupsPerMeshGrid:"); _MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerMeshThreadgroup_, "setMaxTotalThreadsPerMeshThreadgroup:"); _MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerObjectThreadgroup_, "setMaxTotalThreadsPerObjectThreadgroup:"); _MTL_PRIVATE_DEF_SEL(setMaxTotalThreadsPerThreadgroup_, "setMaxTotalThreadsPerThreadgroup:"); _MTL_PRIVATE_DEF_SEL(setMaxVertexAmplificationCount_, "setMaxVertexAmplificationCount:"); _MTL_PRIVATE_DEF_SEL(setMaxVertexBufferBindCount_, "setMaxVertexBufferBindCount:"); _MTL_PRIVATE_DEF_SEL(setMaxVertexCallStackDepth_, "setMaxVertexCallStackDepth:"); _MTL_PRIVATE_DEF_SEL(setMeshBuffer_offset_atIndex_, "setMeshBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshBufferOffset_atIndex_, "setMeshBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshBuffers_offsets_withRange_, "setMeshBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setMeshBytes_length_atIndex_, "setMeshBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshFunction_, "setMeshFunction:"); _MTL_PRIVATE_DEF_SEL(setMeshLinkedFunctions_, "setMeshLinkedFunctions:"); _MTL_PRIVATE_DEF_SEL(setMeshSamplerState_atIndex_, "setMeshSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshSamplerState_lodMinClamp_lodMaxClamp_atIndex_, "setMeshSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshSamplerStates_lodMinClamps_lodMaxClamps_withRange_, "setMeshSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setMeshSamplerStates_withRange_, "setMeshSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setMeshTexture_atIndex_, "setMeshTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setMeshTextures_withRange_, "setMeshTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth_, "setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth:"); _MTL_PRIVATE_DEF_SEL(setMinFilter_, "setMinFilter:"); _MTL_PRIVATE_DEF_SEL(setMipFilter_, "setMipFilter:"); 
_MTL_PRIVATE_DEF_SEL(setMipmapLevelCount_, "setMipmapLevelCount:"); _MTL_PRIVATE_DEF_SEL(setMotionEndBorderMode_, "setMotionEndBorderMode:"); _MTL_PRIVATE_DEF_SEL(setMotionEndTime_, "setMotionEndTime:"); _MTL_PRIVATE_DEF_SEL(setMotionKeyframeCount_, "setMotionKeyframeCount:"); _MTL_PRIVATE_DEF_SEL(setMotionStartBorderMode_, "setMotionStartBorderMode:"); _MTL_PRIVATE_DEF_SEL(setMotionStartTime_, "setMotionStartTime:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformBuffer_, "setMotionTransformBuffer:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformBufferOffset_, "setMotionTransformBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformCount_, "setMotionTransformCount:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformCountBuffer_, "setMotionTransformCountBuffer:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformCountBufferOffset_, "setMotionTransformCountBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformStride_, "setMotionTransformStride:"); _MTL_PRIVATE_DEF_SEL(setMotionTransformType_, "setMotionTransformType:"); _MTL_PRIVATE_DEF_SEL(setMutability_, "setMutability:"); _MTL_PRIVATE_DEF_SEL(setName_, "setName:"); _MTL_PRIVATE_DEF_SEL(setNodes_, "setNodes:"); _MTL_PRIVATE_DEF_SEL(setNormalizedCoordinates_, "setNormalizedCoordinates:"); _MTL_PRIVATE_DEF_SEL(setObject_atIndexedSubscript_, "setObject:atIndexedSubscript:"); _MTL_PRIVATE_DEF_SEL(setObjectBuffer_offset_atIndex_, "setObjectBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectBufferOffset_atIndex_, "setObjectBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectBuffers_offsets_withRange_, "setObjectBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setObjectBytes_length_atIndex_, "setObjectBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectFunction_, "setObjectFunction:"); _MTL_PRIVATE_DEF_SEL(setObjectLinkedFunctions_, "setObjectLinkedFunctions:"); _MTL_PRIVATE_DEF_SEL(setObjectSamplerState_atIndex_, "setObjectSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectSamplerState_lodMinClamp_lodMaxClamp_atIndex_, 
"setObjectSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectSamplerStates_lodMinClamps_lodMaxClamps_withRange_, "setObjectSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setObjectSamplerStates_withRange_, "setObjectSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setObjectTexture_atIndex_, "setObjectTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectTextures_withRange_, "setObjectTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setObjectThreadgroupMemoryLength_atIndex_, "setObjectThreadgroupMemoryLength:atIndex:"); _MTL_PRIVATE_DEF_SEL(setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth_, "setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth:"); _MTL_PRIVATE_DEF_SEL(setOffset_, "setOffset:"); _MTL_PRIVATE_DEF_SEL(setOpaque_, "setOpaque:"); _MTL_PRIVATE_DEF_SEL(setOpaqueCurveIntersectionFunctionWithSignature_atIndex_, "setOpaqueCurveIntersectionFunctionWithSignature:atIndex:"); _MTL_PRIVATE_DEF_SEL(setOpaqueCurveIntersectionFunctionWithSignature_withRange_, "setOpaqueCurveIntersectionFunctionWithSignature:withRange:"); _MTL_PRIVATE_DEF_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_atIndex_, "setOpaqueTriangleIntersectionFunctionWithSignature:atIndex:"); _MTL_PRIVATE_DEF_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_withRange_, "setOpaqueTriangleIntersectionFunctionWithSignature:withRange:"); _MTL_PRIVATE_DEF_SEL(setOptimizationLevel_, "setOptimizationLevel:"); _MTL_PRIVATE_DEF_SEL(setOptions_, "setOptions:"); _MTL_PRIVATE_DEF_SEL(setOutputNode_, "setOutputNode:"); _MTL_PRIVATE_DEF_SEL(setOutputURL_, "setOutputURL:"); _MTL_PRIVATE_DEF_SEL(setOwnerWithIdentity_, "setOwnerWithIdentity:"); _MTL_PRIVATE_DEF_SEL(setPayloadMemoryLength_, "setPayloadMemoryLength:"); _MTL_PRIVATE_DEF_SEL(setPixelFormat_, "setPixelFormat:"); _MTL_PRIVATE_DEF_SEL(setPreloadedLibraries_, "setPreloadedLibraries:"); _MTL_PRIVATE_DEF_SEL(setPreprocessorMacros_, "setPreprocessorMacros:"); 
_MTL_PRIVATE_DEF_SEL(setPreserveInvariance_, "setPreserveInvariance:"); _MTL_PRIVATE_DEF_SEL(setPrimitiveDataBuffer_, "setPrimitiveDataBuffer:"); _MTL_PRIVATE_DEF_SEL(setPrimitiveDataBufferOffset_, "setPrimitiveDataBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setPrimitiveDataElementSize_, "setPrimitiveDataElementSize:"); _MTL_PRIVATE_DEF_SEL(setPrimitiveDataStride_, "setPrimitiveDataStride:"); _MTL_PRIVATE_DEF_SEL(setPriority_, "setPriority:"); _MTL_PRIVATE_DEF_SEL(setPrivateFunctions_, "setPrivateFunctions:"); _MTL_PRIVATE_DEF_SEL(setPurgeableState_, "setPurgeableState:"); _MTL_PRIVATE_DEF_SEL(setRAddressMode_, "setRAddressMode:"); _MTL_PRIVATE_DEF_SEL(setRadiusBuffer_, "setRadiusBuffer:"); _MTL_PRIVATE_DEF_SEL(setRadiusBufferOffset_, "setRadiusBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setRadiusBuffers_, "setRadiusBuffers:"); _MTL_PRIVATE_DEF_SEL(setRadiusFormat_, "setRadiusFormat:"); _MTL_PRIVATE_DEF_SEL(setRadiusStride_, "setRadiusStride:"); _MTL_PRIVATE_DEF_SEL(setRasterSampleCount_, "setRasterSampleCount:"); _MTL_PRIVATE_DEF_SEL(setRasterizationEnabled_, "setRasterizationEnabled:"); _MTL_PRIVATE_DEF_SEL(setRasterizationRateMap_, "setRasterizationRateMap:"); _MTL_PRIVATE_DEF_SEL(setReadMask_, "setReadMask:"); _MTL_PRIVATE_DEF_SEL(setRenderPipelineState_, "setRenderPipelineState:"); _MTL_PRIVATE_DEF_SEL(setRenderPipelineState_atIndex_, "setRenderPipelineState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setRenderPipelineStates_withRange_, "setRenderPipelineStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setRenderTargetArrayLength_, "setRenderTargetArrayLength:"); _MTL_PRIVATE_DEF_SEL(setRenderTargetHeight_, "setRenderTargetHeight:"); _MTL_PRIVATE_DEF_SEL(setRenderTargetWidth_, "setRenderTargetWidth:"); _MTL_PRIVATE_DEF_SEL(setResolveDepthPlane_, "setResolveDepthPlane:"); _MTL_PRIVATE_DEF_SEL(setResolveLevel_, "setResolveLevel:"); _MTL_PRIVATE_DEF_SEL(setResolveSlice_, "setResolveSlice:"); _MTL_PRIVATE_DEF_SEL(setResolveTexture_, "setResolveTexture:"); 
_MTL_PRIVATE_DEF_SEL(setResourceOptions_, "setResourceOptions:"); _MTL_PRIVATE_DEF_SEL(setRetainedReferences_, "setRetainedReferences:"); _MTL_PRIVATE_DEF_SEL(setRgbBlendOperation_, "setRgbBlendOperation:"); _MTL_PRIVATE_DEF_SEL(setSAddressMode_, "setSAddressMode:"); _MTL_PRIVATE_DEF_SEL(setSampleBuffer_, "setSampleBuffer:"); _MTL_PRIVATE_DEF_SEL(setSampleCount_, "setSampleCount:"); _MTL_PRIVATE_DEF_SEL(setSamplePositions_count_, "setSamplePositions:count:"); _MTL_PRIVATE_DEF_SEL(setSamplerState_atIndex_, "setSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setSamplerState_lodMinClamp_lodMaxClamp_atIndex_, "setSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setSamplerStates_lodMinClamps_lodMaxClamps_withRange_, "setSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setSamplerStates_withRange_, "setSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setScissorRect_, "setScissorRect:"); _MTL_PRIVATE_DEF_SEL(setScissorRects_count_, "setScissorRects:count:"); _MTL_PRIVATE_DEF_SEL(setScratchBufferAllocator_, "setScratchBufferAllocator:"); _MTL_PRIVATE_DEF_SEL(setScreenSize_, "setScreenSize:"); _MTL_PRIVATE_DEF_SEL(setSegmentControlPointCount_, "setSegmentControlPointCount:"); _MTL_PRIVATE_DEF_SEL(setSegmentCount_, "setSegmentCount:"); _MTL_PRIVATE_DEF_SEL(setShaderValidation_, "setShaderValidation:"); _MTL_PRIVATE_DEF_SEL(setShouldMaximizeConcurrentCompilation_, "setShouldMaximizeConcurrentCompilation:"); _MTL_PRIVATE_DEF_SEL(setSignaledValue_, "setSignaledValue:"); _MTL_PRIVATE_DEF_SEL(setSize_, "setSize:"); _MTL_PRIVATE_DEF_SEL(setSlice_, "setSlice:"); _MTL_PRIVATE_DEF_SEL(setSourceAlphaBlendFactor_, "setSourceAlphaBlendFactor:"); _MTL_PRIVATE_DEF_SEL(setSourceRGBBlendFactor_, "setSourceRGBBlendFactor:"); _MTL_PRIVATE_DEF_SEL(setSparsePageSize_, "setSparsePageSize:"); _MTL_PRIVATE_DEF_SEL(setSpecializedName_, "setSpecializedName:"); _MTL_PRIVATE_DEF_SEL(setStageInRegion_, "setStageInRegion:"); 
// Auto-generated metal-cpp selector-registration table (continued from the
// previous chunk of this file). Each _MTL_PRIVATE_DEF_SEL(name, "objc:sel:")
// invocation binds the Objective-C selector spelled by the string literal to
// the C++ identifier `name`; the wrapper methods later in this header look the
// selectors up via _MTL_PRIVATE_SEL(name) when dispatching through
// Object::sendMessage.
// NOTE(review): this is vendored, machine-generated code (deps/metal-cpp) --
// do not hand-edit; regenerate from upstream metal-cpp instead. Left
// byte-identical here.
_MTL_PRIVATE_DEF_SEL(setStageInRegionWithIndirectBuffer_indirectBufferOffset_, "setStageInRegionWithIndirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setStageInputDescriptor_, "setStageInputDescriptor:"); _MTL_PRIVATE_DEF_SEL(setStartOfEncoderSampleIndex_, "setStartOfEncoderSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setStartOfFragmentSampleIndex_, "setStartOfFragmentSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setStartOfVertexSampleIndex_, "setStartOfVertexSampleIndex:"); _MTL_PRIVATE_DEF_SEL(setStencilAttachment_, "setStencilAttachment:"); _MTL_PRIVATE_DEF_SEL(setStencilAttachmentPixelFormat_, "setStencilAttachmentPixelFormat:"); _MTL_PRIVATE_DEF_SEL(setStencilCompareFunction_, "setStencilCompareFunction:"); _MTL_PRIVATE_DEF_SEL(setStencilFailureOperation_, "setStencilFailureOperation:"); _MTL_PRIVATE_DEF_SEL(setStencilFrontReferenceValue_backReferenceValue_, "setStencilFrontReferenceValue:backReferenceValue:"); _MTL_PRIVATE_DEF_SEL(setStencilReferenceValue_, "setStencilReferenceValue:"); _MTL_PRIVATE_DEF_SEL(setStencilResolveFilter_, "setStencilResolveFilter:"); _MTL_PRIVATE_DEF_SEL(setStencilStoreAction_, "setStencilStoreAction:"); _MTL_PRIVATE_DEF_SEL(setStencilStoreActionOptions_, "setStencilStoreActionOptions:"); _MTL_PRIVATE_DEF_SEL(setStepFunction_, "setStepFunction:"); _MTL_PRIVATE_DEF_SEL(setStepRate_, "setStepRate:"); _MTL_PRIVATE_DEF_SEL(setStorageMode_, "setStorageMode:"); _MTL_PRIVATE_DEF_SEL(setStoreAction_, "setStoreAction:"); _MTL_PRIVATE_DEF_SEL(setStoreActionOptions_, "setStoreActionOptions:"); _MTL_PRIVATE_DEF_SEL(setStride_, "setStride:"); _MTL_PRIVATE_DEF_SEL(setSupportAddingBinaryFunctions_, "setSupportAddingBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setSupportAddingFragmentBinaryFunctions_, "setSupportAddingFragmentBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setSupportAddingVertexBinaryFunctions_, "setSupportAddingVertexBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setSupportArgumentBuffers_, "setSupportArgumentBuffers:"); 
_MTL_PRIVATE_DEF_SEL(setSupportDynamicAttributeStride_, "setSupportDynamicAttributeStride:"); _MTL_PRIVATE_DEF_SEL(setSupportIndirectCommandBuffers_, "setSupportIndirectCommandBuffers:"); _MTL_PRIVATE_DEF_SEL(setSupportRayTracing_, "setSupportRayTracing:"); _MTL_PRIVATE_DEF_SEL(setSwizzle_, "setSwizzle:"); _MTL_PRIVATE_DEF_SEL(setTAddressMode_, "setTAddressMode:"); _MTL_PRIVATE_DEF_SEL(setTessellationControlPointIndexType_, "setTessellationControlPointIndexType:"); _MTL_PRIVATE_DEF_SEL(setTessellationFactorBuffer_offset_instanceStride_, "setTessellationFactorBuffer:offset:instanceStride:"); _MTL_PRIVATE_DEF_SEL(setTessellationFactorFormat_, "setTessellationFactorFormat:"); _MTL_PRIVATE_DEF_SEL(setTessellationFactorScale_, "setTessellationFactorScale:"); _MTL_PRIVATE_DEF_SEL(setTessellationFactorScaleEnabled_, "setTessellationFactorScaleEnabled:"); _MTL_PRIVATE_DEF_SEL(setTessellationFactorStepFunction_, "setTessellationFactorStepFunction:"); _MTL_PRIVATE_DEF_SEL(setTessellationOutputWindingOrder_, "setTessellationOutputWindingOrder:"); _MTL_PRIVATE_DEF_SEL(setTessellationPartitionMode_, "setTessellationPartitionMode:"); _MTL_PRIVATE_DEF_SEL(setTexture_, "setTexture:"); _MTL_PRIVATE_DEF_SEL(setTexture_atIndex_, "setTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTextureType_, "setTextureType:"); _MTL_PRIVATE_DEF_SEL(setTextures_withRange_, "setTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setThreadGroupSizeIsMultipleOfThreadExecutionWidth_, "setThreadGroupSizeIsMultipleOfThreadExecutionWidth:"); _MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_, "setThreadgroupMemoryLength:"); _MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_atIndex_, "setThreadgroupMemoryLength:atIndex:"); _MTL_PRIVATE_DEF_SEL(setThreadgroupMemoryLength_offset_atIndex_, "setThreadgroupMemoryLength:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setThreadgroupSizeMatchesTileSize_, "setThreadgroupSizeMatchesTileSize:"); _MTL_PRIVATE_DEF_SEL(setTileAccelerationStructure_atBufferIndex_, 
"setTileAccelerationStructure:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setTileAdditionalBinaryFunctions_, "setTileAdditionalBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setTileBuffer_offset_atIndex_, "setTileBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileBufferOffset_atIndex_, "setTileBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileBuffers_offsets_withRange_, "setTileBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setTileBytes_length_atIndex_, "setTileBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileFunction_, "setTileFunction:"); _MTL_PRIVATE_DEF_SEL(setTileHeight_, "setTileHeight:"); _MTL_PRIVATE_DEF_SEL(setTileIntersectionFunctionTable_atBufferIndex_, "setTileIntersectionFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setTileIntersectionFunctionTables_withBufferRange_, "setTileIntersectionFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setTileSamplerState_atIndex_, "setTileSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileSamplerState_lodMinClamp_lodMaxClamp_atIndex_, "setTileSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileSamplerStates_lodMinClamps_lodMaxClamps_withRange_, "setTileSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setTileSamplerStates_withRange_, "setTileSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setTileTexture_atIndex_, "setTileTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setTileTextures_withRange_, "setTileTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setTileVisibleFunctionTable_atBufferIndex_, "setTileVisibleFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setTileVisibleFunctionTables_withBufferRange_, "setTileVisibleFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setTileWidth_, "setTileWidth:"); _MTL_PRIVATE_DEF_SEL(setTransformationMatrixBuffer_, "setTransformationMatrixBuffer:"); _MTL_PRIVATE_DEF_SEL(setTransformationMatrixBufferOffset_, "setTransformationMatrixBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setTransformationMatrixLayout_, 
"setTransformationMatrixLayout:"); _MTL_PRIVATE_DEF_SEL(setTriangleCount_, "setTriangleCount:"); _MTL_PRIVATE_DEF_SEL(setTriangleFillMode_, "setTriangleFillMode:"); _MTL_PRIVATE_DEF_SEL(setType_, "setType:"); _MTL_PRIVATE_DEF_SEL(setUrl_, "setUrl:"); _MTL_PRIVATE_DEF_SEL(setUsage_, "setUsage:"); _MTL_PRIVATE_DEF_SEL(setVertexAccelerationStructure_atBufferIndex_, "setVertexAccelerationStructure:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexAdditionalBinaryFunctions_, "setVertexAdditionalBinaryFunctions:"); _MTL_PRIVATE_DEF_SEL(setVertexAmplificationCount_viewMappings_, "setVertexAmplificationCount:viewMappings:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffer_, "setVertexBuffer:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffer_offset_atIndex_, "setVertexBuffer:offset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffer_offset_attributeStride_atIndex_, "setVertexBuffer:offset:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_, "setVertexBufferOffset:"); _MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_atIndex_, "setVertexBufferOffset:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexBufferOffset_attributeStride_atIndex_, "setVertexBufferOffset:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffers_, "setVertexBuffers:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffers_offsets_attributeStrides_withRange_, "setVertexBuffers:offsets:attributeStrides:withRange:"); _MTL_PRIVATE_DEF_SEL(setVertexBuffers_offsets_withRange_, "setVertexBuffers:offsets:withRange:"); _MTL_PRIVATE_DEF_SEL(setVertexBytes_length_atIndex_, "setVertexBytes:length:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexBytes_length_attributeStride_atIndex_, "setVertexBytes:length:attributeStride:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexDescriptor_, "setVertexDescriptor:"); _MTL_PRIVATE_DEF_SEL(setVertexFormat_, "setVertexFormat:"); _MTL_PRIVATE_DEF_SEL(setVertexFunction_, "setVertexFunction:"); _MTL_PRIVATE_DEF_SEL(setVertexIntersectionFunctionTable_atBufferIndex_, "setVertexIntersectionFunctionTable:atBufferIndex:"); 
_MTL_PRIVATE_DEF_SEL(setVertexIntersectionFunctionTables_withBufferRange_, "setVertexIntersectionFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setVertexLinkedFunctions_, "setVertexLinkedFunctions:"); _MTL_PRIVATE_DEF_SEL(setVertexPreloadedLibraries_, "setVertexPreloadedLibraries:"); _MTL_PRIVATE_DEF_SEL(setVertexSamplerState_atIndex_, "setVertexSamplerState:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexSamplerState_lodMinClamp_lodMaxClamp_atIndex_, "setVertexSamplerState:lodMinClamp:lodMaxClamp:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexSamplerStates_lodMinClamps_lodMaxClamps_withRange_, "setVertexSamplerStates:lodMinClamps:lodMaxClamps:withRange:"); _MTL_PRIVATE_DEF_SEL(setVertexSamplerStates_withRange_, "setVertexSamplerStates:withRange:"); _MTL_PRIVATE_DEF_SEL(setVertexStride_, "setVertexStride:"); _MTL_PRIVATE_DEF_SEL(setVertexTexture_atIndex_, "setVertexTexture:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexTextures_withRange_, "setVertexTextures:withRange:"); _MTL_PRIVATE_DEF_SEL(setVertexVisibleFunctionTable_atBufferIndex_, "setVertexVisibleFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setVertexVisibleFunctionTables_withBufferRange_, "setVertexVisibleFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setViewport_, "setViewport:"); _MTL_PRIVATE_DEF_SEL(setViewports_count_, "setViewports:count:"); _MTL_PRIVATE_DEF_SEL(setVisibilityResultBuffer_, "setVisibilityResultBuffer:"); _MTL_PRIVATE_DEF_SEL(setVisibilityResultMode_offset_, "setVisibilityResultMode:offset:"); _MTL_PRIVATE_DEF_SEL(setVisibleFunctionTable_atBufferIndex_, "setVisibleFunctionTable:atBufferIndex:"); _MTL_PRIVATE_DEF_SEL(setVisibleFunctionTable_atIndex_, "setVisibleFunctionTable:atIndex:"); _MTL_PRIVATE_DEF_SEL(setVisibleFunctionTables_withBufferRange_, "setVisibleFunctionTables:withBufferRange:"); _MTL_PRIVATE_DEF_SEL(setVisibleFunctionTables_withRange_, "setVisibleFunctionTables:withRange:"); _MTL_PRIVATE_DEF_SEL(setWidth_, "setWidth:"); _MTL_PRIVATE_DEF_SEL(setWriteMask_, 
"setWriteMask:"); _MTL_PRIVATE_DEF_SEL(shaderValidation, "shaderValidation"); _MTL_PRIVATE_DEF_SEL(sharedCaptureManager, "sharedCaptureManager"); _MTL_PRIVATE_DEF_SEL(shouldMaximizeConcurrentCompilation, "shouldMaximizeConcurrentCompilation"); _MTL_PRIVATE_DEF_SEL(signalEvent_value_, "signalEvent:value:"); _MTL_PRIVATE_DEF_SEL(signaledValue, "signaledValue"); _MTL_PRIVATE_DEF_SEL(size, "size"); _MTL_PRIVATE_DEF_SEL(slice, "slice"); _MTL_PRIVATE_DEF_SEL(sourceAlphaBlendFactor, "sourceAlphaBlendFactor"); _MTL_PRIVATE_DEF_SEL(sourceRGBBlendFactor, "sourceRGBBlendFactor"); _MTL_PRIVATE_DEF_SEL(sparsePageSize, "sparsePageSize"); _MTL_PRIVATE_DEF_SEL(sparseTileSizeInBytes, "sparseTileSizeInBytes"); _MTL_PRIVATE_DEF_SEL(sparseTileSizeInBytesForSparsePageSize_, "sparseTileSizeInBytesForSparsePageSize:"); _MTL_PRIVATE_DEF_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_, "sparseTileSizeWithTextureType:pixelFormat:sampleCount:"); _MTL_PRIVATE_DEF_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_sparsePageSize_, "sparseTileSizeWithTextureType:pixelFormat:sampleCount:sparsePageSize:"); _MTL_PRIVATE_DEF_SEL(specializedName, "specializedName"); _MTL_PRIVATE_DEF_SEL(stageInputAttributes, "stageInputAttributes"); _MTL_PRIVATE_DEF_SEL(stageInputDescriptor, "stageInputDescriptor"); _MTL_PRIVATE_DEF_SEL(stageInputOutputDescriptor, "stageInputOutputDescriptor"); _MTL_PRIVATE_DEF_SEL(startCaptureWithCommandQueue_, "startCaptureWithCommandQueue:"); _MTL_PRIVATE_DEF_SEL(startCaptureWithDescriptor_error_, "startCaptureWithDescriptor:error:"); _MTL_PRIVATE_DEF_SEL(startCaptureWithDevice_, "startCaptureWithDevice:"); _MTL_PRIVATE_DEF_SEL(startCaptureWithScope_, "startCaptureWithScope:"); _MTL_PRIVATE_DEF_SEL(startOfEncoderSampleIndex, "startOfEncoderSampleIndex"); _MTL_PRIVATE_DEF_SEL(startOfFragmentSampleIndex, "startOfFragmentSampleIndex"); _MTL_PRIVATE_DEF_SEL(startOfVertexSampleIndex, "startOfVertexSampleIndex"); _MTL_PRIVATE_DEF_SEL(staticThreadgroupMemoryLength, 
"staticThreadgroupMemoryLength"); _MTL_PRIVATE_DEF_SEL(status, "status"); _MTL_PRIVATE_DEF_SEL(stencilAttachment, "stencilAttachment"); _MTL_PRIVATE_DEF_SEL(stencilAttachmentPixelFormat, "stencilAttachmentPixelFormat"); _MTL_PRIVATE_DEF_SEL(stencilCompareFunction, "stencilCompareFunction"); _MTL_PRIVATE_DEF_SEL(stencilFailureOperation, "stencilFailureOperation"); _MTL_PRIVATE_DEF_SEL(stencilResolveFilter, "stencilResolveFilter"); _MTL_PRIVATE_DEF_SEL(stepFunction, "stepFunction"); _MTL_PRIVATE_DEF_SEL(stepRate, "stepRate"); _MTL_PRIVATE_DEF_SEL(stopCapture, "stopCapture"); _MTL_PRIVATE_DEF_SEL(storageMode, "storageMode"); _MTL_PRIVATE_DEF_SEL(storeAction, "storeAction"); _MTL_PRIVATE_DEF_SEL(storeActionOptions, "storeActionOptions"); _MTL_PRIVATE_DEF_SEL(stride, "stride"); _MTL_PRIVATE_DEF_SEL(structType, "structType"); _MTL_PRIVATE_DEF_SEL(supportAddingBinaryFunctions, "supportAddingBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(supportAddingFragmentBinaryFunctions, "supportAddingFragmentBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(supportAddingVertexBinaryFunctions, "supportAddingVertexBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(supportArgumentBuffers, "supportArgumentBuffers"); _MTL_PRIVATE_DEF_SEL(supportDynamicAttributeStride, "supportDynamicAttributeStride"); _MTL_PRIVATE_DEF_SEL(supportIndirectCommandBuffers, "supportIndirectCommandBuffers"); _MTL_PRIVATE_DEF_SEL(supportRayTracing, "supportRayTracing"); _MTL_PRIVATE_DEF_SEL(supports32BitFloatFiltering, "supports32BitFloatFiltering"); _MTL_PRIVATE_DEF_SEL(supports32BitMSAA, "supports32BitMSAA"); _MTL_PRIVATE_DEF_SEL(supportsBCTextureCompression, "supportsBCTextureCompression"); _MTL_PRIVATE_DEF_SEL(supportsCounterSampling_, "supportsCounterSampling:"); _MTL_PRIVATE_DEF_SEL(supportsDestination_, "supportsDestination:"); _MTL_PRIVATE_DEF_SEL(supportsDynamicLibraries, "supportsDynamicLibraries"); _MTL_PRIVATE_DEF_SEL(supportsFamily_, "supportsFamily:"); _MTL_PRIVATE_DEF_SEL(supportsFeatureSet_, "supportsFeatureSet:"); 
_MTL_PRIVATE_DEF_SEL(supportsFunctionPointers, "supportsFunctionPointers"); _MTL_PRIVATE_DEF_SEL(supportsFunctionPointersFromRender, "supportsFunctionPointersFromRender"); _MTL_PRIVATE_DEF_SEL(supportsPrimitiveMotionBlur, "supportsPrimitiveMotionBlur"); _MTL_PRIVATE_DEF_SEL(supportsPullModelInterpolation, "supportsPullModelInterpolation"); _MTL_PRIVATE_DEF_SEL(supportsQueryTextureLOD, "supportsQueryTextureLOD"); _MTL_PRIVATE_DEF_SEL(supportsRasterizationRateMapWithLayerCount_, "supportsRasterizationRateMapWithLayerCount:"); _MTL_PRIVATE_DEF_SEL(supportsRaytracing, "supportsRaytracing"); _MTL_PRIVATE_DEF_SEL(supportsRaytracingFromRender, "supportsRaytracingFromRender"); _MTL_PRIVATE_DEF_SEL(supportsRenderDynamicLibraries, "supportsRenderDynamicLibraries"); _MTL_PRIVATE_DEF_SEL(supportsShaderBarycentricCoordinates, "supportsShaderBarycentricCoordinates"); _MTL_PRIVATE_DEF_SEL(supportsTextureSampleCount_, "supportsTextureSampleCount:"); _MTL_PRIVATE_DEF_SEL(supportsVertexAmplificationCount_, "supportsVertexAmplificationCount:"); _MTL_PRIVATE_DEF_SEL(swizzle, "swizzle"); _MTL_PRIVATE_DEF_SEL(synchronizeResource_, "synchronizeResource:"); _MTL_PRIVATE_DEF_SEL(synchronizeTexture_slice_level_, "synchronizeTexture:slice:level:"); _MTL_PRIVATE_DEF_SEL(tAddressMode, "tAddressMode"); _MTL_PRIVATE_DEF_SEL(tailSizeInBytes, "tailSizeInBytes"); _MTL_PRIVATE_DEF_SEL(tessellationControlPointIndexType, "tessellationControlPointIndexType"); _MTL_PRIVATE_DEF_SEL(tessellationFactorFormat, "tessellationFactorFormat"); _MTL_PRIVATE_DEF_SEL(tessellationFactorStepFunction, "tessellationFactorStepFunction"); _MTL_PRIVATE_DEF_SEL(tessellationOutputWindingOrder, "tessellationOutputWindingOrder"); _MTL_PRIVATE_DEF_SEL(tessellationPartitionMode, "tessellationPartitionMode"); _MTL_PRIVATE_DEF_SEL(texture, "texture"); _MTL_PRIVATE_DEF_SEL(texture2DDescriptorWithPixelFormat_width_height_mipmapped_, "texture2DDescriptorWithPixelFormat:width:height:mipmapped:"); _MTL_PRIVATE_DEF_SEL(textureBarrier, 
"textureBarrier"); _MTL_PRIVATE_DEF_SEL(textureBufferDescriptorWithPixelFormat_width_resourceOptions_usage_, "textureBufferDescriptorWithPixelFormat:width:resourceOptions:usage:"); _MTL_PRIVATE_DEF_SEL(textureCubeDescriptorWithPixelFormat_size_mipmapped_, "textureCubeDescriptorWithPixelFormat:size:mipmapped:"); _MTL_PRIVATE_DEF_SEL(textureDataType, "textureDataType"); _MTL_PRIVATE_DEF_SEL(textureReferenceType, "textureReferenceType"); _MTL_PRIVATE_DEF_SEL(textureType, "textureType"); _MTL_PRIVATE_DEF_SEL(threadExecutionWidth, "threadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(threadGroupSizeIsMultipleOfThreadExecutionWidth, "threadGroupSizeIsMultipleOfThreadExecutionWidth"); _MTL_PRIVATE_DEF_SEL(threadgroupMemoryAlignment, "threadgroupMemoryAlignment"); _MTL_PRIVATE_DEF_SEL(threadgroupMemoryDataSize, "threadgroupMemoryDataSize"); _MTL_PRIVATE_DEF_SEL(threadgroupMemoryLength, "threadgroupMemoryLength"); _MTL_PRIVATE_DEF_SEL(threadgroupSizeMatchesTileSize, "threadgroupSizeMatchesTileSize"); _MTL_PRIVATE_DEF_SEL(tileAdditionalBinaryFunctions, "tileAdditionalBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(tileArguments, "tileArguments"); _MTL_PRIVATE_DEF_SEL(tileBindings, "tileBindings"); _MTL_PRIVATE_DEF_SEL(tileBuffers, "tileBuffers"); _MTL_PRIVATE_DEF_SEL(tileFunction, "tileFunction"); _MTL_PRIVATE_DEF_SEL(tileHeight, "tileHeight"); _MTL_PRIVATE_DEF_SEL(tileWidth, "tileWidth"); _MTL_PRIVATE_DEF_SEL(transformationMatrixBuffer, "transformationMatrixBuffer"); _MTL_PRIVATE_DEF_SEL(transformationMatrixBufferOffset, "transformationMatrixBufferOffset"); _MTL_PRIVATE_DEF_SEL(transformationMatrixLayout, "transformationMatrixLayout"); _MTL_PRIVATE_DEF_SEL(triangleCount, "triangleCount"); _MTL_PRIVATE_DEF_SEL(tryCancel, "tryCancel"); _MTL_PRIVATE_DEF_SEL(type, "type"); _MTL_PRIVATE_DEF_SEL(updateFence_, "updateFence:"); _MTL_PRIVATE_DEF_SEL(updateFence_afterStages_, "updateFence:afterStages:"); _MTL_PRIVATE_DEF_SEL(updateTextureMapping_mode_indirectBuffer_indirectBufferOffset_, 
"updateTextureMapping:mode:indirectBuffer:indirectBufferOffset:"); _MTL_PRIVATE_DEF_SEL(updateTextureMapping_mode_region_mipLevel_slice_, "updateTextureMapping:mode:region:mipLevel:slice:"); _MTL_PRIVATE_DEF_SEL(updateTextureMappings_mode_regions_mipLevels_slices_numRegions_, "updateTextureMappings:mode:regions:mipLevels:slices:numRegions:"); _MTL_PRIVATE_DEF_SEL(url, "url"); _MTL_PRIVATE_DEF_SEL(usage, "usage"); _MTL_PRIVATE_DEF_SEL(useHeap_, "useHeap:"); _MTL_PRIVATE_DEF_SEL(useHeap_stages_, "useHeap:stages:"); _MTL_PRIVATE_DEF_SEL(useHeaps_count_, "useHeaps:count:"); _MTL_PRIVATE_DEF_SEL(useHeaps_count_stages_, "useHeaps:count:stages:"); _MTL_PRIVATE_DEF_SEL(useResidencySet_, "useResidencySet:"); _MTL_PRIVATE_DEF_SEL(useResidencySets_count_, "useResidencySets:count:"); _MTL_PRIVATE_DEF_SEL(useResource_usage_, "useResource:usage:"); _MTL_PRIVATE_DEF_SEL(useResource_usage_stages_, "useResource:usage:stages:"); _MTL_PRIVATE_DEF_SEL(useResources_count_usage_, "useResources:count:usage:"); _MTL_PRIVATE_DEF_SEL(useResources_count_usage_stages_, "useResources:count:usage:stages:"); _MTL_PRIVATE_DEF_SEL(usedSize, "usedSize"); _MTL_PRIVATE_DEF_SEL(vertexAdditionalBinaryFunctions, "vertexAdditionalBinaryFunctions"); _MTL_PRIVATE_DEF_SEL(vertexArguments, "vertexArguments"); _MTL_PRIVATE_DEF_SEL(vertexAttributes, "vertexAttributes"); _MTL_PRIVATE_DEF_SEL(vertexBindings, "vertexBindings"); _MTL_PRIVATE_DEF_SEL(vertexBuffer, "vertexBuffer"); _MTL_PRIVATE_DEF_SEL(vertexBufferOffset, "vertexBufferOffset"); _MTL_PRIVATE_DEF_SEL(vertexBuffers, "vertexBuffers"); _MTL_PRIVATE_DEF_SEL(vertexDescriptor, "vertexDescriptor"); _MTL_PRIVATE_DEF_SEL(vertexFormat, "vertexFormat"); _MTL_PRIVATE_DEF_SEL(vertexFunction, "vertexFunction"); _MTL_PRIVATE_DEF_SEL(vertexLinkedFunctions, "vertexLinkedFunctions"); _MTL_PRIVATE_DEF_SEL(vertexPreloadedLibraries, "vertexPreloadedLibraries"); _MTL_PRIVATE_DEF_SEL(vertexStride, "vertexStride"); _MTL_PRIVATE_DEF_SEL(vertical, "vertical"); 
// Tail of the auto-generated selector table, then the MTL::Drawable wrapper
// (class declaration plus its _MTL_INLINE member definitions, which dispatch
// via Object::sendMessage using the selectors registered above), then the
// start of the MTL::PixelFormat enumeration. This is a single-header
// amalgamation, which is why `#pragma once` appears repeatedly mid-stream.
// NOTE(review): this region shows extraction damage -- the two bare
// `#include` directives below have lost their header names, and template
// argument lists appear stripped throughout (e.g. `std::function` and
// `NS::Referencing` with no `<...>` arguments, `Object::sendMessage` with no
// explicit return-type argument). Code is left byte-identical here; confirm
// against upstream metal-cpp before editing.
_MTL_PRIVATE_DEF_SEL(verticalSampleStorage, "verticalSampleStorage"); _MTL_PRIVATE_DEF_SEL(visibilityResultBuffer, "visibilityResultBuffer"); _MTL_PRIVATE_DEF_SEL(visibleFunctionTableDescriptor, "visibleFunctionTableDescriptor"); _MTL_PRIVATE_DEF_SEL(waitForEvent_value_, "waitForEvent:value:"); _MTL_PRIVATE_DEF_SEL(waitForFence_, "waitForFence:"); _MTL_PRIVATE_DEF_SEL(waitForFence_beforeStages_, "waitForFence:beforeStages:"); _MTL_PRIVATE_DEF_SEL(waitUntilCompleted, "waitUntilCompleted"); _MTL_PRIVATE_DEF_SEL(waitUntilScheduled, "waitUntilScheduled"); _MTL_PRIVATE_DEF_SEL(waitUntilSignaledValue_timeoutMS_, "waitUntilSignaledValue:timeoutMS:"); _MTL_PRIVATE_DEF_SEL(width, "width"); _MTL_PRIVATE_DEF_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_, "writeCompactedAccelerationStructureSize:toBuffer:offset:"); _MTL_PRIVATE_DEF_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_sizeDataType_, "writeCompactedAccelerationStructureSize:toBuffer:offset:sizeDataType:"); _MTL_PRIVATE_DEF_SEL(writeMask, "writeMask"); } #include #include namespace MTL { using DrawablePresentedHandler = void (^)(class Drawable*); using DrawablePresentedHandlerFunction = std::function; class Drawable : public NS::Referencing { public: void addPresentedHandler(const MTL::DrawablePresentedHandlerFunction& function); void present(); void presentAtTime(CFTimeInterval presentationTime); void presentAfterMinimumDuration(CFTimeInterval duration); void addPresentedHandler(const MTL::DrawablePresentedHandler block); CFTimeInterval presentedTime() const; NS::UInteger drawableID() const; }; } _MTL_INLINE void MTL::Drawable::addPresentedHandler(const MTL::DrawablePresentedHandlerFunction& function) { __block DrawablePresentedHandlerFunction blockFunction = function; addPresentedHandler(^(Drawable* pDrawable) { blockFunction(pDrawable); }); } _MTL_INLINE void MTL::Drawable::present() { Object::sendMessage(this, _MTL_PRIVATE_SEL(present)); } _MTL_INLINE void 
// The std::function overload of addPresentedHandler above copies the callable
// into a __block variable and wraps it in an Objective-C block before
// delegating to the block-based overload defined below.
MTL::Drawable::presentAtTime(CFTimeInterval presentationTime) { Object::sendMessage(this, _MTL_PRIVATE_SEL(presentAtTime_), presentationTime); } _MTL_INLINE void MTL::Drawable::presentAfterMinimumDuration(CFTimeInterval duration) { Object::sendMessage(this, _MTL_PRIVATE_SEL(presentAfterMinimumDuration_), duration); } _MTL_INLINE void MTL::Drawable::addPresentedHandler(const MTL::DrawablePresentedHandler block) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addPresentedHandler_), block); } _MTL_INLINE CFTimeInterval MTL::Drawable::presentedTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(presentedTime)); } _MTL_INLINE NS::UInteger MTL::Drawable::drawableID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(drawableID)); } #pragma once #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, PixelFormat) { PixelFormatInvalid = 0, PixelFormatA8Unorm = 1, PixelFormatR8Unorm = 10, PixelFormatR8Unorm_sRGB = 11, PixelFormatR8Snorm = 12, PixelFormatR8Uint = 13, PixelFormatR8Sint = 14, PixelFormatR16Unorm = 20, PixelFormatR16Snorm = 22, PixelFormatR16Uint = 23, PixelFormatR16Sint = 24, PixelFormatR16Float = 25, PixelFormatRG8Unorm = 30, PixelFormatRG8Unorm_sRGB = 31, PixelFormatRG8Snorm = 32, PixelFormatRG8Uint = 33, PixelFormatRG8Sint = 34, PixelFormatB5G6R5Unorm = 40, PixelFormatA1BGR5Unorm = 41, PixelFormatABGR4Unorm = 42, PixelFormatBGR5A1Unorm = 43, PixelFormatR32Uint = 53, PixelFormatR32Sint = 54, PixelFormatR32Float = 55, PixelFormatRG16Unorm = 60, PixelFormatRG16Snorm = 62, PixelFormatRG16Uint = 63, PixelFormatRG16Sint = 64, PixelFormatRG16Float = 65, PixelFormatRGBA8Unorm = 70, PixelFormatRGBA8Unorm_sRGB = 71, PixelFormatRGBA8Snorm = 72, PixelFormatRGBA8Uint = 73, PixelFormatRGBA8Sint = 74, PixelFormatBGRA8Unorm = 80, PixelFormatBGRA8Unorm_sRGB = 81, PixelFormatRGB10A2Unorm = 90, PixelFormatRGB10A2Uint = 91, PixelFormatRG11B10Float = 92, PixelFormatRGB9E5Float = 93, PixelFormatBGR10A2Unorm = 94, PixelFormatBGR10_XR = 554, PixelFormatBGR10_XR_sRGB 
// MTL::PixelFormat mirrors the Objective-C MTLPixelFormat enumeration; the
// explicit numeric values below must match Apple's MTLPixelFormat raw values
// exactly and must not be renumbered.
= 555, PixelFormatRG32Uint = 103, PixelFormatRG32Sint = 104, PixelFormatRG32Float = 105, PixelFormatRGBA16Unorm = 110, PixelFormatRGBA16Snorm = 112, PixelFormatRGBA16Uint = 113, PixelFormatRGBA16Sint = 114, PixelFormatRGBA16Float = 115, PixelFormatBGRA10_XR = 552, PixelFormatBGRA10_XR_sRGB = 553, PixelFormatRGBA32Uint = 123, PixelFormatRGBA32Sint = 124, PixelFormatRGBA32Float = 125, PixelFormatBC1_RGBA = 130, PixelFormatBC1_RGBA_sRGB = 131, PixelFormatBC2_RGBA = 132, PixelFormatBC2_RGBA_sRGB = 133, PixelFormatBC3_RGBA = 134, PixelFormatBC3_RGBA_sRGB = 135, PixelFormatBC4_RUnorm = 140, PixelFormatBC4_RSnorm = 141, PixelFormatBC5_RGUnorm = 142, PixelFormatBC5_RGSnorm = 143, PixelFormatBC6H_RGBFloat = 150, PixelFormatBC6H_RGBUfloat = 151, PixelFormatBC7_RGBAUnorm = 152, PixelFormatBC7_RGBAUnorm_sRGB = 153, PixelFormatPVRTC_RGB_2BPP = 160, PixelFormatPVRTC_RGB_2BPP_sRGB = 161, PixelFormatPVRTC_RGB_4BPP = 162, PixelFormatPVRTC_RGB_4BPP_sRGB = 163, PixelFormatPVRTC_RGBA_2BPP = 164, PixelFormatPVRTC_RGBA_2BPP_sRGB = 165, PixelFormatPVRTC_RGBA_4BPP = 166, PixelFormatPVRTC_RGBA_4BPP_sRGB = 167, PixelFormatEAC_R11Unorm = 170, PixelFormatEAC_R11Snorm = 172, PixelFormatEAC_RG11Unorm = 174, PixelFormatEAC_RG11Snorm = 176, PixelFormatEAC_RGBA8 = 178, PixelFormatEAC_RGBA8_sRGB = 179, PixelFormatETC2_RGB8 = 180, PixelFormatETC2_RGB8_sRGB = 181, PixelFormatETC2_RGB8A1 = 182, PixelFormatETC2_RGB8A1_sRGB = 183, PixelFormatASTC_4x4_sRGB = 186, PixelFormatASTC_5x4_sRGB = 187, PixelFormatASTC_5x5_sRGB = 188, PixelFormatASTC_6x5_sRGB = 189, PixelFormatASTC_6x6_sRGB = 190, PixelFormatASTC_8x5_sRGB = 192, PixelFormatASTC_8x6_sRGB = 193, PixelFormatASTC_8x8_sRGB = 194, PixelFormatASTC_10x5_sRGB = 195, PixelFormatASTC_10x6_sRGB = 196, PixelFormatASTC_10x8_sRGB = 197, PixelFormatASTC_10x10_sRGB = 198, PixelFormatASTC_12x10_sRGB = 199, PixelFormatASTC_12x12_sRGB = 200, PixelFormatASTC_4x4_LDR = 204, PixelFormatASTC_5x4_LDR = 205, PixelFormatASTC_5x5_LDR = 206, PixelFormatASTC_6x5_LDR = 207, 
// Tail of the MTL::PixelFormat enumeration, then the MTL::Allocation wrapper,
// the resource-related enumerations (PurgeableState, CPUCacheMode,
// StorageMode, HazardTrackingMode, ResourceOptions), the MTL::Resource
// wrapper class with its _MTL_INLINE accessor definitions, and the start of
// the MTLTypes value structs (Origin, Size, Region, SamplePosition).
// All accessors are thin Object::sendMessage dispatches to the selectors
// registered earlier in this header. Note Resource::setOwner dispatches the
// setOwnerWithIdentity_ selector, not a "setOwner" selector.
// NOTE(review): as elsewhere in this extracted chunk, `NS::Referencing`,
// `NS::Copying`, and the `Object::sendMessage` calls appear to have lost
// their `<...>` template arguments; code is left byte-identical here.
PixelFormatASTC_6x6_LDR = 208, PixelFormatASTC_8x5_LDR = 210, PixelFormatASTC_8x6_LDR = 211, PixelFormatASTC_8x8_LDR = 212, PixelFormatASTC_10x5_LDR = 213, PixelFormatASTC_10x6_LDR = 214, PixelFormatASTC_10x8_LDR = 215, PixelFormatASTC_10x10_LDR = 216, PixelFormatASTC_12x10_LDR = 217, PixelFormatASTC_12x12_LDR = 218, PixelFormatASTC_4x4_HDR = 222, PixelFormatASTC_5x4_HDR = 223, PixelFormatASTC_5x5_HDR = 224, PixelFormatASTC_6x5_HDR = 225, PixelFormatASTC_6x6_HDR = 226, PixelFormatASTC_8x5_HDR = 228, PixelFormatASTC_8x6_HDR = 229, PixelFormatASTC_8x8_HDR = 230, PixelFormatASTC_10x5_HDR = 231, PixelFormatASTC_10x6_HDR = 232, PixelFormatASTC_10x8_HDR = 233, PixelFormatASTC_10x10_HDR = 234, PixelFormatASTC_12x10_HDR = 235, PixelFormatASTC_12x12_HDR = 236, PixelFormatGBGR422 = 240, PixelFormatBGRG422 = 241, PixelFormatDepth16Unorm = 250, PixelFormatDepth32Float = 252, PixelFormatStencil8 = 253, PixelFormatDepth24Unorm_Stencil8 = 255, PixelFormatDepth32Float_Stencil8 = 260, PixelFormatX32_Stencil8 = 261, PixelFormatX24_Stencil8 = 262, }; } #pragma once #include #pragma once namespace MTL { class Allocation : public NS::Referencing { public: NS::UInteger allocatedSize() const; }; } _MTL_INLINE NS::UInteger MTL::Allocation::allocatedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allocatedSize)); } namespace MTL { _MTL_ENUM(NS::UInteger, PurgeableState) { PurgeableStateKeepCurrent = 1, PurgeableStateNonVolatile = 2, PurgeableStateVolatile = 3, PurgeableStateEmpty = 4, }; _MTL_ENUM(NS::UInteger, CPUCacheMode) { CPUCacheModeDefaultCache = 0, CPUCacheModeWriteCombined = 1, }; _MTL_ENUM(NS::UInteger, StorageMode) { StorageModeShared = 0, StorageModeManaged = 1, StorageModePrivate = 2, StorageModeMemoryless = 3, }; _MTL_ENUM(NS::UInteger, HazardTrackingMode) { HazardTrackingModeDefault = 0, HazardTrackingModeUntracked = 1, HazardTrackingModeTracked = 2, }; _MTL_OPTIONS(NS::UInteger, ResourceOptions) { ResourceCPUCacheModeDefaultCache = 0, 
// ResourceOptions packs the CPUCacheMode / StorageMode / HazardTrackingMode
// values into distinct bit ranges (storage at bit 4, hazard tracking at
// bit 8), matching Apple's MTLResourceOptions encoding.
ResourceCPUCacheModeWriteCombined = 1, ResourceStorageModeShared = 0, ResourceStorageModeManaged = 16, ResourceStorageModePrivate = 32, ResourceStorageModeMemoryless = 48, ResourceHazardTrackingModeDefault = 0, ResourceHazardTrackingModeUntracked = 256, ResourceHazardTrackingModeTracked = 512, ResourceOptionCPUCacheModeDefault = 0, ResourceOptionCPUCacheModeWriteCombined = 1, }; class Resource : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; MTL::CPUCacheMode cpuCacheMode() const; MTL::StorageMode storageMode() const; MTL::HazardTrackingMode hazardTrackingMode() const; MTL::ResourceOptions resourceOptions() const; MTL::PurgeableState setPurgeableState(MTL::PurgeableState state); class Heap* heap() const; NS::UInteger heapOffset() const; NS::UInteger allocatedSize() const; void makeAliasable(); bool isAliasable(); kern_return_t setOwner(task_id_token_t task_id_token); }; } _MTL_INLINE NS::String* MTL::Resource::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Resource::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::Resource::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::CPUCacheMode MTL::Resource::cpuCacheMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(cpuCacheMode)); } _MTL_INLINE MTL::StorageMode MTL::Resource::storageMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storageMode)); } _MTL_INLINE MTL::HazardTrackingMode MTL::Resource::hazardTrackingMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(hazardTrackingMode)); } _MTL_INLINE MTL::ResourceOptions MTL::Resource::resourceOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceOptions)); } _MTL_INLINE MTL::PurgeableState MTL::Resource::setPurgeableState(MTL::PurgeableState state) { 
return Object::sendMessage(this, _MTL_PRIVATE_SEL(setPurgeableState_), state); } _MTL_INLINE MTL::Heap* MTL::Resource::heap() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heap)); } _MTL_INLINE NS::UInteger MTL::Resource::heapOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heapOffset)); } _MTL_INLINE NS::UInteger MTL::Resource::allocatedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allocatedSize)); } _MTL_INLINE void MTL::Resource::makeAliasable() { Object::sendMessage(this, _MTL_PRIVATE_SEL(makeAliasable)); } _MTL_INLINE bool MTL::Resource::isAliasable() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isAliasable)); } _MTL_INLINE kern_return_t MTL::Resource::setOwner(task_id_token_t task_id_token) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(setOwnerWithIdentity_), task_id_token); } #pragma once namespace MTL { struct Origin { Origin() = default; Origin(NS::UInteger x, NS::UInteger y, NS::UInteger z); static Origin Make(NS::UInteger x, NS::UInteger y, NS::UInteger z); NS::UInteger x; NS::UInteger y; NS::UInteger z; } _MTL_PACKED; struct Size { Size() = default; Size(NS::UInteger width, NS::UInteger height, NS::UInteger depth); static Size Make(NS::UInteger width, NS::UInteger height, NS::UInteger depth); NS::UInteger width; NS::UInteger height; NS::UInteger depth; } _MTL_PACKED; struct Region { Region() = default; Region(NS::UInteger x, NS::UInteger width); Region(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height); Region(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth); static Region Make1D(NS::UInteger x, NS::UInteger width); static Region Make2D(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height); static Region Make3D(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth); MTL::Origin origin; MTL::Size size; } _MTL_PACKED; struct SamplePosition; 
using Coordinate2D = SamplePosition; struct SamplePosition { SamplePosition() = default; SamplePosition(float _x, float _y); static SamplePosition Make(float x, float y); float x; float y; } _MTL_PACKED; struct ResourceID { uint64_t _impl; } _MTL_PACKED; } _MTL_INLINE MTL::Origin::Origin(NS::UInteger _x, NS::UInteger _y, NS::UInteger _z) : x(_x) , y(_y) , z(_z) { } _MTL_INLINE MTL::Origin MTL::Origin::Make(NS::UInteger x, NS::UInteger y, NS::UInteger z) { return Origin(x, y, z); } _MTL_INLINE MTL::Size::Size(NS::UInteger _width, NS::UInteger _height, NS::UInteger _depth) : width(_width) , height(_height) , depth(_depth) { } _MTL_INLINE MTL::Size MTL::Size::Make(NS::UInteger width, NS::UInteger height, NS::UInteger depth) { return Size(width, height, depth); } _MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger width) : origin(x, 0, 0) , size(width, 1, 1) { } _MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height) : origin(x, y, 0) , size(width, height, 1) { } _MTL_INLINE MTL::Region::Region(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth) : origin(x, y, z) , size(width, height, depth) { } _MTL_INLINE MTL::Region MTL::Region::Make1D(NS::UInteger x, NS::UInteger width) { return Region(x, width); } _MTL_INLINE MTL::Region MTL::Region::Make2D(NS::UInteger x, NS::UInteger y, NS::UInteger width, NS::UInteger height) { return Region(x, y, width, height); } _MTL_INLINE MTL::Region MTL::Region::Make3D(NS::UInteger x, NS::UInteger y, NS::UInteger z, NS::UInteger width, NS::UInteger height, NS::UInteger depth) { return Region(x, y, z, width, height, depth); } _MTL_INLINE MTL::SamplePosition::SamplePosition(float _x, float _y) : x(_x) , y(_y) { } _MTL_INLINE MTL::SamplePosition MTL::SamplePosition::Make(float x, float y) { return SamplePosition(x, y); } #include namespace MTL { _MTL_ENUM(NS::UInteger, TextureType) { TextureType1D = 0, TextureType1DArray = 1, 
TextureType2D = 2, TextureType2DArray = 3, TextureType2DMultisample = 4, TextureTypeCube = 5, TextureTypeCubeArray = 6, TextureType3D = 7, TextureType2DMultisampleArray = 8, TextureTypeTextureBuffer = 9, }; _MTL_ENUM(uint8_t, TextureSwizzle) { TextureSwizzleZero = 0, TextureSwizzleOne = 1, TextureSwizzleRed = 2, TextureSwizzleGreen = 3, TextureSwizzleBlue = 4, TextureSwizzleAlpha = 5, }; struct TextureSwizzleChannels { static TextureSwizzleChannels Default(); static TextureSwizzleChannels Make( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a ); constexpr TextureSwizzleChannels(); constexpr TextureSwizzleChannels( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a ); MTL::TextureSwizzle red; MTL::TextureSwizzle green; MTL::TextureSwizzle blue; MTL::TextureSwizzle alpha; } _MTL_PACKED; class SharedTextureHandle : public NS::SecureCoding { public: static class SharedTextureHandle* alloc(); class SharedTextureHandle* init(); class Device* device() const; NS::String* label() const; }; _MTL_OPTIONS(NS::UInteger, TextureUsage) { TextureUsageUnknown = 0, TextureUsageShaderRead = 1, TextureUsageShaderWrite = 2, TextureUsageRenderTarget = 4, TextureUsagePixelFormatView = 16, TextureUsageShaderAtomic = 32, }; _MTL_ENUM(NS::Integer, TextureCompressionType) { TextureCompressionTypeLossless = 0, TextureCompressionTypeLossy = 1, }; class TextureDescriptor : public NS::Copying { public: static class TextureDescriptor* alloc(); class TextureDescriptor* init(); static class TextureDescriptor* texture2DDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, NS::UInteger height, bool mipmapped); static class TextureDescriptor* textureCubeDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger size, bool mipmapped); static class TextureDescriptor* textureBufferDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, MTL::ResourceOptions resourceOptions, MTL::TextureUsage usage); MTL::TextureType textureType() const; void 
setTextureType(MTL::TextureType textureType); MTL::PixelFormat pixelFormat() const; void setPixelFormat(MTL::PixelFormat pixelFormat); NS::UInteger width() const; void setWidth(NS::UInteger width); NS::UInteger height() const; void setHeight(NS::UInteger height); NS::UInteger depth() const; void setDepth(NS::UInteger depth); NS::UInteger mipmapLevelCount() const; void setMipmapLevelCount(NS::UInteger mipmapLevelCount); NS::UInteger sampleCount() const; void setSampleCount(NS::UInteger sampleCount); NS::UInteger arrayLength() const; void setArrayLength(NS::UInteger arrayLength); MTL::ResourceOptions resourceOptions() const; void setResourceOptions(MTL::ResourceOptions resourceOptions); MTL::CPUCacheMode cpuCacheMode() const; void setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode); MTL::StorageMode storageMode() const; void setStorageMode(MTL::StorageMode storageMode); MTL::HazardTrackingMode hazardTrackingMode() const; void setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode); MTL::TextureUsage usage() const; void setUsage(MTL::TextureUsage usage); bool allowGPUOptimizedContents() const; void setAllowGPUOptimizedContents(bool allowGPUOptimizedContents); MTL::TextureCompressionType compressionType() const; void setCompressionType(MTL::TextureCompressionType compressionType); MTL::TextureSwizzleChannels swizzle() const; void setSwizzle(MTL::TextureSwizzleChannels swizzle); }; class Texture : public NS::Referencing { public: class Resource* rootResource() const; class Texture* parentTexture() const; NS::UInteger parentRelativeLevel() const; NS::UInteger parentRelativeSlice() const; class Buffer* buffer() const; NS::UInteger bufferOffset() const; NS::UInteger bufferBytesPerRow() const; IOSurfaceRef iosurface() const; NS::UInteger iosurfacePlane() const; MTL::TextureType textureType() const; MTL::PixelFormat pixelFormat() const; NS::UInteger width() const; NS::UInteger height() const; NS::UInteger depth() const; NS::UInteger mipmapLevelCount() const; 
NS::UInteger sampleCount() const; NS::UInteger arrayLength() const; MTL::TextureUsage usage() const; bool shareable() const; bool framebufferOnly() const; NS::UInteger firstMipmapInTail() const; NS::UInteger tailSizeInBytes() const; bool isSparse() const; bool allowGPUOptimizedContents() const; MTL::TextureCompressionType compressionType() const; MTL::ResourceID gpuResourceID() const; void getBytes(void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage, MTL::Region region, NS::UInteger level, NS::UInteger slice); void replaceRegion(MTL::Region region, NS::UInteger level, NS::UInteger slice, const void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage); void getBytes(void* pixelBytes, NS::UInteger bytesPerRow, MTL::Region region, NS::UInteger level); void replaceRegion(MTL::Region region, NS::UInteger level, const void* pixelBytes, NS::UInteger bytesPerRow); class Texture* newTextureView(MTL::PixelFormat pixelFormat); class Texture* newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange); class SharedTextureHandle* newSharedTextureHandle(); class Texture* remoteStorageTexture() const; class Texture* newRemoteTextureViewForDevice(const class Device* device); MTL::TextureSwizzleChannels swizzle() const; class Texture* newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange, MTL::TextureSwizzleChannels swizzle); }; } _MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureSwizzleChannels::Default() { return MTL::TextureSwizzleChannels(); } _MTL_INLINE constexpr MTL::TextureSwizzleChannels::TextureSwizzleChannels() : red(MTL::TextureSwizzleRed) , green(MTL::TextureSwizzleGreen) , blue(MTL::TextureSwizzleBlue) , alpha(MTL::TextureSwizzleAlpha) { } _MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureSwizzleChannels::Make( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a ) { return 
TextureSwizzleChannels(r, g, b, a); } _MTL_INLINE constexpr MTL::TextureSwizzleChannels::TextureSwizzleChannels( TextureSwizzle r, TextureSwizzle g, TextureSwizzle b, TextureSwizzle a ) : red(r) , green(g) , blue(b) , alpha(a) { } _MTL_INLINE MTL::SharedTextureHandle* MTL::SharedTextureHandle::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLSharedTextureHandle)); } _MTL_INLINE MTL::SharedTextureHandle* MTL::SharedTextureHandle::init() { return NS::Object::init(); } _MTL_INLINE MTL::Device* MTL::SharedTextureHandle::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::SharedTextureHandle::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLTextureDescriptor)); } _MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::texture2DDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, NS::UInteger height, bool mipmapped) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(texture2DDescriptorWithPixelFormat_width_height_mipmapped_), pixelFormat, width, height, mipmapped); } _MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::textureCubeDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger size, bool mipmapped) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(textureCubeDescriptorWithPixelFormat_size_mipmapped_), pixelFormat, size, mipmapped); } _MTL_INLINE MTL::TextureDescriptor* MTL::TextureDescriptor::textureBufferDescriptor(MTL::PixelFormat pixelFormat, NS::UInteger width, MTL::ResourceOptions resourceOptions, MTL::TextureUsage usage) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLTextureDescriptor), _MTL_PRIVATE_SEL(textureBufferDescriptorWithPixelFormat_width_resourceOptions_usage_), 
pixelFormat, width, resourceOptions, usage); } _MTL_INLINE MTL::TextureType MTL::TextureDescriptor::textureType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType)); } _MTL_INLINE void MTL::TextureDescriptor::setTextureType(MTL::TextureType textureType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTextureType_), textureType); } _MTL_INLINE MTL::PixelFormat MTL::TextureDescriptor::pixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(pixelFormat)); } _MTL_INLINE void MTL::TextureDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::width() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(width)); } _MTL_INLINE void MTL::TextureDescriptor::setWidth(NS::UInteger width) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setWidth_), width); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::height() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(height)); } _MTL_INLINE void MTL::TextureDescriptor::setHeight(NS::UInteger height) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setHeight_), height); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::depth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depth)); } _MTL_INLINE void MTL::TextureDescriptor::setDepth(NS::UInteger depth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepth_), depth); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::mipmapLevelCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mipmapLevelCount)); } _MTL_INLINE void MTL::TextureDescriptor::setMipmapLevelCount(NS::UInteger mipmapLevelCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMipmapLevelCount_), mipmapLevelCount); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE void MTL::TextureDescriptor::setSampleCount(NS::UInteger 
sampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount); } _MTL_INLINE NS::UInteger MTL::TextureDescriptor::arrayLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength)); } _MTL_INLINE void MTL::TextureDescriptor::setArrayLength(NS::UInteger arrayLength) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setArrayLength_), arrayLength); } _MTL_INLINE MTL::ResourceOptions MTL::TextureDescriptor::resourceOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceOptions)); } _MTL_INLINE void MTL::TextureDescriptor::setResourceOptions(MTL::ResourceOptions resourceOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResourceOptions_), resourceOptions); } _MTL_INLINE MTL::CPUCacheMode MTL::TextureDescriptor::cpuCacheMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(cpuCacheMode)); } _MTL_INLINE void MTL::TextureDescriptor::setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCpuCacheMode_), cpuCacheMode); } _MTL_INLINE MTL::StorageMode MTL::TextureDescriptor::storageMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storageMode)); } _MTL_INLINE void MTL::TextureDescriptor::setStorageMode(MTL::StorageMode storageMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStorageMode_), storageMode); } _MTL_INLINE MTL::HazardTrackingMode MTL::TextureDescriptor::hazardTrackingMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(hazardTrackingMode)); } _MTL_INLINE void MTL::TextureDescriptor::setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setHazardTrackingMode_), hazardTrackingMode); } _MTL_INLINE MTL::TextureUsage MTL::TextureDescriptor::usage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(usage)); } _MTL_INLINE void MTL::TextureDescriptor::setUsage(MTL::TextureUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setUsage_), usage); } _MTL_INLINE 
bool MTL::TextureDescriptor::allowGPUOptimizedContents() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allowGPUOptimizedContents)); } _MTL_INLINE void MTL::TextureDescriptor::setAllowGPUOptimizedContents(bool allowGPUOptimizedContents) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAllowGPUOptimizedContents_), allowGPUOptimizedContents); } _MTL_INLINE MTL::TextureCompressionType MTL::TextureDescriptor::compressionType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(compressionType)); } _MTL_INLINE void MTL::TextureDescriptor::setCompressionType(MTL::TextureCompressionType compressionType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCompressionType_), compressionType); } _MTL_INLINE MTL::TextureSwizzleChannels MTL::TextureDescriptor::swizzle() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(swizzle)); } _MTL_INLINE void MTL::TextureDescriptor::setSwizzle(MTL::TextureSwizzleChannels swizzle) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSwizzle_), swizzle); } _MTL_INLINE MTL::Resource* MTL::Texture::rootResource() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rootResource)); } _MTL_INLINE MTL::Texture* MTL::Texture::parentTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(parentTexture)); } _MTL_INLINE NS::UInteger MTL::Texture::parentRelativeLevel() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(parentRelativeLevel)); } _MTL_INLINE NS::UInteger MTL::Texture::parentRelativeSlice() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(parentRelativeSlice)); } _MTL_INLINE MTL::Buffer* MTL::Texture::buffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(buffer)); } _MTL_INLINE NS::UInteger MTL::Texture::bufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferOffset)); } _MTL_INLINE NS::UInteger MTL::Texture::bufferBytesPerRow() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferBytesPerRow)); } _MTL_INLINE IOSurfaceRef 
MTL::Texture::iosurface() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(iosurface)); } _MTL_INLINE NS::UInteger MTL::Texture::iosurfacePlane() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(iosurfacePlane)); } _MTL_INLINE MTL::TextureType MTL::Texture::textureType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType)); } _MTL_INLINE MTL::PixelFormat MTL::Texture::pixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(pixelFormat)); } _MTL_INLINE NS::UInteger MTL::Texture::width() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(width)); } _MTL_INLINE NS::UInteger MTL::Texture::height() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(height)); } _MTL_INLINE NS::UInteger MTL::Texture::depth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depth)); } _MTL_INLINE NS::UInteger MTL::Texture::mipmapLevelCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mipmapLevelCount)); } _MTL_INLINE NS::UInteger MTL::Texture::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE NS::UInteger MTL::Texture::arrayLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength)); } _MTL_INLINE MTL::TextureUsage MTL::Texture::usage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(usage)); } _MTL_INLINE bool MTL::Texture::shareable() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isShareable)); } _MTL_INLINE bool MTL::Texture::framebufferOnly() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isFramebufferOnly)); } _MTL_INLINE NS::UInteger MTL::Texture::firstMipmapInTail() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(firstMipmapInTail)); } _MTL_INLINE NS::UInteger MTL::Texture::tailSizeInBytes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tailSizeInBytes)); } _MTL_INLINE bool MTL::Texture::isSparse() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isSparse)); } 
_MTL_INLINE bool MTL::Texture::allowGPUOptimizedContents() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allowGPUOptimizedContents)); } _MTL_INLINE MTL::TextureCompressionType MTL::Texture::compressionType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(compressionType)); } _MTL_INLINE MTL::ResourceID MTL::Texture::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } _MTL_INLINE void MTL::Texture::getBytes(void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage, MTL::Region region, NS::UInteger level, NS::UInteger slice) { Object::sendMessage(this, _MTL_PRIVATE_SEL(getBytes_bytesPerRow_bytesPerImage_fromRegion_mipmapLevel_slice_), pixelBytes, bytesPerRow, bytesPerImage, region, level, slice); } _MTL_INLINE void MTL::Texture::replaceRegion(MTL::Region region, NS::UInteger level, NS::UInteger slice, const void* pixelBytes, NS::UInteger bytesPerRow, NS::UInteger bytesPerImage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(replaceRegion_mipmapLevel_slice_withBytes_bytesPerRow_bytesPerImage_), region, level, slice, pixelBytes, bytesPerRow, bytesPerImage); } _MTL_INLINE void MTL::Texture::getBytes(void* pixelBytes, NS::UInteger bytesPerRow, MTL::Region region, NS::UInteger level) { Object::sendMessage(this, _MTL_PRIVATE_SEL(getBytes_bytesPerRow_fromRegion_mipmapLevel_), pixelBytes, bytesPerRow, region, level); } _MTL_INLINE void MTL::Texture::replaceRegion(MTL::Region region, NS::UInteger level, const void* pixelBytes, NS::UInteger bytesPerRow) { Object::sendMessage(this, _MTL_PRIVATE_SEL(replaceRegion_mipmapLevel_withBytes_bytesPerRow_), region, level, pixelBytes, bytesPerRow); } _MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_), pixelFormat); } _MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range 
levelRange, NS::Range sliceRange) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_), pixelFormat, textureType, levelRange, sliceRange); } _MTL_INLINE MTL::SharedTextureHandle* MTL::Texture::newSharedTextureHandle() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedTextureHandle)); } _MTL_INLINE MTL::Texture* MTL::Texture::remoteStorageTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(remoteStorageTexture)); } _MTL_INLINE MTL::Texture* MTL::Texture::newRemoteTextureViewForDevice(const MTL::Device* device) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRemoteTextureViewForDevice_), device); } _MTL_INLINE MTL::TextureSwizzleChannels MTL::Texture::swizzle() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(swizzle)); } _MTL_INLINE MTL::Texture* MTL::Texture::newTextureView(MTL::PixelFormat pixelFormat, MTL::TextureType textureType, NS::Range levelRange, NS::Range sliceRange, MTL::TextureSwizzleChannels swizzle) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureViewWithPixelFormat_textureType_levels_slices_swizzle_), pixelFormat, textureType, levelRange, sliceRange, swizzle); } #define _CA_EXPORT _NS_EXPORT #define _CA_EXTERN _NS_EXTERN #define _CA_INLINE _NS_INLINE #define _CA_PACKED _NS_PACKED #define _CA_CONST(type, name) _NS_CONST(type, name) #define _CA_ENUM(type, name) _NS_ENUM(type, name) #define _CA_OPTIONS(type, name) _NS_OPTIONS(type, name) #define _CA_VALIDATE_SIZE(ns, name) _NS_VALIDATE_SIZE(ns, name) #define _CA_VALIDATE_ENUM(ns, name) _NS_VALIDATE_ENUM(ns, name) #include #define _CA_PRIVATE_CLS(symbol) (Private::Class::s_k##symbol) #define _CA_PRIVATE_SEL(accessor) (Private::Selector::s_k##accessor) #if defined(CA_PRIVATE_IMPLEMENTATION) #ifdef METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _CA_PRIVATE_VISIBILITY __attribute__((visibility("hidden"))) #else #define _CA_PRIVATE_VISIBILITY __attribute__((visibility("default"))) #endif // 
METALCPP_SYMBOL_VISIBILITY_HIDDEN #define _CA_PRIVATE_IMPORT __attribute__((weak_import)) #ifdef __OBJC__ #define _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol) ((__bridge void*)objc_lookUpClass(#symbol)) #define _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol) ((__bridge void*)objc_getProtocol(#symbol)) #else #define _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol) objc_lookUpClass(#symbol) #define _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol) objc_getProtocol(#symbol) #endif // __OBJC__ #define _CA_PRIVATE_DEF_CLS(symbol) void* s_k##symbol _CA_PRIVATE_VISIBILITY = _CA_PRIVATE_OBJC_LOOKUP_CLASS(symbol) #define _CA_PRIVATE_DEF_PRO(symbol) void* s_k##symbol _CA_PRIVATE_VISIBILITY = _CA_PRIVATE_OBJC_GET_PROTOCOL(symbol) #define _CA_PRIVATE_DEF_SEL(accessor, symbol) SEL s_k##accessor _CA_PRIVATE_VISIBILITY = sel_registerName(symbol) #define _CA_PRIVATE_DEF_STR(type, symbol) \ _CA_EXTERN type const CA##symbol _CA_PRIVATE_IMPORT; \ type const CA::symbol = (nullptr != &CA##symbol) ? CA##symbol : nullptr #else #define _CA_PRIVATE_DEF_CLS(symbol) extern void* s_k##symbol #define _CA_PRIVATE_DEF_PRO(symbol) extern void* s_k##symbol #define _CA_PRIVATE_DEF_SEL(accessor, symbol) extern SEL s_k##accessor #define _CA_PRIVATE_DEF_STR(type, symbol) extern type const CA::symbol #endif // CA_PRIVATE_IMPLEMENTATION namespace CA { namespace Private { namespace Class { _CA_PRIVATE_DEF_CLS(CAMetalLayer); } // Class } // Private } // CA namespace CA { namespace Private { namespace Protocol { _CA_PRIVATE_DEF_PRO(CAMetalDrawable); } // Protocol } // Private } // CA namespace CA { namespace Private { namespace Selector { _CA_PRIVATE_DEF_SEL(device, "device"); _CA_PRIVATE_DEF_SEL(drawableSize, "drawableSize"); _CA_PRIVATE_DEF_SEL(framebufferOnly, "framebufferOnly"); _CA_PRIVATE_DEF_SEL(layer, "layer"); _CA_PRIVATE_DEF_SEL(nextDrawable, "nextDrawable"); _CA_PRIVATE_DEF_SEL(pixelFormat, "pixelFormat"); _CA_PRIVATE_DEF_SEL(setDevice_, "setDevice:"); _CA_PRIVATE_DEF_SEL(setDrawableSize_, "setDrawableSize:"); 
_CA_PRIVATE_DEF_SEL(setFramebufferOnly_, "setFramebufferOnly:"); _CA_PRIVATE_DEF_SEL(setPixelFormat_, "setPixelFormat:"); _CA_PRIVATE_DEF_SEL(texture, "texture"); } // Class } // Private } // CA namespace CA { class MetalDrawable : public NS::Referencing { public: class MetalLayer* layer() const; MTL::Texture* texture() const; }; } _CA_INLINE CA::MetalLayer* CA::MetalDrawable::layer() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(layer)); } _CA_INLINE MTL::Texture* CA::MetalDrawable::texture() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(texture)); } #include namespace CA { class MetalLayer : public NS::Referencing { public: static class MetalLayer* layer(); MTL::Device* device() const; void setDevice(MTL::Device* device); MTL::PixelFormat pixelFormat() const; void setPixelFormat(MTL::PixelFormat pixelFormat); bool framebufferOnly() const; void setFramebufferOnly(bool framebufferOnly); CGSize drawableSize() const; void setDrawableSize(CGSize drawableSize); class MetalDrawable* nextDrawable(); }; } // namespace CA _CA_INLINE CA::MetalLayer* CA::MetalLayer::layer() { return Object::sendMessage(_CA_PRIVATE_CLS(CAMetalLayer), _CA_PRIVATE_SEL(layer)); } _CA_INLINE MTL::Device* CA::MetalLayer::device() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(device)); } _CA_INLINE void CA::MetalLayer::setDevice(MTL::Device* device) { return Object::sendMessage(this, _CA_PRIVATE_SEL(setDevice_), device); } _CA_INLINE MTL::PixelFormat CA::MetalLayer::pixelFormat() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(pixelFormat)); } _CA_INLINE void CA::MetalLayer::setPixelFormat(MTL::PixelFormat pixelFormat) { return Object::sendMessage(this, _CA_PRIVATE_SEL(setPixelFormat_), pixelFormat); } _CA_INLINE bool CA::MetalLayer::framebufferOnly() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(framebufferOnly)); } _CA_INLINE void CA::MetalLayer::setFramebufferOnly(bool framebufferOnly) { return Object::sendMessage(this, 
_CA_PRIVATE_SEL(setFramebufferOnly_), framebufferOnly); } _CA_INLINE CGSize CA::MetalLayer::drawableSize() const { return Object::sendMessage(this, _CA_PRIVATE_SEL(drawableSize)); } _CA_INLINE void CA::MetalLayer::setDrawableSize(CGSize drawableSize) { return Object::sendMessage(this, _CA_PRIVATE_SEL(setDrawableSize_), drawableSize); } _CA_INLINE CA::MetalDrawable* CA::MetalLayer::nextDrawable() { return Object::sendMessage(this, _CA_PRIVATE_SEL(nextDrawable)); } #pragma once #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, AttributeFormat) { AttributeFormatInvalid = 0, AttributeFormatUChar2 = 1, AttributeFormatUChar3 = 2, AttributeFormatUChar4 = 3, AttributeFormatChar2 = 4, AttributeFormatChar3 = 5, AttributeFormatChar4 = 6, AttributeFormatUChar2Normalized = 7, AttributeFormatUChar3Normalized = 8, AttributeFormatUChar4Normalized = 9, AttributeFormatChar2Normalized = 10, AttributeFormatChar3Normalized = 11, AttributeFormatChar4Normalized = 12, AttributeFormatUShort2 = 13, AttributeFormatUShort3 = 14, AttributeFormatUShort4 = 15, AttributeFormatShort2 = 16, AttributeFormatShort3 = 17, AttributeFormatShort4 = 18, AttributeFormatUShort2Normalized = 19, AttributeFormatUShort3Normalized = 20, AttributeFormatUShort4Normalized = 21, AttributeFormatShort2Normalized = 22, AttributeFormatShort3Normalized = 23, AttributeFormatShort4Normalized = 24, AttributeFormatHalf2 = 25, AttributeFormatHalf3 = 26, AttributeFormatHalf4 = 27, AttributeFormatFloat = 28, AttributeFormatFloat2 = 29, AttributeFormatFloat3 = 30, AttributeFormatFloat4 = 31, AttributeFormatInt = 32, AttributeFormatInt2 = 33, AttributeFormatInt3 = 34, AttributeFormatInt4 = 35, AttributeFormatUInt = 36, AttributeFormatUInt2 = 37, AttributeFormatUInt3 = 38, AttributeFormatUInt4 = 39, AttributeFormatInt1010102Normalized = 40, AttributeFormatUInt1010102Normalized = 41, AttributeFormatUChar4Normalized_BGRA = 42, AttributeFormatUChar = 45, AttributeFormatChar = 46, AttributeFormatUCharNormalized = 47, 
AttributeFormatCharNormalized = 48, AttributeFormatUShort = 49, AttributeFormatShort = 50, AttributeFormatUShortNormalized = 51, AttributeFormatShortNormalized = 52, AttributeFormatHalf = 53, AttributeFormatFloatRG11B10 = 54, AttributeFormatFloatRGB9E5 = 55, }; _MTL_ENUM(NS::UInteger, IndexType) { IndexTypeUInt16 = 0, IndexTypeUInt32 = 1, }; _MTL_ENUM(NS::UInteger, StepFunction) { StepFunctionConstant = 0, StepFunctionPerVertex = 1, StepFunctionPerInstance = 2, StepFunctionPerPatch = 3, StepFunctionPerPatchControlPoint = 4, StepFunctionThreadPositionInGridX = 5, StepFunctionThreadPositionInGridY = 6, StepFunctionThreadPositionInGridXIndexed = 7, StepFunctionThreadPositionInGridYIndexed = 8, }; class BufferLayoutDescriptor : public NS::Copying { public: static class BufferLayoutDescriptor* alloc(); class BufferLayoutDescriptor* init(); NS::UInteger stride() const; void setStride(NS::UInteger stride); MTL::StepFunction stepFunction() const; void setStepFunction(MTL::StepFunction stepFunction); NS::UInteger stepRate() const; void setStepRate(NS::UInteger stepRate); }; class BufferLayoutDescriptorArray : public NS::Referencing { public: static class BufferLayoutDescriptorArray* alloc(); class BufferLayoutDescriptorArray* init(); class BufferLayoutDescriptor* object(NS::UInteger index); void setObject(const class BufferLayoutDescriptor* bufferDesc, NS::UInteger index); }; class AttributeDescriptor : public NS::Copying { public: static class AttributeDescriptor* alloc(); class AttributeDescriptor* init(); MTL::AttributeFormat format() const; void setFormat(MTL::AttributeFormat format); NS::UInteger offset() const; void setOffset(NS::UInteger offset); NS::UInteger bufferIndex() const; void setBufferIndex(NS::UInteger bufferIndex); }; class AttributeDescriptorArray : public NS::Referencing { public: static class AttributeDescriptorArray* alloc(); class AttributeDescriptorArray* init(); class AttributeDescriptor* object(NS::UInteger index); void setObject(const class 
AttributeDescriptor* attributeDesc, NS::UInteger index); }; class StageInputOutputDescriptor : public NS::Copying { public: static class StageInputOutputDescriptor* alloc(); class StageInputOutputDescriptor* init(); static class StageInputOutputDescriptor* stageInputOutputDescriptor(); class BufferLayoutDescriptorArray* layouts() const; class AttributeDescriptorArray* attributes() const; MTL::IndexType indexType() const; void setIndexType(MTL::IndexType indexType); NS::UInteger indexBufferIndex() const; void setIndexBufferIndex(NS::UInteger indexBufferIndex); void reset(); }; } _MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLBufferLayoutDescriptor)); } _MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::BufferLayoutDescriptor::stride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stride)); } _MTL_INLINE void MTL::BufferLayoutDescriptor::setStride(NS::UInteger stride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStride_), stride); } _MTL_INLINE MTL::StepFunction MTL::BufferLayoutDescriptor::stepFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stepFunction)); } _MTL_INLINE void MTL::BufferLayoutDescriptor::setStepFunction(MTL::StepFunction stepFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStepFunction_), stepFunction); } _MTL_INLINE NS::UInteger MTL::BufferLayoutDescriptor::stepRate() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stepRate)); } _MTL_INLINE void MTL::BufferLayoutDescriptor::setStepRate(NS::UInteger stepRate) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStepRate_), stepRate); } _MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::BufferLayoutDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLBufferLayoutDescriptorArray)); } _MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::BufferLayoutDescriptorArray::init() { 
return NS::Object::init(); } _MTL_INLINE MTL::BufferLayoutDescriptor* MTL::BufferLayoutDescriptorArray::object(NS::UInteger index) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index); } _MTL_INLINE void MTL::BufferLayoutDescriptorArray::setObject(const MTL::BufferLayoutDescriptor* bufferDesc, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), bufferDesc, index); } _MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAttributeDescriptor)); } _MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::AttributeFormat MTL::AttributeDescriptor::format() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(format)); } _MTL_INLINE void MTL::AttributeDescriptor::setFormat(MTL::AttributeFormat format) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFormat_), format); } _MTL_INLINE NS::UInteger MTL::AttributeDescriptor::offset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(offset)); } _MTL_INLINE void MTL::AttributeDescriptor::setOffset(NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOffset_), offset); } _MTL_INLINE NS::UInteger MTL::AttributeDescriptor::bufferIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferIndex)); } _MTL_INLINE void MTL::AttributeDescriptor::setBufferIndex(NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBufferIndex_), bufferIndex); } _MTL_INLINE MTL::AttributeDescriptorArray* MTL::AttributeDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAttributeDescriptorArray)); } _MTL_INLINE MTL::AttributeDescriptorArray* MTL::AttributeDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::AttributeDescriptor* MTL::AttributeDescriptorArray::object(NS::UInteger index) { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index); } _MTL_INLINE void MTL::AttributeDescriptorArray::setObject(const MTL::AttributeDescriptor* attributeDesc, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attributeDesc, index); } _MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLStageInputOutputDescriptor)); } _MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::StageInputOutputDescriptor* MTL::StageInputOutputDescriptor::stageInputOutputDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLStageInputOutputDescriptor), _MTL_PRIVATE_SEL(stageInputOutputDescriptor)); } _MTL_INLINE MTL::BufferLayoutDescriptorArray* MTL::StageInputOutputDescriptor::layouts() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layouts)); } _MTL_INLINE MTL::AttributeDescriptorArray* MTL::StageInputOutputDescriptor::attributes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributes)); } _MTL_INLINE MTL::IndexType MTL::StageInputOutputDescriptor::indexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexType)); } _MTL_INLINE void MTL::StageInputOutputDescriptor::setIndexType(MTL::IndexType indexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexType_), indexType); } _MTL_INLINE NS::UInteger MTL::StageInputOutputDescriptor::indexBufferIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBufferIndex)); } _MTL_INLINE void MTL::StageInputOutputDescriptor::setIndexBufferIndex(NS::UInteger indexBufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBufferIndex_), indexBufferIndex); } _MTL_INLINE void MTL::StageInputOutputDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } namespace MTL { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wnested-anon-types" struct 
PackedFloat3 { PackedFloat3(); PackedFloat3(float x, float y, float z); float& operator[](int idx); float operator[](int idx) const; union { struct { float x; float y; float z; }; float elements[3]; }; } _MTL_PACKED; #pragma clang diagnostic pop struct PackedFloat4x3 { PackedFloat4x3(); PackedFloat4x3(const PackedFloat3& col0, const PackedFloat3& col1, const PackedFloat3& col2, const PackedFloat3& col3); PackedFloat3& operator[](int idx); const PackedFloat3& operator[](int idx) const; PackedFloat3 columns[4]; } _MTL_PACKED; struct AxisAlignedBoundingBox { AxisAlignedBoundingBox(); AxisAlignedBoundingBox(PackedFloat3 p); AxisAlignedBoundingBox(PackedFloat3 min, PackedFloat3 max); PackedFloat3 min; PackedFloat3 max; } _MTL_PACKED; #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wnested-anon-types" struct PackedFloatQuaternion { PackedFloatQuaternion(); PackedFloatQuaternion(float x, float y, float z, float w); float& operator[](int idx); const float& operator[](int idx) const; union { struct { float x; float y; float z; float w; }; float elements[4]; }; } _MTL_PACKED; #pragma clang diagnostic pop struct ComponentTransform { PackedFloat3 scale; PackedFloat3 shear; PackedFloat3 pivot; PackedFloatQuaternion rotation; PackedFloat3 translation; } _MTL_PACKED; } _MTL_INLINE MTL::PackedFloat3::PackedFloat3() : x(0.0f) , y(0.0f) , z(0.0f) { } _MTL_INLINE MTL::PackedFloat3::PackedFloat3(float _x, float _y, float _z) : x(_x) , y(_y) , z(_z) { } _MTL_INLINE float& MTL::PackedFloat3::operator[](int idx) { return elements[idx]; } _MTL_INLINE float MTL::PackedFloat3::operator[](int idx) const { return elements[idx]; } _MTL_INLINE MTL::PackedFloat4x3::PackedFloat4x3() { columns[0] = PackedFloat3(0.0f, 0.0f, 0.0f); columns[1] = PackedFloat3(0.0f, 0.0f, 0.0f); columns[2] = PackedFloat3(0.0f, 0.0f, 0.0f); columns[3] = PackedFloat3(0.0f, 0.0f, 0.0f); } _MTL_INLINE MTL::PackedFloat4x3::PackedFloat4x3(const PackedFloat3& col0, const PackedFloat3& col1, const 
PackedFloat3& col2, const PackedFloat3& col3) { columns[0] = col0; columns[1] = col1; columns[2] = col2; columns[3] = col3; } _MTL_INLINE MTL::PackedFloat3& MTL::PackedFloat4x3::operator[](int idx) { return columns[idx]; } _MTL_INLINE const MTL::PackedFloat3& MTL::PackedFloat4x3::operator[](int idx) const { return columns[idx]; } _MTL_INLINE MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox() : min(INFINITY, INFINITY, INFINITY) , max(-INFINITY, -INFINITY, -INFINITY) { } _MTL_INLINE MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox(PackedFloat3 p) : min(p) , max(p) { } _MTL_INLINE MTL::AxisAlignedBoundingBox::AxisAlignedBoundingBox(PackedFloat3 _min, PackedFloat3 _max) : min(_min) , max(_max) { } _MTL_INLINE MTL::PackedFloatQuaternion::PackedFloatQuaternion() : x(0.0f) , y(0.0f) , z(0.0f) , w(0.0f) { } _MTL_INLINE MTL::PackedFloatQuaternion::PackedFloatQuaternion(float x, float y, float z, float w) : x(x) , y(y) , z(z) , w(w) { } _MTL_INLINE float& MTL::PackedFloatQuaternion::operator[](int idx) { return elements[idx]; } _MTL_INLINE const float& MTL::PackedFloatQuaternion::operator[](int idx) const { return elements[idx]; } namespace MTL { _MTL_OPTIONS(NS::UInteger, AccelerationStructureUsage) { AccelerationStructureUsageNone = 0, AccelerationStructureUsageRefit = 1, AccelerationStructureUsagePreferFastBuild = 2, AccelerationStructureUsageExtendedLimits = 4, }; _MTL_OPTIONS(uint32_t, AccelerationStructureInstanceOptions) { AccelerationStructureInstanceOptionNone = 0, AccelerationStructureInstanceOptionDisableTriangleCulling = 1, AccelerationStructureInstanceOptionTriangleFrontFacingWindingCounterClockwise = 2, AccelerationStructureInstanceOptionOpaque = 4, AccelerationStructureInstanceOptionNonOpaque = 8, }; _MTL_ENUM(NS::Integer, MatrixLayout) { MatrixLayoutColumnMajor = 0, MatrixLayoutRowMajor = 1, }; class AccelerationStructureDescriptor : public NS::Copying { public: static class AccelerationStructureDescriptor* alloc(); class 
AccelerationStructureDescriptor* init(); MTL::AccelerationStructureUsage usage() const; void setUsage(MTL::AccelerationStructureUsage usage); }; class AccelerationStructureGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureGeometryDescriptor* alloc(); class AccelerationStructureGeometryDescriptor* init(); NS::UInteger intersectionFunctionTableOffset() const; void setIntersectionFunctionTableOffset(NS::UInteger intersectionFunctionTableOffset); bool opaque() const; void setOpaque(bool opaque); bool allowDuplicateIntersectionFunctionInvocation() const; void setAllowDuplicateIntersectionFunctionInvocation(bool allowDuplicateIntersectionFunctionInvocation); NS::String* label() const; void setLabel(const NS::String* label); class Buffer* primitiveDataBuffer() const; void setPrimitiveDataBuffer(const class Buffer* primitiveDataBuffer); NS::UInteger primitiveDataBufferOffset() const; void setPrimitiveDataBufferOffset(NS::UInteger primitiveDataBufferOffset); NS::UInteger primitiveDataStride() const; void setPrimitiveDataStride(NS::UInteger primitiveDataStride); NS::UInteger primitiveDataElementSize() const; void setPrimitiveDataElementSize(NS::UInteger primitiveDataElementSize); }; _MTL_ENUM(uint32_t, MotionBorderMode) { MotionBorderModeClamp = 0, MotionBorderModeVanish = 1, }; class PrimitiveAccelerationStructureDescriptor : public NS::Copying { public: static class PrimitiveAccelerationStructureDescriptor* alloc(); class PrimitiveAccelerationStructureDescriptor* init(); NS::Array* geometryDescriptors() const; void setGeometryDescriptors(const NS::Array* geometryDescriptors); MTL::MotionBorderMode motionStartBorderMode() const; void setMotionStartBorderMode(MTL::MotionBorderMode motionStartBorderMode); MTL::MotionBorderMode motionEndBorderMode() const; void setMotionEndBorderMode(MTL::MotionBorderMode motionEndBorderMode); float motionStartTime() const; void setMotionStartTime(float motionStartTime); float motionEndTime() const; void 
setMotionEndTime(float motionEndTime); NS::UInteger motionKeyframeCount() const; void setMotionKeyframeCount(NS::UInteger motionKeyframeCount); static MTL::PrimitiveAccelerationStructureDescriptor* descriptor(); }; class AccelerationStructureTriangleGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureTriangleGeometryDescriptor* alloc(); class AccelerationStructureTriangleGeometryDescriptor* init(); class Buffer* vertexBuffer() const; void setVertexBuffer(const class Buffer* vertexBuffer); NS::UInteger vertexBufferOffset() const; void setVertexBufferOffset(NS::UInteger vertexBufferOffset); MTL::AttributeFormat vertexFormat() const; void setVertexFormat(MTL::AttributeFormat vertexFormat); NS::UInteger vertexStride() const; void setVertexStride(NS::UInteger vertexStride); class Buffer* indexBuffer() const; void setIndexBuffer(const class Buffer* indexBuffer); NS::UInteger indexBufferOffset() const; void setIndexBufferOffset(NS::UInteger indexBufferOffset); MTL::IndexType indexType() const; void setIndexType(MTL::IndexType indexType); NS::UInteger triangleCount() const; void setTriangleCount(NS::UInteger triangleCount); class Buffer* transformationMatrixBuffer() const; void setTransformationMatrixBuffer(const class Buffer* transformationMatrixBuffer); NS::UInteger transformationMatrixBufferOffset() const; void setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset); MTL::MatrixLayout transformationMatrixLayout() const; void setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout); static MTL::AccelerationStructureTriangleGeometryDescriptor* descriptor(); }; class AccelerationStructureBoundingBoxGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureBoundingBoxGeometryDescriptor* alloc(); class AccelerationStructureBoundingBoxGeometryDescriptor* init(); class Buffer* boundingBoxBuffer() const; void setBoundingBoxBuffer(const class Buffer* boundingBoxBuffer); 
NS::UInteger boundingBoxBufferOffset() const; void setBoundingBoxBufferOffset(NS::UInteger boundingBoxBufferOffset); NS::UInteger boundingBoxStride() const; void setBoundingBoxStride(NS::UInteger boundingBoxStride); NS::UInteger boundingBoxCount() const; void setBoundingBoxCount(NS::UInteger boundingBoxCount); static MTL::AccelerationStructureBoundingBoxGeometryDescriptor* descriptor(); }; class MotionKeyframeData : public NS::Referencing { public: static class MotionKeyframeData* alloc(); class MotionKeyframeData* init(); class Buffer* buffer() const; void setBuffer(const class Buffer* buffer); NS::UInteger offset() const; void setOffset(NS::UInteger offset); static MTL::MotionKeyframeData* data(); }; class AccelerationStructureMotionTriangleGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureMotionTriangleGeometryDescriptor* alloc(); class AccelerationStructureMotionTriangleGeometryDescriptor* init(); NS::Array* vertexBuffers() const; void setVertexBuffers(const NS::Array* vertexBuffers); MTL::AttributeFormat vertexFormat() const; void setVertexFormat(MTL::AttributeFormat vertexFormat); NS::UInteger vertexStride() const; void setVertexStride(NS::UInteger vertexStride); class Buffer* indexBuffer() const; void setIndexBuffer(const class Buffer* indexBuffer); NS::UInteger indexBufferOffset() const; void setIndexBufferOffset(NS::UInteger indexBufferOffset); MTL::IndexType indexType() const; void setIndexType(MTL::IndexType indexType); NS::UInteger triangleCount() const; void setTriangleCount(NS::UInteger triangleCount); class Buffer* transformationMatrixBuffer() const; void setTransformationMatrixBuffer(const class Buffer* transformationMatrixBuffer); NS::UInteger transformationMatrixBufferOffset() const; void setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset); MTL::MatrixLayout transformationMatrixLayout() const; void setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout); static 
MTL::AccelerationStructureMotionTriangleGeometryDescriptor* descriptor(); }; class AccelerationStructureMotionBoundingBoxGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureMotionBoundingBoxGeometryDescriptor* alloc(); class AccelerationStructureMotionBoundingBoxGeometryDescriptor* init(); NS::Array* boundingBoxBuffers() const; void setBoundingBoxBuffers(const NS::Array* boundingBoxBuffers); NS::UInteger boundingBoxStride() const; void setBoundingBoxStride(NS::UInteger boundingBoxStride); NS::UInteger boundingBoxCount() const; void setBoundingBoxCount(NS::UInteger boundingBoxCount); static MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* descriptor(); }; _MTL_ENUM(NS::Integer, CurveType) { CurveTypeRound = 0, CurveTypeFlat = 1, }; _MTL_ENUM(NS::Integer, CurveBasis) { CurveBasisBSpline = 0, CurveBasisCatmullRom = 1, CurveBasisLinear = 2, CurveBasisBezier = 3, }; _MTL_ENUM(NS::Integer, CurveEndCaps) { CurveEndCapsNone = 0, CurveEndCapsDisk = 1, CurveEndCapsSphere = 2, }; class AccelerationStructureCurveGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureCurveGeometryDescriptor* alloc(); class AccelerationStructureCurveGeometryDescriptor* init(); class Buffer* controlPointBuffer() const; void setControlPointBuffer(const class Buffer* controlPointBuffer); NS::UInteger controlPointBufferOffset() const; void setControlPointBufferOffset(NS::UInteger controlPointBufferOffset); NS::UInteger controlPointCount() const; void setControlPointCount(NS::UInteger controlPointCount); NS::UInteger controlPointStride() const; void setControlPointStride(NS::UInteger controlPointStride); MTL::AttributeFormat controlPointFormat() const; void setControlPointFormat(MTL::AttributeFormat controlPointFormat); class Buffer* radiusBuffer() const; void setRadiusBuffer(const class Buffer* radiusBuffer); NS::UInteger radiusBufferOffset() const; void setRadiusBufferOffset(NS::UInteger radiusBufferOffset); 
MTL::AttributeFormat radiusFormat() const; void setRadiusFormat(MTL::AttributeFormat radiusFormat); NS::UInteger radiusStride() const; void setRadiusStride(NS::UInteger radiusStride); class Buffer* indexBuffer() const; void setIndexBuffer(const class Buffer* indexBuffer); NS::UInteger indexBufferOffset() const; void setIndexBufferOffset(NS::UInteger indexBufferOffset); MTL::IndexType indexType() const; void setIndexType(MTL::IndexType indexType); NS::UInteger segmentCount() const; void setSegmentCount(NS::UInteger segmentCount); NS::UInteger segmentControlPointCount() const; void setSegmentControlPointCount(NS::UInteger segmentControlPointCount); MTL::CurveType curveType() const; void setCurveType(MTL::CurveType curveType); MTL::CurveBasis curveBasis() const; void setCurveBasis(MTL::CurveBasis curveBasis); MTL::CurveEndCaps curveEndCaps() const; void setCurveEndCaps(MTL::CurveEndCaps curveEndCaps); static MTL::AccelerationStructureCurveGeometryDescriptor* descriptor(); }; class AccelerationStructureMotionCurveGeometryDescriptor : public NS::Copying { public: static class AccelerationStructureMotionCurveGeometryDescriptor* alloc(); class AccelerationStructureMotionCurveGeometryDescriptor* init(); NS::Array* controlPointBuffers() const; void setControlPointBuffers(const NS::Array* controlPointBuffers); NS::UInteger controlPointCount() const; void setControlPointCount(NS::UInteger controlPointCount); NS::UInteger controlPointStride() const; void setControlPointStride(NS::UInteger controlPointStride); MTL::AttributeFormat controlPointFormat() const; void setControlPointFormat(MTL::AttributeFormat controlPointFormat); NS::Array* radiusBuffers() const; void setRadiusBuffers(const NS::Array* radiusBuffers); MTL::AttributeFormat radiusFormat() const; void setRadiusFormat(MTL::AttributeFormat radiusFormat); NS::UInteger radiusStride() const; void setRadiusStride(NS::UInteger radiusStride); class Buffer* indexBuffer() const; void setIndexBuffer(const class Buffer* 
indexBuffer); NS::UInteger indexBufferOffset() const; void setIndexBufferOffset(NS::UInteger indexBufferOffset); MTL::IndexType indexType() const; void setIndexType(MTL::IndexType indexType); NS::UInteger segmentCount() const; void setSegmentCount(NS::UInteger segmentCount); NS::UInteger segmentControlPointCount() const; void setSegmentControlPointCount(NS::UInteger segmentControlPointCount); MTL::CurveType curveType() const; void setCurveType(MTL::CurveType curveType); MTL::CurveBasis curveBasis() const; void setCurveBasis(MTL::CurveBasis curveBasis); MTL::CurveEndCaps curveEndCaps() const; void setCurveEndCaps(MTL::CurveEndCaps curveEndCaps); static MTL::AccelerationStructureMotionCurveGeometryDescriptor* descriptor(); }; struct AccelerationStructureInstanceDescriptor { MTL::PackedFloat4x3 transformationMatrix; MTL::AccelerationStructureInstanceOptions options; uint32_t mask; uint32_t intersectionFunctionTableOffset; uint32_t accelerationStructureIndex; } _MTL_PACKED; struct AccelerationStructureUserIDInstanceDescriptor { MTL::PackedFloat4x3 transformationMatrix; MTL::AccelerationStructureInstanceOptions options; uint32_t mask; uint32_t intersectionFunctionTableOffset; uint32_t accelerationStructureIndex; uint32_t userID; } _MTL_PACKED; _MTL_ENUM(NS::UInteger, AccelerationStructureInstanceDescriptorType) { AccelerationStructureInstanceDescriptorTypeDefault = 0, AccelerationStructureInstanceDescriptorTypeUserID = 1, AccelerationStructureInstanceDescriptorTypeMotion = 2, AccelerationStructureInstanceDescriptorTypeIndirect = 3, AccelerationStructureInstanceDescriptorTypeIndirectMotion = 4, }; struct AccelerationStructureMotionInstanceDescriptor { MTL::AccelerationStructureInstanceOptions options; uint32_t mask; uint32_t intersectionFunctionTableOffset; uint32_t accelerationStructureIndex; uint32_t userID; uint32_t motionTransformsStartIndex; uint32_t motionTransformsCount; MTL::MotionBorderMode motionStartBorderMode; MTL::MotionBorderMode motionEndBorderMode; float 
motionStartTime; float motionEndTime; } _MTL_PACKED; struct IndirectAccelerationStructureInstanceDescriptor { MTL::PackedFloat4x3 transformationMatrix; MTL::AccelerationStructureInstanceOptions options; uint32_t mask; uint32_t intersectionFunctionTableOffset; uint32_t userID; MTL::ResourceID accelerationStructureID; } _MTL_PACKED; struct IndirectAccelerationStructureMotionInstanceDescriptor { MTL::AccelerationStructureInstanceOptions options; uint32_t mask; uint32_t intersectionFunctionTableOffset; uint32_t userID; MTL::ResourceID accelerationStructureID; uint32_t motionTransformsStartIndex; uint32_t motionTransformsCount; MTL::MotionBorderMode motionStartBorderMode; MTL::MotionBorderMode motionEndBorderMode; float motionStartTime; float motionEndTime; } _MTL_PACKED; _MTL_ENUM(NS::Integer, TransformType) { TransformTypePackedFloat4x3 = 0, TransformTypeComponent = 1, }; class InstanceAccelerationStructureDescriptor : public NS::Copying { public: static class InstanceAccelerationStructureDescriptor* alloc(); class InstanceAccelerationStructureDescriptor* init(); class Buffer* instanceDescriptorBuffer() const; void setInstanceDescriptorBuffer(const class Buffer* instanceDescriptorBuffer); NS::UInteger instanceDescriptorBufferOffset() const; void setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset); NS::UInteger instanceDescriptorStride() const; void setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride); NS::UInteger instanceCount() const; void setInstanceCount(NS::UInteger instanceCount); NS::Array* instancedAccelerationStructures() const; void setInstancedAccelerationStructures(const NS::Array* instancedAccelerationStructures); MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType() const; void setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType); class Buffer* motionTransformBuffer() const; void setMotionTransformBuffer(const class Buffer* motionTransformBuffer); 
NS::UInteger motionTransformBufferOffset() const; void setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset); NS::UInteger motionTransformCount() const; void setMotionTransformCount(NS::UInteger motionTransformCount); MTL::MatrixLayout instanceTransformationMatrixLayout() const; void setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout); MTL::TransformType motionTransformType() const; void setMotionTransformType(MTL::TransformType motionTransformType); NS::UInteger motionTransformStride() const; void setMotionTransformStride(NS::UInteger motionTransformStride); static MTL::InstanceAccelerationStructureDescriptor* descriptor(); }; class IndirectInstanceAccelerationStructureDescriptor : public NS::Copying { public: static class IndirectInstanceAccelerationStructureDescriptor* alloc(); class IndirectInstanceAccelerationStructureDescriptor* init(); class Buffer* instanceDescriptorBuffer() const; void setInstanceDescriptorBuffer(const class Buffer* instanceDescriptorBuffer); NS::UInteger instanceDescriptorBufferOffset() const; void setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset); NS::UInteger instanceDescriptorStride() const; void setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride); NS::UInteger maxInstanceCount() const; void setMaxInstanceCount(NS::UInteger maxInstanceCount); class Buffer* instanceCountBuffer() const; void setInstanceCountBuffer(const class Buffer* instanceCountBuffer); NS::UInteger instanceCountBufferOffset() const; void setInstanceCountBufferOffset(NS::UInteger instanceCountBufferOffset); MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType() const; void setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType); class Buffer* motionTransformBuffer() const; void setMotionTransformBuffer(const class Buffer* motionTransformBuffer); NS::UInteger motionTransformBufferOffset() const; void 
setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset); NS::UInteger maxMotionTransformCount() const; void setMaxMotionTransformCount(NS::UInteger maxMotionTransformCount); class Buffer* motionTransformCountBuffer() const; void setMotionTransformCountBuffer(const class Buffer* motionTransformCountBuffer); NS::UInteger motionTransformCountBufferOffset() const; void setMotionTransformCountBufferOffset(NS::UInteger motionTransformCountBufferOffset); MTL::MatrixLayout instanceTransformationMatrixLayout() const; void setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout); MTL::TransformType motionTransformType() const; void setMotionTransformType(MTL::TransformType motionTransformType); NS::UInteger motionTransformStride() const; void setMotionTransformStride(NS::UInteger motionTransformStride); static MTL::IndirectInstanceAccelerationStructureDescriptor* descriptor(); }; class AccelerationStructure : public NS::Referencing { public: NS::UInteger size() const; MTL::ResourceID gpuResourceID() const; }; } _MTL_INLINE MTL::AccelerationStructureDescriptor* MTL::AccelerationStructureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureDescriptor)); } _MTL_INLINE MTL::AccelerationStructureDescriptor* MTL::AccelerationStructureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::AccelerationStructureUsage MTL::AccelerationStructureDescriptor::usage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(usage)); } _MTL_INLINE void MTL::AccelerationStructureDescriptor::setUsage(MTL::AccelerationStructureUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setUsage_), usage); } _MTL_INLINE MTL::AccelerationStructureGeometryDescriptor* MTL::AccelerationStructureGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureGeometryDescriptor* 
MTL::AccelerationStructureGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::intersectionFunctionTableOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(intersectionFunctionTableOffset)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setIntersectionFunctionTableOffset(NS::UInteger intersectionFunctionTableOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTableOffset_), intersectionFunctionTableOffset); } _MTL_INLINE bool MTL::AccelerationStructureGeometryDescriptor::opaque() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(opaque)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setOpaque(bool opaque) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOpaque_), opaque); } _MTL_INLINE bool MTL::AccelerationStructureGeometryDescriptor::allowDuplicateIntersectionFunctionInvocation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allowDuplicateIntersectionFunctionInvocation)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setAllowDuplicateIntersectionFunctionInvocation(bool allowDuplicateIntersectionFunctionInvocation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAllowDuplicateIntersectionFunctionInvocation_), allowDuplicateIntersectionFunctionInvocation); } _MTL_INLINE NS::String* MTL::AccelerationStructureGeometryDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureGeometryDescriptor::primitiveDataBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(primitiveDataBuffer)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataBuffer(const MTL::Buffer* primitiveDataBuffer) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setPrimitiveDataBuffer_), primitiveDataBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(primitiveDataBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataBufferOffset(NS::UInteger primitiveDataBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPrimitiveDataBufferOffset_), primitiveDataBufferOffset); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(primitiveDataStride)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataStride(NS::UInteger primitiveDataStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPrimitiveDataStride_), primitiveDataStride); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureGeometryDescriptor::primitiveDataElementSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(primitiveDataElementSize)); } _MTL_INLINE void MTL::AccelerationStructureGeometryDescriptor::setPrimitiveDataElementSize(NS::UInteger primitiveDataElementSize) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPrimitiveDataElementSize_), primitiveDataElementSize); } _MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLPrimitiveAccelerationStructureDescriptor)); } _MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::PrimitiveAccelerationStructureDescriptor::geometryDescriptors() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(geometryDescriptors)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setGeometryDescriptors(const NS::Array* geometryDescriptors) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setGeometryDescriptors_), geometryDescriptors); } _MTL_INLINE MTL::MotionBorderMode MTL::PrimitiveAccelerationStructureDescriptor::motionStartBorderMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionStartBorderMode)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionStartBorderMode(MTL::MotionBorderMode motionStartBorderMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionStartBorderMode_), motionStartBorderMode); } _MTL_INLINE MTL::MotionBorderMode MTL::PrimitiveAccelerationStructureDescriptor::motionEndBorderMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionEndBorderMode)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionEndBorderMode(MTL::MotionBorderMode motionEndBorderMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionEndBorderMode_), motionEndBorderMode); } _MTL_INLINE float MTL::PrimitiveAccelerationStructureDescriptor::motionStartTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionStartTime)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionStartTime(float motionStartTime) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionStartTime_), motionStartTime); } _MTL_INLINE float MTL::PrimitiveAccelerationStructureDescriptor::motionEndTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionEndTime)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionEndTime(float motionEndTime) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionEndTime_), motionEndTime); } _MTL_INLINE NS::UInteger MTL::PrimitiveAccelerationStructureDescriptor::motionKeyframeCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionKeyframeCount)); } _MTL_INLINE void MTL::PrimitiveAccelerationStructureDescriptor::setMotionKeyframeCount(NS::UInteger motionKeyframeCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionKeyframeCount_), 
motionKeyframeCount); } _MTL_INLINE MTL::PrimitiveAccelerationStructureDescriptor* MTL::PrimitiveAccelerationStructureDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLPrimitiveAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* MTL::AccelerationStructureTriangleGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureTriangleGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* MTL::AccelerationStructureTriangleGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::vertexBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexBuffer)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexBuffer(const MTL::Buffer* vertexBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffer_), vertexBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::vertexBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexBufferOffset(NS::UInteger vertexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBufferOffset_), vertexBufferOffset); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureTriangleGeometryDescriptor::vertexFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexFormat)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexFormat(MTL::AttributeFormat vertexFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexFormat_), vertexFormat); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::vertexStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexStride)); } _MTL_INLINE void 
MTL::AccelerationStructureTriangleGeometryDescriptor::setVertexStride(NS::UInteger vertexStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexStride_), vertexStride); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::indexBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBuffer)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::indexBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset); } _MTL_INLINE MTL::IndexType MTL::AccelerationStructureTriangleGeometryDescriptor::indexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexType)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setIndexType(MTL::IndexType indexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexType_), indexType); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::triangleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(triangleCount)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTriangleCount(NS::UInteger triangleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTriangleCount_), triangleCount); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixBuffer)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixBuffer(const MTL::Buffer* 
transformationMatrixBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixBuffer_), transformationMatrixBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixBufferOffset_), transformationMatrixBufferOffset); } _MTL_INLINE MTL::MatrixLayout MTL::AccelerationStructureTriangleGeometryDescriptor::transformationMatrixLayout() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixLayout)); } _MTL_INLINE void MTL::AccelerationStructureTriangleGeometryDescriptor::setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixLayout_), transformationMatrixLayout); } _MTL_INLINE MTL::AccelerationStructureTriangleGeometryDescriptor* MTL::AccelerationStructureTriangleGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureTriangleGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxBuffer)); } _MTL_INLINE void 
MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxBuffer(const MTL::Buffer* boundingBoxBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxBuffer_), boundingBoxBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxBufferOffset(NS::UInteger boundingBoxBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxBufferOffset_), boundingBoxBufferOffset); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxStride)); } _MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxStride(NS::UInteger boundingBoxStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxStride_), boundingBoxStride); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureBoundingBoxGeometryDescriptor::boundingBoxCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxCount)); } _MTL_INLINE void MTL::AccelerationStructureBoundingBoxGeometryDescriptor::setBoundingBoxCount(NS::UInteger boundingBoxCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxCount_), boundingBoxCount); } _MTL_INLINE MTL::AccelerationStructureBoundingBoxGeometryDescriptor* MTL::AccelerationStructureBoundingBoxGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureBoundingBoxGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLMotionKeyframeData)); } _MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* 
MTL::MotionKeyframeData::buffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(buffer)); } _MTL_INLINE void MTL::MotionKeyframeData::setBuffer(const MTL::Buffer* buffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffer_), buffer); } _MTL_INLINE NS::UInteger MTL::MotionKeyframeData::offset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(offset)); } _MTL_INLINE void MTL::MotionKeyframeData::setOffset(NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOffset_), offset); } _MTL_INLINE MTL::MotionKeyframeData* MTL::MotionKeyframeData::data() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLMotionKeyframeData), _MTL_PRIVATE_SEL(data)); } _MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexBuffers)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexBuffers(const NS::Array* vertexBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffers_), vertexBuffers); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexFormat)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexFormat(MTL::AttributeFormat vertexFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexFormat_), vertexFormat); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::vertexStride() const { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexStride)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setVertexStride(NS::UInteger vertexStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexStride_), vertexStride); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBuffer)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset); } _MTL_INLINE MTL::IndexType MTL::AccelerationStructureMotionTriangleGeometryDescriptor::indexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexType)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setIndexType(MTL::IndexType indexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexType_), indexType); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::triangleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(triangleCount)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTriangleCount(NS::UInteger triangleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTriangleCount_), triangleCount); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixBuffer)); } 
_MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixBuffer(const MTL::Buffer* transformationMatrixBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixBuffer_), transformationMatrixBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixBufferOffset(NS::UInteger transformationMatrixBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixBufferOffset_), transformationMatrixBufferOffset); } _MTL_INLINE MTL::MatrixLayout MTL::AccelerationStructureMotionTriangleGeometryDescriptor::transformationMatrixLayout() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(transformationMatrixLayout)); } _MTL_INLINE void MTL::AccelerationStructureMotionTriangleGeometryDescriptor::setTransformationMatrixLayout(MTL::MatrixLayout transformationMatrixLayout) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTransformationMatrixLayout_), transformationMatrixLayout); } _MTL_INLINE MTL::AccelerationStructureMotionTriangleGeometryDescriptor* MTL::AccelerationStructureMotionTriangleGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionTriangleGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* 
MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxBuffers)); } _MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxBuffers(const NS::Array* boundingBoxBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxBuffers_), boundingBoxBuffers); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxStride)); } _MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxStride(NS::UInteger boundingBoxStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxStride_), boundingBoxStride); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::boundingBoxCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(boundingBoxCount)); } _MTL_INLINE void MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::setBoundingBoxCount(NS::UInteger boundingBoxCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBoundingBoxCount_), boundingBoxCount); } _MTL_INLINE MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor* MTL::AccelerationStructureMotionBoundingBoxGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionBoundingBoxGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureCurveGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureCurveGeometryDescriptor::controlPointBuffer() const { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointBuffer)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointBuffer(const MTL::Buffer* controlPointBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointBuffer_), controlPointBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointBufferOffset(NS::UInteger controlPointBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointBufferOffset_), controlPointBufferOffset); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointCount)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointCount(NS::UInteger controlPointCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointCount_), controlPointCount); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::controlPointStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointStride)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointStride(NS::UInteger controlPointStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointStride_), controlPointStride); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureCurveGeometryDescriptor::controlPointFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointFormat)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setControlPointFormat(MTL::AttributeFormat controlPointFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointFormat_), controlPointFormat); } _MTL_INLINE MTL::Buffer* 
MTL::AccelerationStructureCurveGeometryDescriptor::radiusBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusBuffer)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusBuffer(const MTL::Buffer* radiusBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusBuffer_), radiusBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::radiusBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusBufferOffset(NS::UInteger radiusBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusBufferOffset_), radiusBufferOffset); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureCurveGeometryDescriptor::radiusFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusFormat)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusFormat(MTL::AttributeFormat radiusFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusFormat_), radiusFormat); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::radiusStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusStride)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setRadiusStride(NS::UInteger radiusStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusStride_), radiusStride); } _MTL_INLINE MTL::Buffer* MTL::AccelerationStructureCurveGeometryDescriptor::indexBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBuffer)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::indexBufferOffset() const { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(indexBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset); } _MTL_INLINE MTL::IndexType MTL::AccelerationStructureCurveGeometryDescriptor::indexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexType)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setIndexType(MTL::IndexType indexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexType_), indexType); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::segmentCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(segmentCount)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setSegmentCount(NS::UInteger segmentCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSegmentCount_), segmentCount); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureCurveGeometryDescriptor::segmentControlPointCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(segmentControlPointCount)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setSegmentControlPointCount(NS::UInteger segmentControlPointCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSegmentControlPointCount_), segmentControlPointCount); } _MTL_INLINE MTL::CurveType MTL::AccelerationStructureCurveGeometryDescriptor::curveType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveType)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveType(MTL::CurveType curveType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveType_), curveType); } _MTL_INLINE MTL::CurveBasis MTL::AccelerationStructureCurveGeometryDescriptor::curveBasis() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveBasis)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveBasis(MTL::CurveBasis 
curveBasis) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveBasis_), curveBasis); } _MTL_INLINE MTL::CurveEndCaps MTL::AccelerationStructureCurveGeometryDescriptor::curveEndCaps() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveEndCaps)); } _MTL_INLINE void MTL::AccelerationStructureCurveGeometryDescriptor::setCurveEndCaps(MTL::CurveEndCaps curveEndCaps) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveEndCaps_), curveEndCaps); } _MTL_INLINE MTL::AccelerationStructureCurveGeometryDescriptor* MTL::AccelerationStructureCurveGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureCurveGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor)); } _MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointBuffers)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointBuffers(const NS::Array* controlPointBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointBuffers_), controlPointBuffers); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointCount)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointCount(NS::UInteger controlPointCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointCount_), controlPointCount); } _MTL_INLINE NS::UInteger 
MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointStride)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointStride(NS::UInteger controlPointStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointStride_), controlPointStride); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionCurveGeometryDescriptor::controlPointFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlPointFormat)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setControlPointFormat(MTL::AttributeFormat controlPointFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlPointFormat_), controlPointFormat); } _MTL_INLINE NS::Array* MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusBuffers)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusBuffers(const NS::Array* radiusBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusBuffers_), radiusBuffers); } _MTL_INLINE MTL::AttributeFormat MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusFormat)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusFormat(MTL::AttributeFormat radiusFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusFormat_), radiusFormat); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::radiusStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(radiusStride)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setRadiusStride(NS::UInteger radiusStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRadiusStride_), radiusStride); } _MTL_INLINE MTL::Buffer* 
MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBuffer)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexBuffer(const MTL::Buffer* indexBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBuffer_), indexBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexBufferOffset)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexBufferOffset(NS::UInteger indexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexBufferOffset_), indexBufferOffset); } _MTL_INLINE MTL::IndexType MTL::AccelerationStructureMotionCurveGeometryDescriptor::indexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indexType)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setIndexType(MTL::IndexType indexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndexType_), indexType); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::segmentCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(segmentCount)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setSegmentCount(NS::UInteger segmentCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSegmentCount_), segmentCount); } _MTL_INLINE NS::UInteger MTL::AccelerationStructureMotionCurveGeometryDescriptor::segmentControlPointCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(segmentControlPointCount)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setSegmentControlPointCount(NS::UInteger segmentControlPointCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSegmentControlPointCount_), segmentControlPointCount); } _MTL_INLINE MTL::CurveType 
MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveType)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveType(MTL::CurveType curveType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveType_), curveType); } _MTL_INLINE MTL::CurveBasis MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveBasis() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveBasis)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveBasis(MTL::CurveBasis curveBasis) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveBasis_), curveBasis); } _MTL_INLINE MTL::CurveEndCaps MTL::AccelerationStructureMotionCurveGeometryDescriptor::curveEndCaps() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(curveEndCaps)); } _MTL_INLINE void MTL::AccelerationStructureMotionCurveGeometryDescriptor::setCurveEndCaps(MTL::CurveEndCaps curveEndCaps) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCurveEndCaps_), curveEndCaps); } _MTL_INLINE MTL::AccelerationStructureMotionCurveGeometryDescriptor* MTL::AccelerationStructureMotionCurveGeometryDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructureMotionCurveGeometryDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLInstanceAccelerationStructureDescriptor)); } _MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorBuffer)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorBuffer(const MTL::Buffer* 
instanceDescriptorBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBuffer_), instanceDescriptorBuffer); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorBufferOffset)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBufferOffset_), instanceDescriptorBufferOffset); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorStride)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorStride_), instanceDescriptorStride); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::instanceCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceCount)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceCount(NS::UInteger instanceCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceCount_), instanceCount); } _MTL_INLINE NS::Array* MTL::InstanceAccelerationStructureDescriptor::instancedAccelerationStructures() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instancedAccelerationStructures)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstancedAccelerationStructures(const NS::Array* instancedAccelerationStructures) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstancedAccelerationStructures_), instancedAccelerationStructures); } _MTL_INLINE MTL::AccelerationStructureInstanceDescriptorType MTL::InstanceAccelerationStructureDescriptor::instanceDescriptorType() const { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorType)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorType_), instanceDescriptorType); } _MTL_INLINE MTL::Buffer* MTL::InstanceAccelerationStructureDescriptor::motionTransformBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformBuffer)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformBuffer(const MTL::Buffer* motionTransformBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformBuffer_), motionTransformBuffer); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformBufferOffset)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformBufferOffset_), motionTransformBufferOffset); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformCount)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformCount(NS::UInteger motionTransformCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformCount_), motionTransformCount); } _MTL_INLINE MTL::MatrixLayout MTL::InstanceAccelerationStructureDescriptor::instanceTransformationMatrixLayout() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceTransformationMatrixLayout)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout) { Object::sendMessage(this, 
// ---------------------------------------------------------------------------
// metal-cpp: auto-generated single-header Metal C++ bindings (vendored).
// Everything below is mechanically generated FFI glue: each inline method
// forwards one Objective-C message to the runtime via Object::sendMessage
// using a cached selector (_MTL_PRIVATE_SEL) or class (_MTL_PRIVATE_CLS).
// Do not hand-edit logic here -- regenerate from upstream metal-cpp instead.
// NOTE(review): angle-bracket template argument lists appear to have been
// stripped from this text during extraction (upstream metal-cpp writes e.g.
// `Object::sendMessage<NS::UInteger>(...)` and `NS::Referencing<Type>`);
// confirm against the upstream header before treating this as compilable.
// NOTE(review): the repeated `#pragma once #pragma once` further down is an
// artifact of concatenating multiple per-class headers into one file --
// redundant but harmless.
// ---------------------------------------------------------------------------
_MTL_PRIVATE_SEL(setInstanceTransformationMatrixLayout_), instanceTransformationMatrixLayout); } _MTL_INLINE MTL::TransformType MTL::InstanceAccelerationStructureDescriptor::motionTransformType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformType)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformType(MTL::TransformType motionTransformType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformType_), motionTransformType); } _MTL_INLINE NS::UInteger MTL::InstanceAccelerationStructureDescriptor::motionTransformStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformStride)); } _MTL_INLINE void MTL::InstanceAccelerationStructureDescriptor::setMotionTransformStride(NS::UInteger motionTransformStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformStride_), motionTransformStride); } _MTL_INLINE MTL::InstanceAccelerationStructureDescriptor* MTL::InstanceAccelerationStructureDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLInstanceAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLIndirectInstanceAccelerationStructureDescriptor)); } _MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorBuffer)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorBuffer(const MTL::Buffer* instanceDescriptorBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBuffer_), instanceDescriptorBuffer); } _MTL_INLINE NS::UInteger
MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorBufferOffset)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorBufferOffset(NS::UInteger instanceDescriptorBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorBufferOffset_), instanceDescriptorBufferOffset); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorStride)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorStride(NS::UInteger instanceDescriptorStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorStride_), instanceDescriptorStride); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::maxInstanceCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxInstanceCount)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMaxInstanceCount(NS::UInteger maxInstanceCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxInstanceCount_), maxInstanceCount); } _MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::instanceCountBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceCountBuffer)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceCountBuffer(const MTL::Buffer* instanceCountBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceCountBuffer_), instanceCountBuffer); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::instanceCountBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceCountBufferOffset)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceCountBufferOffset(NS::UInteger
instanceCountBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceCountBufferOffset_), instanceCountBufferOffset); } _MTL_INLINE MTL::AccelerationStructureInstanceDescriptorType MTL::IndirectInstanceAccelerationStructureDescriptor::instanceDescriptorType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceDescriptorType)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceDescriptorType(MTL::AccelerationStructureInstanceDescriptorType instanceDescriptorType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceDescriptorType_), instanceDescriptorType); } _MTL_INLINE MTL::Buffer* MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformBuffer)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformBuffer(const MTL::Buffer* motionTransformBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformBuffer_), motionTransformBuffer); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformBufferOffset)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformBufferOffset(NS::UInteger motionTransformBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformBufferOffset_), motionTransformBufferOffset); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::maxMotionTransformCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxMotionTransformCount)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMaxMotionTransformCount(NS::UInteger maxMotionTransformCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxMotionTransformCount_), maxMotionTransformCount); } _MTL_INLINE MTL::Buffer*
MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformCountBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformCountBuffer)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformCountBuffer(const MTL::Buffer* motionTransformCountBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformCountBuffer_), motionTransformCountBuffer); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformCountBufferOffset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformCountBufferOffset)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformCountBufferOffset(NS::UInteger motionTransformCountBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformCountBufferOffset_), motionTransformCountBufferOffset); } _MTL_INLINE MTL::MatrixLayout MTL::IndirectInstanceAccelerationStructureDescriptor::instanceTransformationMatrixLayout() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(instanceTransformationMatrixLayout)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setInstanceTransformationMatrixLayout(MTL::MatrixLayout instanceTransformationMatrixLayout) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstanceTransformationMatrixLayout_), instanceTransformationMatrixLayout); } _MTL_INLINE MTL::TransformType MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(motionTransformType)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformType(MTL::TransformType motionTransformType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformType_), motionTransformType); } _MTL_INLINE NS::UInteger MTL::IndirectInstanceAccelerationStructureDescriptor::motionTransformStride() const { return Object::sendMessage(this,
_MTL_PRIVATE_SEL(motionTransformStride)); } _MTL_INLINE void MTL::IndirectInstanceAccelerationStructureDescriptor::setMotionTransformStride(NS::UInteger motionTransformStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMotionTransformStride_), motionTransformStride); } _MTL_INLINE MTL::IndirectInstanceAccelerationStructureDescriptor* MTL::IndirectInstanceAccelerationStructureDescriptor::descriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLIndirectInstanceAccelerationStructureDescriptor), _MTL_PRIVATE_SEL(descriptor)); } _MTL_INLINE NS::UInteger MTL::AccelerationStructure::size() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(size)); } _MTL_INLINE MTL::ResourceID MTL::AccelerationStructure::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } #pragma once #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, DataType) { DataTypeNone = 0, DataTypeStruct = 1, DataTypeArray = 2, DataTypeFloat = 3, DataTypeFloat2 = 4, DataTypeFloat3 = 5, DataTypeFloat4 = 6, DataTypeFloat2x2 = 7, DataTypeFloat2x3 = 8, DataTypeFloat2x4 = 9, DataTypeFloat3x2 = 10, DataTypeFloat3x3 = 11, DataTypeFloat3x4 = 12, DataTypeFloat4x2 = 13, DataTypeFloat4x3 = 14, DataTypeFloat4x4 = 15, DataTypeHalf = 16, DataTypeHalf2 = 17, DataTypeHalf3 = 18, DataTypeHalf4 = 19, DataTypeHalf2x2 = 20, DataTypeHalf2x3 = 21, DataTypeHalf2x4 = 22, DataTypeHalf3x2 = 23, DataTypeHalf3x3 = 24, DataTypeHalf3x4 = 25, DataTypeHalf4x2 = 26, DataTypeHalf4x3 = 27, DataTypeHalf4x4 = 28, DataTypeInt = 29, DataTypeInt2 = 30, DataTypeInt3 = 31, DataTypeInt4 = 32, DataTypeUInt = 33, DataTypeUInt2 = 34, DataTypeUInt3 = 35, DataTypeUInt4 = 36, DataTypeShort = 37, DataTypeShort2 = 38, DataTypeShort3 = 39, DataTypeShort4 = 40, DataTypeUShort = 41, DataTypeUShort2 = 42, DataTypeUShort3 = 43, DataTypeUShort4 = 44, DataTypeChar = 45, DataTypeChar2 = 46, DataTypeChar3 = 47, DataTypeChar4 = 48, DataTypeUChar = 49, DataTypeUChar2 = 50, DataTypeUChar3 = 51, DataTypeUChar4 = 52,
// NOTE(review): the numbering gaps visible in this enum (57, 61, 89-114,
// 119-120) mirror the raw values of Apple's MTLDataType -- presumably
// reserved by the framework; do not renumber or compact them.
DataTypeBool = 53, DataTypeBool2 = 54, DataTypeBool3 = 55, DataTypeBool4 = 56, DataTypeTexture = 58, DataTypeSampler = 59, DataTypePointer = 60, DataTypeR8Unorm = 62, DataTypeR8Snorm = 63, DataTypeR16Unorm = 64, DataTypeR16Snorm = 65, DataTypeRG8Unorm = 66, DataTypeRG8Snorm = 67, DataTypeRG16Unorm = 68, DataTypeRG16Snorm = 69, DataTypeRGBA8Unorm = 70, DataTypeRGBA8Unorm_sRGB = 71, DataTypeRGBA8Snorm = 72, DataTypeRGBA16Unorm = 73, DataTypeRGBA16Snorm = 74, DataTypeRGB10A2Unorm = 75, DataTypeRG11B10Float = 76, DataTypeRGB9E5Float = 77, DataTypeRenderPipeline = 78, DataTypeComputePipeline = 79, DataTypeIndirectCommandBuffer = 80, DataTypeLong = 81, DataTypeLong2 = 82, DataTypeLong3 = 83, DataTypeLong4 = 84, DataTypeULong = 85, DataTypeULong2 = 86, DataTypeULong3 = 87, DataTypeULong4 = 88, DataTypeVisibleFunctionTable = 115, DataTypeIntersectionFunctionTable = 116, DataTypePrimitiveAccelerationStructure = 117, DataTypeInstanceAccelerationStructure = 118, DataTypeBFloat = 121, DataTypeBFloat2 = 122, DataTypeBFloat3 = 123, DataTypeBFloat4 = 124, }; _MTL_ENUM(NS::Integer, BindingType) { BindingTypeBuffer = 0, BindingTypeThreadgroupMemory = 1, BindingTypeTexture = 2, BindingTypeSampler = 3, BindingTypeImageblockData = 16, BindingTypeImageblock = 17, BindingTypeVisibleFunctionTable = 24, BindingTypePrimitiveAccelerationStructure = 25, BindingTypeInstanceAccelerationStructure = 26, BindingTypeIntersectionFunctionTable = 27, BindingTypeObjectPayload = 34, }; _MTL_ENUM(NS::UInteger, ArgumentType) { ArgumentTypeBuffer = 0, ArgumentTypeThreadgroupMemory = 1, ArgumentTypeTexture = 2, ArgumentTypeSampler = 3, ArgumentTypeImageblockData = 16, ArgumentTypeImageblock = 17, ArgumentTypeVisibleFunctionTable = 24, ArgumentTypePrimitiveAccelerationStructure = 25, ArgumentTypeInstanceAccelerationStructure = 26, ArgumentTypeIntersectionFunctionTable = 27, }; _MTL_ENUM(NS::UInteger, BindingAccess) { BindingAccessReadOnly = 0, BindingAccessReadWrite = 1, BindingAccessWriteOnly = 2,
// NOTE(review): the ArgumentAccess* enumerators below carry the same raw
// values (0, 1, 2) as the BindingAccess* names above -- they read as legacy
// aliases kept in the same enum for source compatibility; confirm against
// upstream before removing.
ArgumentAccessReadOnly = 0, ArgumentAccessReadWrite = 1, ArgumentAccessWriteOnly = 2, }; class Type : public NS::Referencing { public: static class Type* alloc(); class Type* init(); MTL::DataType dataType() const; }; class StructMember : public NS::Referencing { public: static class StructMember* alloc(); class StructMember* init(); NS::String* name() const; NS::UInteger offset() const; MTL::DataType dataType() const; class StructType* structType(); class ArrayType* arrayType(); class TextureReferenceType* textureReferenceType(); class PointerType* pointerType(); NS::UInteger argumentIndex() const; }; class StructType : public NS::Referencing { public: static class StructType* alloc(); class StructType* init(); NS::Array* members() const; class StructMember* memberByName(const NS::String* name); }; class ArrayType : public NS::Referencing { public: static class ArrayType* alloc(); class ArrayType* init(); MTL::DataType elementType() const; NS::UInteger arrayLength() const; NS::UInteger stride() const; NS::UInteger argumentIndexStride() const; class StructType* elementStructType(); class ArrayType* elementArrayType(); class TextureReferenceType* elementTextureReferenceType(); class PointerType* elementPointerType(); }; class PointerType : public NS::Referencing { public: static class PointerType* alloc(); class PointerType* init(); MTL::DataType elementType() const; MTL::BindingAccess access() const; NS::UInteger alignment() const; NS::UInteger dataSize() const; bool elementIsArgumentBuffer() const; class StructType* elementStructType(); class ArrayType* elementArrayType(); }; class TextureReferenceType : public NS::Referencing { public: static class TextureReferenceType* alloc(); class TextureReferenceType* init(); MTL::DataType textureDataType() const; MTL::TextureType textureType() const; MTL::BindingAccess access() const; bool isDepthTexture() const; }; class Argument : public NS::Referencing { public: static class Argument* alloc(); class Argument* init();
NS::String* name() const; MTL::ArgumentType type() const; MTL::BindingAccess access() const; NS::UInteger index() const; bool active() const; NS::UInteger bufferAlignment() const; NS::UInteger bufferDataSize() const; MTL::DataType bufferDataType() const; class StructType* bufferStructType() const; class PointerType* bufferPointerType() const; NS::UInteger threadgroupMemoryAlignment() const; NS::UInteger threadgroupMemoryDataSize() const; MTL::TextureType textureType() const; MTL::DataType textureDataType() const; bool isDepthTexture() const; NS::UInteger arrayLength() const; }; class Binding : public NS::Referencing { public: NS::String* name() const; MTL::BindingType type() const; MTL::BindingAccess access() const; NS::UInteger index() const; bool used() const; bool argument() const; }; class BufferBinding : public NS::Referencing { public: NS::UInteger bufferAlignment() const; NS::UInteger bufferDataSize() const; MTL::DataType bufferDataType() const; class StructType* bufferStructType() const; class PointerType* bufferPointerType() const; }; class ThreadgroupBinding : public NS::Referencing { public: NS::UInteger threadgroupMemoryAlignment() const; NS::UInteger threadgroupMemoryDataSize() const; }; class TextureBinding : public NS::Referencing { public: MTL::TextureType textureType() const; MTL::DataType textureDataType() const; bool depthTexture() const; NS::UInteger arrayLength() const; }; class ObjectPayloadBinding : public NS::Referencing { public: NS::UInteger objectPayloadAlignment() const; NS::UInteger objectPayloadDataSize() const; }; } _MTL_INLINE MTL::Type* MTL::Type::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLType)); } _MTL_INLINE MTL::Type* MTL::Type::init() { return NS::Object::init(); } _MTL_INLINE MTL::DataType MTL::Type::dataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dataType)); } _MTL_INLINE MTL::StructMember* MTL::StructMember::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLStructMember)); } _MTL_INLINE
MTL::StructMember* MTL::StructMember::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::StructMember::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::UInteger MTL::StructMember::offset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(offset)); } _MTL_INLINE MTL::DataType MTL::StructMember::dataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dataType)); } _MTL_INLINE MTL::StructType* MTL::StructMember::structType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(structType)); } _MTL_INLINE MTL::ArrayType* MTL::StructMember::arrayType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayType)); } _MTL_INLINE MTL::TextureReferenceType* MTL::StructMember::textureReferenceType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureReferenceType)); } _MTL_INLINE MTL::PointerType* MTL::StructMember::pointerType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(pointerType)); } _MTL_INLINE NS::UInteger MTL::StructMember::argumentIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(argumentIndex)); } _MTL_INLINE MTL::StructType* MTL::StructType::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLStructType)); } _MTL_INLINE MTL::StructType* MTL::StructType::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::StructType::members() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(members)); } _MTL_INLINE MTL::StructMember* MTL::StructType::memberByName(const NS::String* name) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(memberByName_), name); } _MTL_INLINE MTL::ArrayType* MTL::ArrayType::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLArrayType)); } _MTL_INLINE MTL::ArrayType* MTL::ArrayType::init() { return NS::Object::init(); } _MTL_INLINE MTL::DataType MTL::ArrayType::elementType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementType)); } _MTL_INLINE NS::UInteger MTL::ArrayType::arrayLength() const
{ return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength)); } _MTL_INLINE NS::UInteger MTL::ArrayType::stride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stride)); } _MTL_INLINE NS::UInteger MTL::ArrayType::argumentIndexStride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(argumentIndexStride)); } _MTL_INLINE MTL::StructType* MTL::ArrayType::elementStructType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementStructType)); } _MTL_INLINE MTL::ArrayType* MTL::ArrayType::elementArrayType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementArrayType)); } _MTL_INLINE MTL::TextureReferenceType* MTL::ArrayType::elementTextureReferenceType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementTextureReferenceType)); } _MTL_INLINE MTL::PointerType* MTL::ArrayType::elementPointerType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementPointerType)); } _MTL_INLINE MTL::PointerType* MTL::PointerType::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLPointerType)); } _MTL_INLINE MTL::PointerType* MTL::PointerType::init() { return NS::Object::init(); } _MTL_INLINE MTL::DataType MTL::PointerType::elementType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementType)); } _MTL_INLINE MTL::BindingAccess MTL::PointerType::access() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(access)); } _MTL_INLINE NS::UInteger MTL::PointerType::alignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(alignment)); } _MTL_INLINE NS::UInteger MTL::PointerType::dataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dataSize)); } _MTL_INLINE bool MTL::PointerType::elementIsArgumentBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementIsArgumentBuffer)); } _MTL_INLINE MTL::StructType* MTL::PointerType::elementStructType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementStructType)); } _MTL_INLINE MTL::ArrayType*
MTL::PointerType::elementArrayType() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(elementArrayType)); } _MTL_INLINE MTL::TextureReferenceType* MTL::TextureReferenceType::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLTextureReferenceType)); } _MTL_INLINE MTL::TextureReferenceType* MTL::TextureReferenceType::init() { return NS::Object::init(); } _MTL_INLINE MTL::DataType MTL::TextureReferenceType::textureDataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureDataType)); } _MTL_INLINE MTL::TextureType MTL::TextureReferenceType::textureType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType)); } _MTL_INLINE MTL::BindingAccess MTL::TextureReferenceType::access() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(access)); } _MTL_INLINE bool MTL::TextureReferenceType::isDepthTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isDepthTexture)); } _MTL_INLINE MTL::Argument* MTL::Argument::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLArgument)); } _MTL_INLINE MTL::Argument* MTL::Argument::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::Argument::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE MTL::ArgumentType MTL::Argument::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE MTL::BindingAccess MTL::Argument::access() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(access)); } _MTL_INLINE NS::UInteger MTL::Argument::index() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(index)); } _MTL_INLINE bool MTL::Argument::active() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isActive)); } _MTL_INLINE NS::UInteger MTL::Argument::bufferAlignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferAlignment)); } _MTL_INLINE NS::UInteger MTL::Argument::bufferDataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferDataSize)); } _MTL_INLINE
MTL::DataType MTL::Argument::bufferDataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferDataType)); } _MTL_INLINE MTL::StructType* MTL::Argument::bufferStructType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferStructType)); } _MTL_INLINE MTL::PointerType* MTL::Argument::bufferPointerType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferPointerType)); } _MTL_INLINE NS::UInteger MTL::Argument::threadgroupMemoryAlignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupMemoryAlignment)); } _MTL_INLINE NS::UInteger MTL::Argument::threadgroupMemoryDataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupMemoryDataSize)); } _MTL_INLINE MTL::TextureType MTL::Argument::textureType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType)); } _MTL_INLINE MTL::DataType MTL::Argument::textureDataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureDataType)); } _MTL_INLINE bool MTL::Argument::isDepthTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isDepthTexture)); } _MTL_INLINE NS::UInteger MTL::Argument::arrayLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength)); } _MTL_INLINE NS::String* MTL::Binding::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE MTL::BindingType MTL::Binding::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE MTL::BindingAccess MTL::Binding::access() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(access)); } _MTL_INLINE NS::UInteger MTL::Binding::index() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(index)); } _MTL_INLINE bool MTL::Binding::used() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isUsed)); } _MTL_INLINE bool MTL::Binding::argument() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isArgument)); } _MTL_INLINE NS::UInteger
MTL::BufferBinding::bufferAlignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferAlignment)); } _MTL_INLINE NS::UInteger MTL::BufferBinding::bufferDataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferDataSize)); } _MTL_INLINE MTL::DataType MTL::BufferBinding::bufferDataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferDataType)); } _MTL_INLINE MTL::StructType* MTL::BufferBinding::bufferStructType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferStructType)); } _MTL_INLINE MTL::PointerType* MTL::BufferBinding::bufferPointerType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferPointerType)); } _MTL_INLINE NS::UInteger MTL::ThreadgroupBinding::threadgroupMemoryAlignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupMemoryAlignment)); } _MTL_INLINE NS::UInteger MTL::ThreadgroupBinding::threadgroupMemoryDataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupMemoryDataSize)); } _MTL_INLINE MTL::TextureType MTL::TextureBinding::textureType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType)); } _MTL_INLINE MTL::DataType MTL::TextureBinding::textureDataType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureDataType)); } _MTL_INLINE bool MTL::TextureBinding::depthTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isDepthTexture)); } _MTL_INLINE NS::UInteger MTL::TextureBinding::arrayLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength)); } _MTL_INLINE NS::UInteger MTL::ObjectPayloadBinding::objectPayloadAlignment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectPayloadAlignment)); } _MTL_INLINE NS::UInteger MTL::ObjectPayloadBinding::objectPayloadDataSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectPayloadDataSize)); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, ResourceUsage) { ResourceUsageRead =
1, ResourceUsageWrite = 2, ResourceUsageSample = 4, }; _MTL_OPTIONS(NS::UInteger, BarrierScope) { BarrierScopeBuffers = 1, BarrierScopeTextures = 2, BarrierScopeRenderTargets = 4, }; class CommandEncoder : public NS::Referencing { public: class Device* device() const; NS::String* label() const; void setLabel(const NS::String* label); void endEncoding(); void insertDebugSignpost(const NS::String* string); void pushDebugGroup(const NS::String* string); void popDebugGroup(); }; } _MTL_INLINE MTL::Device* MTL::CommandEncoder::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::CommandEncoder::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::CommandEncoder::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE void MTL::CommandEncoder::endEncoding() { Object::sendMessage(this, _MTL_PRIVATE_SEL(endEncoding)); } _MTL_INLINE void MTL::CommandEncoder::insertDebugSignpost(const NS::String* string) { Object::sendMessage(this, _MTL_PRIVATE_SEL(insertDebugSignpost_), string); } _MTL_INLINE void MTL::CommandEncoder::pushDebugGroup(const NS::String* string) { Object::sendMessage(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string); } _MTL_INLINE void MTL::CommandEncoder::popDebugGroup() { Object::sendMessage(this, _MTL_PRIVATE_SEL(popDebugGroup)); } namespace MTL { _MTL_OPTIONS(NS::UInteger, AccelerationStructureRefitOptions) { AccelerationStructureRefitOptionVertexData = 1, AccelerationStructureRefitOptionPerPrimitiveData = 2, }; class AccelerationStructureCommandEncoder : public NS::Referencing { public: void buildAccelerationStructure(const class AccelerationStructure* accelerationStructure, const class AccelerationStructureDescriptor* descriptor, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset); void refitAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const
class AccelerationStructureDescriptor* descriptor, const class AccelerationStructure* destinationAccelerationStructure, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset); void refitAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructureDescriptor* descriptor, const class AccelerationStructure* destinationAccelerationStructure, const class Buffer* scratchBuffer, NS::UInteger scratchBufferOffset, MTL::AccelerationStructureRefitOptions options); void copyAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructure* destinationAccelerationStructure); void writeCompactedAccelerationStructureSize(const class AccelerationStructure* accelerationStructure, const class Buffer* buffer, NS::UInteger offset); void writeCompactedAccelerationStructureSize(const class AccelerationStructure* accelerationStructure, const class Buffer* buffer, NS::UInteger offset, MTL::DataType sizeDataType); void copyAndCompactAccelerationStructure(const class AccelerationStructure* sourceAccelerationStructure, const class AccelerationStructure* destinationAccelerationStructure); void updateFence(const class Fence* fence); void waitForFence(const class Fence* fence); void useResource(const class Resource* resource, MTL::ResourceUsage usage); void useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage); void useHeap(const class Heap* heap); void useHeaps(const class Heap* const heaps[], NS::UInteger count); void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier); }; class AccelerationStructurePassSampleBufferAttachmentDescriptor : public NS::Copying { public: static class AccelerationStructurePassSampleBufferAttachmentDescriptor* alloc(); class AccelerationStructurePassSampleBufferAttachmentDescriptor* init(); class CounterSampleBuffer* sampleBuffer() const;
void setSampleBuffer(const class CounterSampleBuffer* sampleBuffer); NS::UInteger startOfEncoderSampleIndex() const; void setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex); NS::UInteger endOfEncoderSampleIndex() const; void setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex); }; class AccelerationStructurePassSampleBufferAttachmentDescriptorArray : public NS::Referencing { public: static class AccelerationStructurePassSampleBufferAttachmentDescriptorArray* alloc(); class AccelerationStructurePassSampleBufferAttachmentDescriptorArray* init(); class AccelerationStructurePassSampleBufferAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class AccelerationStructurePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class AccelerationStructurePassDescriptor : public NS::Copying { public: static class AccelerationStructurePassDescriptor* alloc(); class AccelerationStructurePassDescriptor* init(); static class AccelerationStructurePassDescriptor* accelerationStructurePassDescriptor(); class AccelerationStructurePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const; }; } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::buildAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(buildAccelerationStructure_descriptor_scratchBuffer_scratchBufferOffset_), accelerationStructure, descriptor, scratchBuffer, scratchBufferOffset); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::refitAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::AccelerationStructure* destinationAccelerationStructure, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset) {
Object::sendMessage(this, _MTL_PRIVATE_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_), sourceAccelerationStructure, descriptor, destinationAccelerationStructure, scratchBuffer, scratchBufferOffset); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::refitAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructureDescriptor* descriptor, const MTL::AccelerationStructure* destinationAccelerationStructure, const MTL::Buffer* scratchBuffer, NS::UInteger scratchBufferOffset, MTL::AccelerationStructureRefitOptions options) { Object::sendMessage(this, _MTL_PRIVATE_SEL(refitAccelerationStructure_descriptor_destination_scratchBuffer_scratchBufferOffset_options_), sourceAccelerationStructure, descriptor, destinationAccelerationStructure, scratchBuffer, scratchBufferOffset, options); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::copyAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructure* destinationAccelerationStructure) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyAccelerationStructure_toAccelerationStructure_), sourceAccelerationStructure, destinationAccelerationStructure); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::writeCompactedAccelerationStructureSize(const MTL::AccelerationStructure* accelerationStructure, const MTL::Buffer* buffer, NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_), accelerationStructure, buffer, offset); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::writeCompactedAccelerationStructureSize(const MTL::AccelerationStructure* accelerationStructure, const MTL::Buffer* buffer, NS::UInteger offset, MTL::DataType sizeDataType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(writeCompactedAccelerationStructureSize_toBuffer_offset_sizeDataType_), accelerationStructure,
buffer, offset, sizeDataType); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::copyAndCompactAccelerationStructure(const MTL::AccelerationStructure* sourceAccelerationStructure, const MTL::AccelerationStructure* destinationAccelerationStructure) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyAndCompactAccelerationStructure_toAccelerationStructure_), sourceAccelerationStructure, destinationAccelerationStructure); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::updateFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateFence_), fence); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::waitForFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitForFence_), fence); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResources_count_usage_), resources, count, usage); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useHeap(const MTL::Heap* heap) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeap_), heap); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count); } _MTL_INLINE void MTL::AccelerationStructureCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier) { Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier); } _MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor*
MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptor)); } _MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CounterSampleBuffer* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::sampleBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBuffer)); } _MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer); } _MTL_INLINE NS::UInteger MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex)); } _MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex); } _MTL_INLINE NS::UInteger MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex)); } _MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex); } _MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassSampleBufferAttachmentDescriptorArray)); } _MTL_INLINE
MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); } _MTL_INLINE void MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::AccelerationStructurePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); } _MTL_INLINE MTL::AccelerationStructurePassDescriptor* MTL::AccelerationStructurePassDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassDescriptor)); } _MTL_INLINE MTL::AccelerationStructurePassDescriptor* MTL::AccelerationStructurePassDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::AccelerationStructurePassDescriptor* MTL::AccelerationStructurePassDescriptor::accelerationStructurePassDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLAccelerationStructurePassDescriptor), _MTL_PRIVATE_SEL(accelerationStructurePassDescriptor)); } _MTL_INLINE MTL::AccelerationStructurePassSampleBufferAttachmentDescriptorArray* MTL::AccelerationStructurePassDescriptor::sampleBufferAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBufferAttachments)); } #pragma once namespace MTL { static const NS::UInteger AttributeStrideStatic = NS::UIntegerMax; class ArgumentEncoder : public NS::Referencing { public: class Device* device() const; NS::String* label() const; void setLabel(const NS::String* label); NS::UInteger encodedLength() const; NS::UInteger alignment() const; void setArgumentBuffer(const class Buffer* argumentBuffer,
NS::UInteger offset); void setArgumentBuffer(const class Buffer* argumentBuffer, NS::UInteger startOffset, NS::UInteger arrayElement); void setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range); void setTexture(const class Texture* texture, NS::UInteger index); void setTextures(const class Texture* const textures[], NS::Range range); void setSamplerState(const class SamplerState* sampler, NS::UInteger index); void setSamplerStates(const class SamplerState* const samplers[], NS::Range range); void* constantData(NS::UInteger index); void setRenderPipelineState(const class RenderPipelineState* pipeline, NS::UInteger index); void setRenderPipelineStates(const class RenderPipelineState* const pipelines[], NS::Range range); void setComputePipelineState(const class ComputePipelineState* pipeline, NS::UInteger index); void setComputePipelineStates(const class ComputePipelineState* const pipelines[], NS::Range range); void setIndirectCommandBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::UInteger index); void setIndirectCommandBuffers(const class IndirectCommandBuffer* const buffers[], NS::Range range); void setAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger index); class ArgumentEncoder* newArgumentEncoder(NS::UInteger index); void setVisibleFunctionTable(const class VisibleFunctionTable* visibleFunctionTable, NS::UInteger index); void setVisibleFunctionTables(const class VisibleFunctionTable* const visibleFunctionTables[], NS::Range range); void setIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger index); void setIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range); }; } _MTL_INLINE MTL::Device* MTL::ArgumentEncoder::device() const { return
Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device)); }

// --- MTL::ArgumentEncoder inline trampolines. ---
// NOTE(review): sendMessage<...> template arguments were stripped in this
// extracted copy; restored from the declared return types above.
_MTL_INLINE NS::String* MTL::ArgumentEncoder::label() const { return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE void MTL::ArgumentEncoder::setLabel(const NS::String* label) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label); }
_MTL_INLINE NS::UInteger MTL::ArgumentEncoder::encodedLength() const { return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(encodedLength)); }
_MTL_INLINE NS::UInteger MTL::ArgumentEncoder::alignment() const { return Object::sendMessage<NS::UInteger>(this, _MTL_PRIVATE_SEL(alignment)); }
_MTL_INLINE void MTL::ArgumentEncoder::setArgumentBuffer(const MTL::Buffer* argumentBuffer, NS::UInteger offset) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArgumentBuffer_offset_), argumentBuffer, offset); }
_MTL_INLINE void MTL::ArgumentEncoder::setArgumentBuffer(const MTL::Buffer* argumentBuffer, NS::UInteger startOffset, NS::UInteger arrayElement) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setArgumentBuffer_startOffset_arrayElement_), argumentBuffer, startOffset, arrayElement); }
_MTL_INLINE void MTL::ArgumentEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTexture_atIndex_), texture, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setTextures(const MTL::Texture* const textures[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setTextures_withRange_), textures, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerState_atIndex_), sampler, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setSamplerStates_withRange_), samplers, range); }
_MTL_INLINE void* MTL::ArgumentEncoder::constantData(NS::UInteger index) { return Object::sendMessage<void*>(this, _MTL_PRIVATE_SEL(constantDataAtIndex_), index); }
_MTL_INLINE void MTL::ArgumentEncoder::setRenderPipelineState(const MTL::RenderPipelineState* pipeline, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineState_atIndex_), pipeline, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setRenderPipelineStates(const MTL::RenderPipelineState* const pipelines[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setRenderPipelineStates_withRange_), pipelines, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setComputePipelineState(const MTL::ComputePipelineState* pipeline, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineState_atIndex_), pipeline, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setComputePipelineStates(const MTL::ComputePipelineState* const pipelines[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setComputePipelineStates_withRange_), pipelines, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setIndirectCommandBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndirectCommandBuffer_atIndex_), indirectCommandBuffer, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setIndirectCommandBuffers(const MTL::IndirectCommandBuffer* const buffers[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIndirectCommandBuffers_withRange_), buffers, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setAccelerationStructure_atIndex_), accelerationStructure, index); }
_MTL_INLINE MTL::ArgumentEncoder* MTL::ArgumentEncoder::newArgumentEncoder(NS::UInteger index) { return Object::sendMessage<MTL::ArgumentEncoder*>(this, _MTL_PRIVATE_SEL(newArgumentEncoderForBufferAtIndex_), index); }
_MTL_INLINE void MTL::ArgumentEncoder::setVisibleFunctionTable(const MTL::VisibleFunctionTable* visibleFunctionTable, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atIndex_), visibleFunctionTable, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const visibleFunctionTables[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withRange_), visibleFunctionTables, range); }
_MTL_INLINE void MTL::ArgumentEncoder::setIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger index) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTable_atIndex_), intersectionFunctionTable, index); }
_MTL_INLINE void MTL::ArgumentEncoder::setIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTables_withRange_), intersectionFunctionTables, range); }

// --- MTLBinaryArchive.hpp section. ---
#pragma once

namespace MTL
{
_MTL_CONST(NS::ErrorDomain, BinaryArchiveDomain);

_MTL_ENUM(NS::UInteger, BinaryArchiveError) {
    BinaryArchiveErrorNone = 0,
    BinaryArchiveErrorInvalidFile = 1,
    BinaryArchiveErrorUnexpectedElement = 2,
    BinaryArchiveErrorCompilationFailure = 3,
    BinaryArchiveErrorInternalError = 4,
};

class BinaryArchiveDescriptor : public NS::Copying<BinaryArchiveDescriptor>
{
public:
    static class BinaryArchiveDescriptor* alloc();
    class BinaryArchiveDescriptor*        init();
    NS::URL*                              url() const;
    void                                  setUrl(const NS::URL* url);
};

// Container of precompiled pipeline/function binaries.
class BinaryArchive : public NS::Referencing<BinaryArchive>
{
public:
    NS::String*   label() const;
    void          setLabel(const NS::String* label);
    class Device* device() const;
    bool          addComputePipelineFunctions(const class ComputePipelineDescriptor* descriptor, NS::Error** error);
    bool          addRenderPipelineFunctions(const class RenderPipelineDescriptor* descriptor, NS::Error** error);
    bool          addTileRenderPipelineFunctions(const class TileRenderPipelineDescriptor* descriptor, NS::Error** error);
    bool          addMeshRenderPipelineFunctions(const class MeshRenderPipelineDescriptor* descriptor, NS::Error** error);
    bool          addLibrary(const class StitchedLibraryDescriptor* descriptor, NS::Error** error);
    bool          serializeToURL(const NS::URL* url, NS::Error** error);
    bool          addFunction(const class FunctionDescriptor* descriptor, const class Library* library, NS::Error** error);
};
}

_MTL_PRIVATE_DEF_STR(NS::ErrorDomain, BinaryArchiveDomain);

_MTL_INLINE MTL::BinaryArchiveDescriptor* MTL::BinaryArchiveDescriptor::alloc() { return NS::Object::alloc<MTL::BinaryArchiveDescriptor>(_MTL_PRIVATE_CLS(MTLBinaryArchiveDescriptor)); }
_MTL_INLINE MTL::BinaryArchiveDescriptor* MTL::BinaryArchiveDescriptor::init() { return NS::Object::init<MTL::BinaryArchiveDescriptor>(); }
_MTL_INLINE NS::URL* MTL::BinaryArchiveDescriptor::url() const { return Object::sendMessage<NS::URL*>(this, _MTL_PRIVATE_SEL(url)); }
_MTL_INLINE void MTL::BinaryArchiveDescriptor::setUrl(const NS::URL* url) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setUrl_), url); }
_MTL_INLINE NS::String* MTL::BinaryArchive::label() const { return Object::sendMessage<NS::String*>(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE void MTL::BinaryArchive::setLabel(const NS::String* label) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(setLabel_), label); }
_MTL_INLINE MTL::Device* MTL::BinaryArchive::device() const { return Object::sendMessage<MTL::Device*>(this, _MTL_PRIVATE_SEL(device)); }
_MTL_INLINE bool MTL::BinaryArchive::addComputePipelineFunctions(const MTL::ComputePipelineDescriptor* descriptor, NS::Error** error) { return Object::sendMessage<bool>(this, _MTL_PRIVATE_SEL(addComputePipelineFunctionsWithDescriptor_error_),
descriptor, error); } _MTL_INLINE bool MTL::BinaryArchive::addRenderPipelineFunctions(const MTL::RenderPipelineDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(addRenderPipelineFunctionsWithDescriptor_error_), descriptor, error); } _MTL_INLINE bool MTL::BinaryArchive::addTileRenderPipelineFunctions(const MTL::TileRenderPipelineDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(addTileRenderPipelineFunctionsWithDescriptor_error_), descriptor, error); } _MTL_INLINE bool MTL::BinaryArchive::addMeshRenderPipelineFunctions(const MTL::MeshRenderPipelineDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(addMeshRenderPipelineFunctionsWithDescriptor_error_), descriptor, error); } _MTL_INLINE bool MTL::BinaryArchive::addLibrary(const MTL::StitchedLibraryDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(addLibraryWithDescriptor_error_), descriptor, error); } _MTL_INLINE bool MTL::BinaryArchive::serializeToURL(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(serializeToURL_error_), url, error); } _MTL_INLINE bool MTL::BinaryArchive::addFunction(const MTL::FunctionDescriptor* descriptor, const MTL::Library* library, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(addFunctionWithDescriptor_library_error_), descriptor, library, error); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, BlitOption) { BlitOptionNone = 0, BlitOptionDepthFromDepthStencil = 1, BlitOptionStencilFromDepthStencil = 2, BlitOptionRowLinearPVRTC = 4, }; class BlitCommandEncoder : public NS::Referencing { public: void synchronizeResource(const class Resource* resource); void synchronizeTexture(const class Texture* texture, NS::UInteger slice, NS::UInteger level); void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger 
sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin); void copyFromBuffer(const class Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin); void copyFromBuffer(const class Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin, MTL::BlitOption options); void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage); void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage, MTL::BlitOption options); void generateMipmaps(const class Texture* texture); void fillBuffer(const class Buffer* buffer, NS::Range range, uint8_t value); void copyFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, NS::UInteger sliceCount, NS::UInteger levelCount); void copyFromTexture(const class Texture* sourceTexture, const class Texture* destinationTexture); void copyFromBuffer(const class Buffer* 
sourceBuffer, NS::UInteger sourceOffset, const class Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger size); void updateFence(const class Fence* fence); void waitForFence(const class Fence* fence); void getTextureAccessCounters(const class Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice, bool resetCounters, const class Buffer* countersBuffer, NS::UInteger countersBufferOffset); void resetTextureAccessCounters(const class Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice); void optimizeContentsForGPUAccess(const class Texture* texture); void optimizeContentsForGPUAccess(const class Texture* texture, NS::UInteger slice, NS::UInteger level); void optimizeContentsForCPUAccess(const class Texture* texture); void optimizeContentsForCPUAccess(const class Texture* texture, NS::UInteger slice, NS::UInteger level); void resetCommandsInBuffer(const class IndirectCommandBuffer* buffer, NS::Range range); void copyIndirectCommandBuffer(const class IndirectCommandBuffer* source, NS::Range sourceRange, const class IndirectCommandBuffer* destination, NS::UInteger destinationIndex); void optimizeIndirectCommandBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range range); void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier); void resolveCounters(const class CounterSampleBuffer* sampleBuffer, NS::Range range, const class Buffer* destinationBuffer, NS::UInteger destinationOffset); }; } _MTL_INLINE void MTL::BlitCommandEncoder::synchronizeResource(const MTL::Resource* resource) { Object::sendMessage(this, _MTL_PRIVATE_SEL(synchronizeResource_), resource); } _MTL_INLINE void MTL::BlitCommandEncoder::synchronizeTexture(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level) { Object::sendMessage(this, _MTL_PRIVATE_SEL(synchronizeTexture_slice_level_), texture, slice, level); } _MTL_INLINE void 
MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin); } _MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceBuffer, sourceOffset, sourceBytesPerRow, sourceBytesPerImage, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin); } _MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin, MTL::BlitOption options) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_options_), sourceBuffer, sourceOffset, sourceBytesPerRow, sourceBytesPerImage, sourceSize, destinationTexture, destinationSlice, 
destinationLevel, destinationOrigin, options); }

// --- MTL::BlitCommandEncoder inline trampolines (continued). ---
// NOTE(review): sendMessage<...> template arguments restored from the declared
// return types; stripped in this extracted copy of metal-cpp.
_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationBuffer, destinationOffset, destinationBytesPerRow, destinationBytesPerImage); }
_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger destinationBytesPerRow, NS::UInteger destinationBytesPerImage, MTL::BlitOption options) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toBuffer_destinationOffset_destinationBytesPerRow_destinationBytesPerImage_options_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationBuffer, destinationOffset, destinationBytesPerRow, destinationBytesPerImage, options); }
_MTL_INLINE void MTL::BlitCommandEncoder::generateMipmaps(const MTL::Texture* texture) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(generateMipmapsForTexture_), texture); }
_MTL_INLINE void MTL::BlitCommandEncoder::fillBuffer(const MTL::Buffer* buffer, NS::Range range, uint8_t value) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(fillBuffer_range_value_), buffer, range, value); }
_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, NS::UInteger sliceCount, NS::UInteger levelCount) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_sourceSlice_sourceLevel_toTexture_destinationSlice_destinationLevel_sliceCount_levelCount_), sourceTexture, sourceSlice, sourceLevel, destinationTexture, destinationSlice, destinationLevel, sliceCount, levelCount); }
_MTL_INLINE void MTL::BlitCommandEncoder::copyFromTexture(const MTL::Texture* sourceTexture, const MTL::Texture* destinationTexture) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromTexture_toTexture_), sourceTexture, destinationTexture); }
_MTL_INLINE void MTL::BlitCommandEncoder::copyFromBuffer(const MTL::Buffer* sourceBuffer, NS::UInteger sourceOffset, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset, NS::UInteger size) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size_), sourceBuffer, sourceOffset, destinationBuffer, destinationOffset, size); }
_MTL_INLINE void MTL::BlitCommandEncoder::updateFence(const MTL::Fence* fence) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(updateFence_), fence); }
_MTL_INLINE void MTL::BlitCommandEncoder::waitForFence(const MTL::Fence* fence) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(waitForFence_), fence); }
_MTL_INLINE void MTL::BlitCommandEncoder::getTextureAccessCounters(const MTL::Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice, bool resetCounters, const MTL::Buffer* countersBuffer, NS::UInteger countersBufferOffset) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(getTextureAccessCounters_region_mipLevel_slice_resetCounters_countersBuffer_countersBufferOffset_), texture, region, mipLevel, slice, resetCounters, countersBuffer, countersBufferOffset); }
_MTL_INLINE void MTL::BlitCommandEncoder::resetTextureAccessCounters(const MTL::Texture* texture, MTL::Region region, NS::UInteger mipLevel, NS::UInteger slice) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resetTextureAccessCounters_region_mipLevel_slice_), texture, region, mipLevel, slice); }
_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForGPUAccess(const MTL::Texture* texture) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForGPUAccess_), texture); }
_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForGPUAccess(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForGPUAccess_slice_level_), texture, slice, level); }
_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForCPUAccess(const MTL::Texture* texture) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForCPUAccess_), texture); }
_MTL_INLINE void MTL::BlitCommandEncoder::optimizeContentsForCPUAccess(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeContentsForCPUAccess_slice_level_), texture, slice, level); }
_MTL_INLINE void MTL::BlitCommandEncoder::resetCommandsInBuffer(const MTL::IndirectCommandBuffer* buffer, NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resetCommandsInBuffer_withRange_), buffer, range); }
_MTL_INLINE void MTL::BlitCommandEncoder::copyIndirectCommandBuffer(const MTL::IndirectCommandBuffer* source, NS::Range sourceRange, const MTL::IndirectCommandBuffer* destination, NS::UInteger destinationIndex) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(copyIndirectCommandBuffer_sourceRange_destination_destinationIndex_), source, sourceRange, destination, destinationIndex); }
_MTL_INLINE void MTL::BlitCommandEncoder::optimizeIndirectCommandBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range range) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(optimizeIndirectCommandBuffer_withRange_), indirectCommandBuffer, range); }
_MTL_INLINE void MTL::BlitCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier); }
_MTL_INLINE void MTL::BlitCommandEncoder::resolveCounters(const MTL::CounterSampleBuffer* sampleBuffer, NS::Range range, const MTL::Buffer* destinationBuffer, NS::UInteger destinationOffset) { Object::sendMessage<void>(this, _MTL_PRIVATE_SEL(resolveCounters_inRange_destinationBuffer_destinationOffset_), sampleBuffer, range, destinationBuffer, destinationOffset); }

// --- MTLBlitPass.hpp section. ---
#pragma once

namespace MTL
{
class BlitPassSampleBufferAttachmentDescriptor : public NS::Copying<BlitPassSampleBufferAttachmentDescriptor>
{
public:
    static class BlitPassSampleBufferAttachmentDescriptor* alloc();
    class BlitPassSampleBufferAttachmentDescriptor*        init();
    class CounterSampleBuffer*                             sampleBuffer() const;
    void                                                   setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);
    NS::UInteger                                           startOfEncoderSampleIndex() const;
    void                                                   setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);
    NS::UInteger                                           endOfEncoderSampleIndex() const;
    void                                                   setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);
};

class BlitPassSampleBufferAttachmentDescriptorArray : public NS::Referencing<BlitPassSampleBufferAttachmentDescriptorArray>
{
public:
    static class BlitPassSampleBufferAttachmentDescriptorArray* alloc();
    class BlitPassSampleBufferAttachmentDescriptorArray*        init();
    class BlitPassSampleBufferAttachmentDescriptor*             object(NS::UInteger attachmentIndex);
    void                                                        setObject(const class BlitPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);
};

class BlitPassDescriptor : public NS::Copying<BlitPassDescriptor>
{
public:
    static class BlitPassDescriptor*                     alloc();
    class BlitPassDescriptor*                            init();
    static class BlitPassDescriptor*                     blitPassDescriptor();
    class BlitPassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;
};
}

_MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptor::alloc() { return
NS::Object::alloc(_MTL_PRIVATE_CLS(MTLBlitPassSampleBufferAttachmentDescriptor)); } _MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CounterSampleBuffer* MTL::BlitPassSampleBufferAttachmentDescriptor::sampleBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBuffer)); } _MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer); } _MTL_INLINE NS::UInteger MTL::BlitPassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex)); } _MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex); } _MTL_INLINE NS::UInteger MTL::BlitPassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex)); } _MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex); } _MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassSampleBufferAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLBlitPassSampleBufferAttachmentDescriptorArray)); } _MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassSampleBufferAttachmentDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptor* MTL::BlitPassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); } _MTL_INLINE void MTL::BlitPassSampleBufferAttachmentDescriptorArray::setObject(const MTL::BlitPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); } _MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLBlitPassDescriptor)); } _MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::BlitPassDescriptor* MTL::BlitPassDescriptor::blitPassDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLBlitPassDescriptor), _MTL_PRIVATE_SEL(blitPassDescriptor)); } _MTL_INLINE MTL::BlitPassSampleBufferAttachmentDescriptorArray* MTL::BlitPassDescriptor::sampleBufferAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBufferAttachments)); } #pragma once namespace MTL { class Buffer : public NS::Referencing { public: NS::UInteger length() const; void* contents(); void didModifyRange(NS::Range range); class Texture* newTexture(const class TextureDescriptor* descriptor, NS::UInteger offset, NS::UInteger bytesPerRow); void addDebugMarker(const NS::String* marker, NS::Range range); void removeAllDebugMarkers(); class Buffer* remoteStorageBuffer() const; class Buffer* newRemoteBufferViewForDevice(const class Device* device); uint64_t gpuAddress() const; }; } _MTL_INLINE NS::UInteger MTL::Buffer::length() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(length)); } _MTL_INLINE void* MTL::Buffer::contents() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(contents)); } _MTL_INLINE void MTL::Buffer::didModifyRange(NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(didModifyRange_), range); } _MTL_INLINE MTL::Texture* MTL::Buffer::newTexture(const MTL::TextureDescriptor* descriptor, NS::UInteger 
offset, NS::UInteger bytesPerRow) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_offset_bytesPerRow_), descriptor, offset, bytesPerRow); } _MTL_INLINE void MTL::Buffer::addDebugMarker(const NS::String* marker, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addDebugMarker_range_), marker, range); } _MTL_INLINE void MTL::Buffer::removeAllDebugMarkers() { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeAllDebugMarkers)); } _MTL_INLINE MTL::Buffer* MTL::Buffer::remoteStorageBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(remoteStorageBuffer)); } _MTL_INLINE MTL::Buffer* MTL::Buffer::newRemoteBufferViewForDevice(const MTL::Device* device) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRemoteBufferViewForDevice_), device); } _MTL_INLINE uint64_t MTL::Buffer::gpuAddress() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuAddress)); } #pragma once namespace MTL { _MTL_ENUM(NS::Integer, CaptureError) { CaptureErrorNotSupported = 1, CaptureErrorAlreadyCapturing = 2, CaptureErrorInvalidDescriptor = 3, }; _MTL_ENUM(NS::Integer, CaptureDestination) { CaptureDestinationDeveloperTools = 1, CaptureDestinationGPUTraceDocument = 2, }; class CaptureDescriptor : public NS::Copying { public: static class CaptureDescriptor* alloc(); class CaptureDescriptor* init(); id captureObject() const; void setCaptureObject(id captureObject); MTL::CaptureDestination destination() const; void setDestination(MTL::CaptureDestination destination); NS::URL* outputURL() const; void setOutputURL(const NS::URL* outputURL); }; class CaptureManager : public NS::Referencing { public: static class CaptureManager* alloc(); static class CaptureManager* sharedCaptureManager(); MTL::CaptureManager* init(); class CaptureScope* newCaptureScope(const class Device* device); class CaptureScope* newCaptureScope(const class CommandQueue* commandQueue); bool supportsDestination(MTL::CaptureDestination destination); bool startCapture(const 
class CaptureDescriptor* descriptor, NS::Error** error); void startCapture(const class Device* device); void startCapture(const class CommandQueue* commandQueue); void startCapture(const class CaptureScope* captureScope); void stopCapture(); class CaptureScope* defaultCaptureScope() const; void setDefaultCaptureScope(const class CaptureScope* defaultCaptureScope); bool isCapturing() const; }; } _MTL_INLINE MTL::CaptureDescriptor* MTL::CaptureDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCaptureDescriptor)); } _MTL_INLINE MTL::CaptureDescriptor* MTL::CaptureDescriptor::init() { return NS::Object::init(); } _MTL_INLINE id MTL::CaptureDescriptor::captureObject() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(captureObject)); } _MTL_INLINE void MTL::CaptureDescriptor::setCaptureObject(id captureObject) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCaptureObject_), captureObject); } _MTL_INLINE MTL::CaptureDestination MTL::CaptureDescriptor::destination() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(destination)); } _MTL_INLINE void MTL::CaptureDescriptor::setDestination(MTL::CaptureDestination destination) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDestination_), destination); } _MTL_INLINE NS::URL* MTL::CaptureDescriptor::outputURL() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(outputURL)); } _MTL_INLINE void MTL::CaptureDescriptor::setOutputURL(const NS::URL* outputURL) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOutputURL_), outputURL); } _MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCaptureManager)); } _MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::sharedCaptureManager() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLCaptureManager), _MTL_PRIVATE_SEL(sharedCaptureManager)); } _MTL_INLINE MTL::CaptureManager* MTL::CaptureManager::init() { return NS::Object::init(); } _MTL_INLINE MTL::CaptureScope* 
MTL::CaptureManager::newCaptureScope(const MTL::Device* device) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCaptureScopeWithDevice_), device); } _MTL_INLINE MTL::CaptureScope* MTL::CaptureManager::newCaptureScope(const MTL::CommandQueue* commandQueue) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCaptureScopeWithCommandQueue_), commandQueue); } _MTL_INLINE bool MTL::CaptureManager::supportsDestination(MTL::CaptureDestination destination) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsDestination_), destination); } _MTL_INLINE bool MTL::CaptureManager::startCapture(const MTL::CaptureDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startCaptureWithDescriptor_error_), descriptor, error); } _MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::Device* device) { Object::sendMessage(this, _MTL_PRIVATE_SEL(startCaptureWithDevice_), device); } _MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::CommandQueue* commandQueue) { Object::sendMessage(this, _MTL_PRIVATE_SEL(startCaptureWithCommandQueue_), commandQueue); } _MTL_INLINE void MTL::CaptureManager::startCapture(const MTL::CaptureScope* captureScope) { Object::sendMessage(this, _MTL_PRIVATE_SEL(startCaptureWithScope_), captureScope); } _MTL_INLINE void MTL::CaptureManager::stopCapture() { Object::sendMessage(this, _MTL_PRIVATE_SEL(stopCapture)); } _MTL_INLINE MTL::CaptureScope* MTL::CaptureManager::defaultCaptureScope() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(defaultCaptureScope)); } _MTL_INLINE void MTL::CaptureManager::setDefaultCaptureScope(const MTL::CaptureScope* defaultCaptureScope) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDefaultCaptureScope_), defaultCaptureScope); } _MTL_INLINE bool MTL::CaptureManager::isCapturing() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isCapturing)); } namespace MTL { class CaptureScope : public NS::Referencing { public: class Device* device() const; 
NS::String* label() const; void setLabel(const NS::String* pLabel); class CommandQueue* commandQueue() const; void beginScope(); void endScope(); }; } _MTL_INLINE MTL::Device* MTL::CaptureScope::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::CaptureScope::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::CaptureScope::setLabel(const NS::String* pLabel) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), pLabel); } _MTL_INLINE MTL::CommandQueue* MTL::CaptureScope::commandQueue() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandQueue)); } _MTL_INLINE void MTL::CaptureScope::beginScope() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(beginScope)); } _MTL_INLINE void MTL::CaptureScope::endScope() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endScope)); } #pragma once #include namespace MTL { _MTL_ENUM(NS::UInteger, CommandBufferStatus) { CommandBufferStatusNotEnqueued = 0, CommandBufferStatusEnqueued = 1, CommandBufferStatusCommitted = 2, CommandBufferStatusScheduled = 3, CommandBufferStatusCompleted = 4, CommandBufferStatusError = 5, }; _MTL_ENUM(NS::UInteger, CommandBufferError) { CommandBufferErrorNone = 0, CommandBufferErrorInternal = 1, CommandBufferErrorTimeout = 2, CommandBufferErrorPageFault = 3, CommandBufferErrorBlacklisted = 4, CommandBufferErrorAccessRevoked = 4, CommandBufferErrorNotPermitted = 7, CommandBufferErrorOutOfMemory = 8, CommandBufferErrorInvalidResource = 9, CommandBufferErrorMemoryless = 10, CommandBufferErrorDeviceRemoved = 11, CommandBufferErrorStackOverflow = 12, }; _MTL_OPTIONS(NS::UInteger, CommandBufferErrorOption) { CommandBufferErrorOptionNone = 0, CommandBufferErrorOptionEncoderExecutionStatus = 1, }; _MTL_ENUM(NS::Integer, CommandEncoderErrorState) { CommandEncoderErrorStateUnknown = 0, CommandEncoderErrorStateCompleted = 1, CommandEncoderErrorStateAffected = 2, CommandEncoderErrorStatePending 
// ---------------------------------------------------------------------------
// MTLCommandBuffer section: CommandBufferDescriptor, CommandBufferEncoderInfo,
// and the CommandBuffer wrapper class with all of its inline message-send
// shims (encoder factories, present/commit/wait, debug groups, residency).
// NOTE(review): `using HandlerFunction = std::function;` lost its template
// argument to extraction — upstream it is std::function<void(CommandBuffer*)>;
// other stripped angle-bracket arguments in this section should likewise be
// restored from upstream metal-cpp.
// CommandBufferErrorBlacklisted and CommandBufferErrorAccessRevoked both
// equal 4 on purpose: AccessRevoked is the renamed successor of Blacklisted
// (and values 5-6 are intentionally unassigned), matching Apple's headers.
= 3, CommandEncoderErrorStateFaulted = 4, }; class CommandBufferDescriptor : public NS::Copying { public: static class CommandBufferDescriptor* alloc(); class CommandBufferDescriptor* init(); bool retainedReferences() const; void setRetainedReferences(bool retainedReferences); MTL::CommandBufferErrorOption errorOptions() const; void setErrorOptions(MTL::CommandBufferErrorOption errorOptions); class LogState* logState() const; void setLogState(const class LogState* logState); }; class CommandBufferEncoderInfo : public NS::Referencing { public: NS::String* label() const; NS::Array* debugSignposts() const; MTL::CommandEncoderErrorState errorState() const; }; _MTL_ENUM(NS::UInteger, DispatchType) { DispatchTypeSerial = 0, DispatchTypeConcurrent = 1, }; class CommandBuffer; using CommandBufferHandler = void (^)(CommandBuffer*); using HandlerFunction = std::function; class CommandBuffer : public NS::Referencing { public: void addScheduledHandler(const HandlerFunction& function); void addCompletedHandler(const HandlerFunction& function); class Device* device() const; class CommandQueue* commandQueue() const; bool retainedReferences() const; MTL::CommandBufferErrorOption errorOptions() const; NS::String* label() const; void setLabel(const NS::String* label); CFTimeInterval kernelStartTime() const; CFTimeInterval kernelEndTime() const; class LogContainer* logs() const; CFTimeInterval GPUStartTime() const; CFTimeInterval GPUEndTime() const; void enqueue(); void commit(); void addScheduledHandler(const MTL::CommandBufferHandler block); void presentDrawable(const class Drawable* drawable); void presentDrawableAtTime(const class Drawable* drawable, CFTimeInterval presentationTime); void presentDrawableAfterMinimumDuration(const class Drawable* drawable, CFTimeInterval duration); void waitUntilScheduled(); void addCompletedHandler(const MTL::CommandBufferHandler block); void waitUntilCompleted(); MTL::CommandBufferStatus status() const; NS::Error* error() const; class
// Encoder-factory declarations (blit/render/compute/parallel/resource-state/
// acceleration-structure), event encode, debug groups, residency sets; then
// the CommandBufferDescriptor inline wrappers begin.
BlitCommandEncoder* blitCommandEncoder(); class RenderCommandEncoder* renderCommandEncoder(const class RenderPassDescriptor* renderPassDescriptor); class ComputeCommandEncoder* computeCommandEncoder(const class ComputePassDescriptor* computePassDescriptor); class BlitCommandEncoder* blitCommandEncoder(const class BlitPassDescriptor* blitPassDescriptor); class ComputeCommandEncoder* computeCommandEncoder(); class ComputeCommandEncoder* computeCommandEncoder(MTL::DispatchType dispatchType); void encodeWait(const class Event* event, uint64_t value); void encodeSignalEvent(const class Event* event, uint64_t value); class ParallelRenderCommandEncoder* parallelRenderCommandEncoder(const class RenderPassDescriptor* renderPassDescriptor); class ResourceStateCommandEncoder* resourceStateCommandEncoder(); class ResourceStateCommandEncoder* resourceStateCommandEncoder(const class ResourceStatePassDescriptor* resourceStatePassDescriptor); class AccelerationStructureCommandEncoder* accelerationStructureCommandEncoder(); class AccelerationStructureCommandEncoder* accelerationStructureCommandEncoder(const class AccelerationStructurePassDescriptor* descriptor); void pushDebugGroup(const NS::String* string); void popDebugGroup(); void useResidencySet(const class ResidencySet* residencySet); void useResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count); }; } _MTL_INLINE MTL::CommandBufferDescriptor* MTL::CommandBufferDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCommandBufferDescriptor)); } _MTL_INLINE MTL::CommandBufferDescriptor* MTL::CommandBufferDescriptor::init() { return NS::Object::init(); } _MTL_INLINE bool MTL::CommandBufferDescriptor::retainedReferences() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(retainedReferences)); } _MTL_INLINE void MTL::CommandBufferDescriptor::setRetainedReferences(bool retainedReferences) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRetainedReferences_), retainedReferences); }
// The std::function overloads of addScheduledHandler/addCompletedHandler wrap
// the callable in a __block Objective-C block and forward to the block-typed
// overload, so C++ callables can be used as completion handlers.
_MTL_INLINE MTL::CommandBufferErrorOption MTL::CommandBufferDescriptor::errorOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(errorOptions)); } _MTL_INLINE void MTL::CommandBufferDescriptor::setErrorOptions(MTL::CommandBufferErrorOption errorOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setErrorOptions_), errorOptions); } _MTL_INLINE MTL::LogState* MTL::CommandBufferDescriptor::logState() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(logState)); } _MTL_INLINE void MTL::CommandBufferDescriptor::setLogState(const MTL::LogState* logState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLogState_), logState); } _MTL_INLINE NS::String* MTL::CommandBufferEncoderInfo::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE NS::Array* MTL::CommandBufferEncoderInfo::debugSignposts() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(debugSignposts)); } _MTL_INLINE MTL::CommandEncoderErrorState MTL::CommandBufferEncoderInfo::errorState() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(errorState)); } _MTL_INLINE void MTL::CommandBuffer::addScheduledHandler(const HandlerFunction& function) { __block HandlerFunction blockFunction = function; addScheduledHandler(^(MTL::CommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); }); } _MTL_INLINE void MTL::CommandBuffer::addCompletedHandler(const HandlerFunction& function) { __block HandlerFunction blockFunction = function; addCompletedHandler(^(MTL::CommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); }); } _MTL_INLINE MTL::Device* MTL::CommandBuffer::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::CommandQueue* MTL::CommandBuffer::commandQueue() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandQueue)); } _MTL_INLINE bool MTL::CommandBuffer::retainedReferences() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(retainedReferences)); } _MTL_INLINE
MTL::CommandBufferErrorOption MTL::CommandBuffer::errorOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(errorOptions)); } _MTL_INLINE NS::String* MTL::CommandBuffer::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::CommandBuffer::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE CFTimeInterval MTL::CommandBuffer::kernelStartTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(kernelStartTime)); } _MTL_INLINE CFTimeInterval MTL::CommandBuffer::kernelEndTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(kernelEndTime)); } _MTL_INLINE MTL::LogContainer* MTL::CommandBuffer::logs() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(logs)); } _MTL_INLINE CFTimeInterval MTL::CommandBuffer::GPUStartTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(GPUStartTime)); } _MTL_INLINE CFTimeInterval MTL::CommandBuffer::GPUEndTime() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(GPUEndTime)); } _MTL_INLINE void MTL::CommandBuffer::enqueue() { Object::sendMessage(this, _MTL_PRIVATE_SEL(enqueue)); } _MTL_INLINE void MTL::CommandBuffer::commit() { Object::sendMessage(this, _MTL_PRIVATE_SEL(commit)); } _MTL_INLINE void MTL::CommandBuffer::addScheduledHandler(const MTL::CommandBufferHandler block) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addScheduledHandler_), block); } _MTL_INLINE void MTL::CommandBuffer::presentDrawable(const MTL::Drawable* drawable) { Object::sendMessage(this, _MTL_PRIVATE_SEL(presentDrawable_), drawable); } _MTL_INLINE void MTL::CommandBuffer::presentDrawableAtTime(const MTL::Drawable* drawable, CFTimeInterval presentationTime) { Object::sendMessage(this, _MTL_PRIVATE_SEL(presentDrawable_atTime_), drawable, presentationTime); } _MTL_INLINE void MTL::CommandBuffer::presentDrawableAfterMinimumDuration(const MTL::Drawable* drawable, CFTimeInterval duration) {
Object::sendMessage(this, _MTL_PRIVATE_SEL(presentDrawable_afterMinimumDuration_), drawable, duration); } _MTL_INLINE void MTL::CommandBuffer::waitUntilScheduled() { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitUntilScheduled)); } _MTL_INLINE void MTL::CommandBuffer::addCompletedHandler(const MTL::CommandBufferHandler block) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addCompletedHandler_), block); } _MTL_INLINE void MTL::CommandBuffer::waitUntilCompleted() { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitUntilCompleted)); } _MTL_INLINE MTL::CommandBufferStatus MTL::CommandBuffer::status() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(status)); } _MTL_INLINE NS::Error* MTL::CommandBuffer::error() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(error)); } _MTL_INLINE MTL::BlitCommandEncoder* MTL::CommandBuffer::blitCommandEncoder() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(blitCommandEncoder)); } _MTL_INLINE MTL::RenderCommandEncoder* MTL::CommandBuffer::renderCommandEncoder(const MTL::RenderPassDescriptor* renderPassDescriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(renderCommandEncoderWithDescriptor_), renderPassDescriptor); } _MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder(const MTL::ComputePassDescriptor* computePassDescriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(computeCommandEncoderWithDescriptor_), computePassDescriptor); } _MTL_INLINE MTL::BlitCommandEncoder* MTL::CommandBuffer::blitCommandEncoder(const MTL::BlitPassDescriptor* blitPassDescriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(blitCommandEncoderWithDescriptor_), blitPassDescriptor); } _MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(computeCommandEncoder)); } _MTL_INLINE MTL::ComputeCommandEncoder* MTL::CommandBuffer::computeCommandEncoder(MTL::DispatchType dispatchType) { return Object::sendMessage(this,
_MTL_PRIVATE_SEL(computeCommandEncoderWithDispatchType_), dispatchType); } _MTL_INLINE void MTL::CommandBuffer::encodeWait(const MTL::Event* event, uint64_t value) { Object::sendMessage(this, _MTL_PRIVATE_SEL(encodeWaitForEvent_value_), event, value); } _MTL_INLINE void MTL::CommandBuffer::encodeSignalEvent(const MTL::Event* event, uint64_t value) { Object::sendMessage(this, _MTL_PRIVATE_SEL(encodeSignalEvent_value_), event, value); } _MTL_INLINE MTL::ParallelRenderCommandEncoder* MTL::CommandBuffer::parallelRenderCommandEncoder(const MTL::RenderPassDescriptor* renderPassDescriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(parallelRenderCommandEncoderWithDescriptor_), renderPassDescriptor); } _MTL_INLINE MTL::ResourceStateCommandEncoder* MTL::CommandBuffer::resourceStateCommandEncoder() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceStateCommandEncoder)); } _MTL_INLINE MTL::ResourceStateCommandEncoder* MTL::CommandBuffer::resourceStateCommandEncoder(const MTL::ResourceStatePassDescriptor* resourceStatePassDescriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceStateCommandEncoderWithDescriptor_), resourceStatePassDescriptor); } _MTL_INLINE MTL::AccelerationStructureCommandEncoder* MTL::CommandBuffer::accelerationStructureCommandEncoder() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(accelerationStructureCommandEncoder)); } _MTL_INLINE MTL::AccelerationStructureCommandEncoder* MTL::CommandBuffer::accelerationStructureCommandEncoder(const MTL::AccelerationStructurePassDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(accelerationStructureCommandEncoderWithDescriptor_), descriptor); } _MTL_INLINE void MTL::CommandBuffer::pushDebugGroup(const NS::String* string) { Object::sendMessage(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string); } _MTL_INLINE void MTL::CommandBuffer::popDebugGroup() { Object::sendMessage(this, _MTL_PRIVATE_SEL(popDebugGroup)); } _MTL_INLINE void
// ---------------------------------------------------------------------------
// Residency-set tail of the CommandBuffer inlines, then the MTLCommandQueue
// section: the CommandQueue wrapper (command-buffer factories, debug-capture
// boundary, residency-set management) and CommandQueueDescriptor, with their
// inline message-send shims.
// NOTE(review): `#pragma once` below is fused mid-line by extraction; it must
// start its own line, and stripped angle-bracket template arguments (e.g.
// NS::Referencing<CommandQueue>, Object::sendMessage<T>) should be restored
// from upstream metal-cpp.
MTL::CommandBuffer::useResidencySet(const MTL::ResidencySet* residencySet) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResidencySet_), residencySet); } _MTL_INLINE void MTL::CommandBuffer::useResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResidencySets_count_), residencySets, count); } #pragma once namespace MTL { class CommandQueue : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; class CommandBuffer* commandBuffer(); class CommandBuffer* commandBuffer(const class CommandBufferDescriptor* descriptor); class CommandBuffer* commandBufferWithUnretainedReferences(); void insertDebugCaptureBoundary(); void addResidencySet(const class ResidencySet* residencySet); void addResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count); void removeResidencySet(const class ResidencySet* residencySet); void removeResidencySets(const class ResidencySet* const residencySets[], NS::UInteger count); }; class CommandQueueDescriptor : public NS::Copying { public: static class CommandQueueDescriptor* alloc(); class CommandQueueDescriptor* init(); NS::UInteger maxCommandBufferCount() const; void setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount); class LogState* logState() const; void setLogState(const class LogState* logState); }; } _MTL_INLINE NS::String* MTL::CommandQueue::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::CommandQueue::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::CommandQueue::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::CommandBuffer* MTL::CommandQueue::commandBuffer() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandBuffer)); } _MTL_INLINE MTL::CommandBuffer*
MTL::CommandQueue::commandBuffer(const MTL::CommandBufferDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandBufferWithDescriptor_), descriptor); } _MTL_INLINE MTL::CommandBuffer* MTL::CommandQueue::commandBufferWithUnretainedReferences() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandBufferWithUnretainedReferences)); } _MTL_INLINE void MTL::CommandQueue::insertDebugCaptureBoundary() { Object::sendMessage(this, _MTL_PRIVATE_SEL(insertDebugCaptureBoundary)); } _MTL_INLINE void MTL::CommandQueue::addResidencySet(const MTL::ResidencySet* residencySet) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addResidencySet_), residencySet); } _MTL_INLINE void MTL::CommandQueue::addResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addResidencySets_count_), residencySets, count); } _MTL_INLINE void MTL::CommandQueue::removeResidencySet(const MTL::ResidencySet* residencySet) { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeResidencySet_), residencySet); } _MTL_INLINE void MTL::CommandQueue::removeResidencySets(const MTL::ResidencySet* const residencySets[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeResidencySets_count_), residencySets, count); } _MTL_INLINE MTL::CommandQueueDescriptor* MTL::CommandQueueDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCommandQueueDescriptor)); } _MTL_INLINE MTL::CommandQueueDescriptor* MTL::CommandQueueDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::CommandQueueDescriptor::maxCommandBufferCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxCommandBufferCount)); } _MTL_INLINE void MTL::CommandQueueDescriptor::setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxCommandBufferCount_), maxCommandBufferCount); } _MTL_INLINE MTL::LogState* MTL::CommandQueueDescriptor::logState() const
// ---------------------------------------------------------------------------
// CommandQueueDescriptor inline tail, then the MTLComputeCommandEncoder
// section: packed indirect-argument structs, the ComputeCommandEncoder
// wrapper class (buffer/texture/sampler binding, dispatch, fences, resource
// residency, barriers, counter sampling), and its inline shims.
// NOTE(review): `#pragma once` below is fused mid-line (must start its own
// line), and stripped angle-bracket template arguments should be restored
// from upstream metal-cpp, as elsewhere in this vendored header.
{ return Object::sendMessage(this, _MTL_PRIVATE_SEL(logState)); } _MTL_INLINE void MTL::CommandQueueDescriptor::setLogState(const MTL::LogState* logState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLogState_), logState); } #pragma once namespace MTL { struct DispatchThreadgroupsIndirectArguments { uint32_t threadgroupsPerGrid[3]; } _MTL_PACKED; struct StageInRegionIndirectArguments { uint32_t stageInOrigin[3]; uint32_t stageInSize[3]; } _MTL_PACKED; class ComputeCommandEncoder : public NS::Referencing { public: MTL::DispatchType dispatchType() const; void setComputePipelineState(const class ComputePipelineState* state); void setBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setBufferOffset(NS::UInteger offset, NS::UInteger index); void setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range); void setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void setBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range); void setBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void setBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index); void setVisibleFunctionTable(const class VisibleFunctionTable* visibleFunctionTable, NS::UInteger bufferIndex); void setVisibleFunctionTables(const class VisibleFunctionTable* const visibleFunctionTables[], NS::Range range); void setIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex); void setIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range); void setAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex); void setTexture(const class
// Remaining ComputeCommandEncoder declarations: texture/sampler binding,
// threadgroup memory, stage-in region, direct and indirect dispatch, fences,
// resource/heap residency, indirect command buffer execution, barriers.
Texture* texture, NS::UInteger index); void setTextures(const class Texture* const textures[], NS::Range range); void setSamplerState(const class SamplerState* sampler, NS::UInteger index); void setSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void setSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range); void setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index); void setImageblockWidth(NS::UInteger width, NS::UInteger height); void setStageInRegion(MTL::Region region); void setStageInRegion(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset); void dispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup); void dispatchThreadgroups(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerThreadgroup); void dispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup); void updateFence(const class Fence* fence); void waitForFence(const class Fence* fence); void useResource(const class Resource* resource, MTL::ResourceUsage usage); void useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage); void useHeap(const class Heap* heap); void useHeaps(const class Heap* const heaps[], NS::UInteger count); void executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange); void executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandbuffer, const class Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset); void memoryBarrier(MTL::BarrierScope scope); void memoryBarrier(const class Resource* const resources[], NS::UInteger count); void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool
// ComputeCommandEncoder inline shims begin: each forwards straight to the
// corresponding Objective-C selector on the encoder.
barrier); }; } _MTL_INLINE MTL::DispatchType MTL::ComputeCommandEncoder::dispatchType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchType)); } _MTL_INLINE void MTL::ComputeCommandEncoder::setComputePipelineState(const MTL::ComputePipelineState* state) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setComputePipelineState_), state); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBytes(const void* bytes, NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBufferOffset(NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBufferOffset_atIndex_), offset, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffers_offsets_attributeStrides_withRange_), buffers, offsets, strides, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBufferOffset_attributeStride_atIndex_),
offset, stride, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBytes_length_attributeStride_atIndex_), bytes, length, stride, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setVisibleFunctionTable(const MTL::VisibleFunctionTable* visibleFunctionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atBufferIndex_), visibleFunctionTable, bufferIndex); } _MTL_INLINE void MTL::ComputeCommandEncoder::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const visibleFunctionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withBufferRange_), visibleFunctionTables, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex); } _MTL_INLINE void MTL::ComputeCommandEncoder::setIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex); } _MTL_INLINE void MTL::ComputeCommandEncoder::setTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTexture_atIndex_), texture, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setTextures(const MTL::Texture* const textures[],
NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTextures_withRange_), textures, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSamplerState_atIndex_), sampler, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSamplerStates_withRange_), samplers, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range); } _MTL_INLINE void MTL::ComputeCommandEncoder::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_atIndex_), length, index); } _MTL_INLINE void MTL::ComputeCommandEncoder::setImageblockWidth(NS::UInteger width, NS::UInteger height) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setImageblockWidth_height_), width, height); } _MTL_INLINE void MTL::ComputeCommandEncoder::setStageInRegion(MTL::Region region) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStageInRegion_), region); } _MTL_INLINE void MTL::ComputeCommandEncoder::setStageInRegion(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStageInRegionWithIndirectBuffer_indirectBufferOffset_), indirectBuffer, indirectBufferOffset); }
_MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchThreadgroups_threadsPerThreadgroup_), threadgroupsPerGrid, threadsPerThreadgroup); } _MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreadgroups(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerThreadgroup_), indirectBuffer, indirectBufferOffset, threadsPerThreadgroup); } _MTL_INLINE void MTL::ComputeCommandEncoder::dispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchThreads_threadsPerThreadgroup_), threadsPerGrid, threadsPerThreadgroup); } _MTL_INLINE void MTL::ComputeCommandEncoder::updateFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateFence_), fence); } _MTL_INLINE void MTL::ComputeCommandEncoder::waitForFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitForFence_), fence); } _MTL_INLINE void MTL::ComputeCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage); } _MTL_INLINE void MTL::ComputeCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResources_count_usage_), resources, count, usage); } _MTL_INLINE void MTL::ComputeCommandEncoder::useHeap(const MTL::Heap* heap) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeap_), heap); } _MTL_INLINE void MTL::ComputeCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count); } _MTL_INLINE void
// ---------------------------------------------------------------------------
// ComputeCommandEncoder inline tail (indirect-command-buffer execution,
// memory barriers, counter sampling), then the MTLComputePass section:
// ComputePassSampleBufferAttachmentDescriptor(+Array) and
// ComputePassDescriptor, with their inline shims.
// NOTE(review): `#pragma once` below is fused mid-line (must start its own
// line); stripped angle-bracket template arguments should be restored from
// upstream metal-cpp. The final `_MTL_INLINE void` is the start of a
// definition that continues beyond this chunk and is reproduced verbatim.
MTL::ComputeCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange) { Object::sendMessage(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_withRange_), indirectCommandBuffer, executionRange); } _MTL_INLINE void MTL::ComputeCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandbuffer, const MTL::Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_), indirectCommandbuffer, indirectRangeBuffer, indirectBufferOffset); } _MTL_INLINE void MTL::ComputeCommandEncoder::memoryBarrier(MTL::BarrierScope scope) { Object::sendMessage(this, _MTL_PRIVATE_SEL(memoryBarrierWithScope_), scope); } _MTL_INLINE void MTL::ComputeCommandEncoder::memoryBarrier(const MTL::Resource* const resources[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(memoryBarrierWithResources_count_), resources, count); } _MTL_INLINE void MTL::ComputeCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier) { Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier); } #pragma once namespace MTL { class ComputePassSampleBufferAttachmentDescriptor : public NS::Copying { public: static class ComputePassSampleBufferAttachmentDescriptor* alloc(); class ComputePassSampleBufferAttachmentDescriptor* init(); class CounterSampleBuffer* sampleBuffer() const; void setSampleBuffer(const class CounterSampleBuffer* sampleBuffer); NS::UInteger startOfEncoderSampleIndex() const; void setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex); NS::UInteger endOfEncoderSampleIndex() const; void setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex); }; class ComputePassSampleBufferAttachmentDescriptorArray : public NS::Referencing { public:
// Attachment-descriptor array (indexed-subscript access) and the
// ComputePassDescriptor factory plus dispatch-type accessors, followed by
// their inline shims.
static class ComputePassSampleBufferAttachmentDescriptorArray* alloc(); class ComputePassSampleBufferAttachmentDescriptorArray* init(); class ComputePassSampleBufferAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class ComputePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class ComputePassDescriptor : public NS::Copying { public: static class ComputePassDescriptor* alloc(); class ComputePassDescriptor* init(); static class ComputePassDescriptor* computePassDescriptor(); MTL::DispatchType dispatchType() const; void setDispatchType(MTL::DispatchType dispatchType); class ComputePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const; }; } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLComputePassSampleBufferAttachmentDescriptor)); } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CounterSampleBuffer* MTL::ComputePassSampleBufferAttachmentDescriptor::sampleBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBuffer)); } _MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer); } _MTL_INLINE NS::UInteger MTL::ComputePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex)); } _MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex); } _MTL_INLINE NS::UInteger
MTL::ComputePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex)); } _MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex); } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassSampleBufferAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLComputePassSampleBufferAttachmentDescriptorArray)); } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassSampleBufferAttachmentDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptor* MTL::ComputePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); } _MTL_INLINE void MTL::ComputePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::ComputePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); } _MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLComputePassDescriptor)); } _MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::ComputePassDescriptor* MTL::ComputePassDescriptor::computePassDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLComputePassDescriptor), _MTL_PRIVATE_SEL(computePassDescriptor)); } _MTL_INLINE MTL::DispatchType MTL::ComputePassDescriptor::dispatchType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchType)); } _MTL_INLINE void
MTL::ComputePassDescriptor::setDispatchType(MTL::DispatchType dispatchType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDispatchType_), dispatchType); } _MTL_INLINE MTL::ComputePassSampleBufferAttachmentDescriptorArray* MTL::ComputePassDescriptor::sampleBufferAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBufferAttachments)); } #pragma once #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, Mutability) { MutabilityDefault = 0, MutabilityMutable = 1, MutabilityImmutable = 2, }; _MTL_ENUM(NS::Integer, ShaderValidation) { ShaderValidationDefault = 0, ShaderValidationEnabled = 1, ShaderValidationDisabled = 2, }; class PipelineBufferDescriptor : public NS::Copying { public: static class PipelineBufferDescriptor* alloc(); class PipelineBufferDescriptor* init(); MTL::Mutability mutability() const; void setMutability(MTL::Mutability mutability); }; class PipelineBufferDescriptorArray : public NS::Referencing { public: static class PipelineBufferDescriptorArray* alloc(); class PipelineBufferDescriptorArray* init(); class PipelineBufferDescriptor* object(NS::UInteger bufferIndex); void setObject(const class PipelineBufferDescriptor* buffer, NS::UInteger bufferIndex); }; } _MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLPipelineBufferDescriptor)); } _MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Mutability MTL::PipelineBufferDescriptor::mutability() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mutability)); } _MTL_INLINE void MTL::PipelineBufferDescriptor::setMutability(MTL::Mutability mutability) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMutability_), mutability); } _MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::PipelineBufferDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLPipelineBufferDescriptorArray)); } _MTL_INLINE 
MTL::PipelineBufferDescriptorArray* MTL::PipelineBufferDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::PipelineBufferDescriptor* MTL::PipelineBufferDescriptorArray::object(NS::UInteger bufferIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), bufferIndex); } _MTL_INLINE void MTL::PipelineBufferDescriptorArray::setObject(const MTL::PipelineBufferDescriptor* buffer, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), buffer, bufferIndex); } namespace MTL { class ComputePipelineReflection : public NS::Referencing { public: static class ComputePipelineReflection* alloc(); class ComputePipelineReflection* init(); NS::Array* bindings() const; NS::Array* arguments() const; }; class ComputePipelineDescriptor : public NS::Copying { public: static class ComputePipelineDescriptor* alloc(); class ComputePipelineDescriptor* init(); NS::String* label() const; void setLabel(const NS::String* label); class Function* computeFunction() const; void setComputeFunction(const class Function* computeFunction); bool threadGroupSizeIsMultipleOfThreadExecutionWidth() const; void setThreadGroupSizeIsMultipleOfThreadExecutionWidth(bool threadGroupSizeIsMultipleOfThreadExecutionWidth); NS::UInteger maxTotalThreadsPerThreadgroup() const; void setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup); class StageInputOutputDescriptor* stageInputDescriptor() const; void setStageInputDescriptor(const class StageInputOutputDescriptor* stageInputDescriptor); class PipelineBufferDescriptorArray* buffers() const; bool supportIndirectCommandBuffers() const; void setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers); NS::Array* insertLibraries() const; void setInsertLibraries(const NS::Array* insertLibraries); NS::Array* preloadedLibraries() const; void setPreloadedLibraries(const NS::Array* preloadedLibraries); NS::Array* binaryArchives() const; void 
setBinaryArchives(const NS::Array* binaryArchives); void reset(); class LinkedFunctions* linkedFunctions() const; void setLinkedFunctions(const class LinkedFunctions* linkedFunctions); bool supportAddingBinaryFunctions() const; void setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions); NS::UInteger maxCallStackDepth() const; void setMaxCallStackDepth(NS::UInteger maxCallStackDepth); MTL::ShaderValidation shaderValidation() const; void setShaderValidation(MTL::ShaderValidation shaderValidation); }; class ComputePipelineState : public NS::Referencing { public: NS::String* label() const; class Device* device() const; NS::UInteger maxTotalThreadsPerThreadgroup() const; NS::UInteger threadExecutionWidth() const; NS::UInteger staticThreadgroupMemoryLength() const; NS::UInteger imageblockMemoryLength(MTL::Size imageblockDimensions); bool supportIndirectCommandBuffers() const; MTL::ResourceID gpuResourceID() const; class FunctionHandle* functionHandle(const class Function* function); class ComputePipelineState* newComputePipelineState(const NS::Array* functions, NS::Error** error); class VisibleFunctionTable* newVisibleFunctionTable(const class VisibleFunctionTableDescriptor* descriptor); class IntersectionFunctionTable* newIntersectionFunctionTable(const class IntersectionFunctionTableDescriptor* descriptor); MTL::ShaderValidation shaderValidation() const; }; } _MTL_INLINE MTL::ComputePipelineReflection* MTL::ComputePipelineReflection::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLComputePipelineReflection)); } _MTL_INLINE MTL::ComputePipelineReflection* MTL::ComputePipelineReflection::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::ComputePipelineReflection::bindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bindings)); } _MTL_INLINE NS::Array* MTL::ComputePipelineReflection::arguments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(arguments)); } _MTL_INLINE MTL::ComputePipelineDescriptor* 
MTL::ComputePipelineDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLComputePipelineDescriptor)); } _MTL_INLINE MTL::ComputePipelineDescriptor* MTL::ComputePipelineDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::ComputePipelineDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Function* MTL::ComputePipelineDescriptor::computeFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(computeFunction)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setComputeFunction(const MTL::Function* computeFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setComputeFunction_), computeFunction); } _MTL_INLINE bool MTL::ComputePipelineDescriptor::threadGroupSizeIsMultipleOfThreadExecutionWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadGroupSizeIsMultipleOfThreadExecutionWidth)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setThreadGroupSizeIsMultipleOfThreadExecutionWidth(bool threadGroupSizeIsMultipleOfThreadExecutionWidth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadGroupSizeIsMultipleOfThreadExecutionWidth_), threadGroupSizeIsMultipleOfThreadExecutionWidth); } _MTL_INLINE NS::UInteger MTL::ComputePipelineDescriptor::maxTotalThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup); } _MTL_INLINE MTL::StageInputOutputDescriptor* MTL::ComputePipelineDescriptor::stageInputDescriptor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stageInputDescriptor)); } _MTL_INLINE 
void MTL::ComputePipelineDescriptor::setStageInputDescriptor(const MTL::StageInputOutputDescriptor* stageInputDescriptor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStageInputDescriptor_), stageInputDescriptor); } _MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::ComputePipelineDescriptor::buffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(buffers)); } _MTL_INLINE bool MTL::ComputePipelineDescriptor::supportIndirectCommandBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers); } _MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::insertLibraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(insertLibraries)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setInsertLibraries(const NS::Array* insertLibraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInsertLibraries_), insertLibraries); } _MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::preloadedLibraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(preloadedLibraries)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setPreloadedLibraries(const NS::Array* preloadedLibraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPreloadedLibraries_), preloadedLibraries); } _MTL_INLINE NS::Array* MTL::ComputePipelineDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); } _MTL_INLINE void MTL::ComputePipelineDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } _MTL_INLINE MTL::LinkedFunctions* 
MTL::ComputePipelineDescriptor::linkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(linkedFunctions)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setLinkedFunctions(const MTL::LinkedFunctions* linkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLinkedFunctions_), linkedFunctions); } _MTL_INLINE bool MTL::ComputePipelineDescriptor::supportAddingBinaryFunctions() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportAddingBinaryFunctions)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportAddingBinaryFunctions_), supportAddingBinaryFunctions); } _MTL_INLINE NS::UInteger MTL::ComputePipelineDescriptor::maxCallStackDepth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxCallStackDepth)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setMaxCallStackDepth(NS::UInteger maxCallStackDepth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxCallStackDepth_), maxCallStackDepth); } _MTL_INLINE MTL::ShaderValidation MTL::ComputePipelineDescriptor::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); } _MTL_INLINE void MTL::ComputePipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation); } _MTL_INLINE NS::String* MTL::ComputePipelineState::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::Device* MTL::ComputePipelineState::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::UInteger MTL::ComputePipelineState::maxTotalThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup)); } _MTL_INLINE NS::UInteger MTL::ComputePipelineState::threadExecutionWidth() const { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(threadExecutionWidth)); } _MTL_INLINE NS::UInteger MTL::ComputePipelineState::staticThreadgroupMemoryLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(staticThreadgroupMemoryLength)); } _MTL_INLINE NS::UInteger MTL::ComputePipelineState::imageblockMemoryLength(MTL::Size imageblockDimensions) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(imageblockMemoryLengthForDimensions_), imageblockDimensions); } _MTL_INLINE bool MTL::ComputePipelineState::supportIndirectCommandBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers)); } _MTL_INLINE MTL::ResourceID MTL::ComputePipelineState::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } _MTL_INLINE MTL::FunctionHandle* MTL::ComputePipelineState::functionHandle(const MTL::Function* function) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionHandleWithFunction_), function); } _MTL_INLINE MTL::ComputePipelineState* MTL::ComputePipelineState::newComputePipelineState(const NS::Array* functions, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithAdditionalBinaryFunctions_error_), functions, error); } _MTL_INLINE MTL::VisibleFunctionTable* MTL::ComputePipelineState::newVisibleFunctionTable(const MTL::VisibleFunctionTableDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newVisibleFunctionTableWithDescriptor_), descriptor); } _MTL_INLINE MTL::IntersectionFunctionTable* MTL::ComputePipelineState::newIntersectionFunctionTable(const MTL::IntersectionFunctionTableDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIntersectionFunctionTableWithDescriptor_), descriptor); } _MTL_INLINE MTL::ShaderValidation MTL::ComputePipelineState::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); } #pragma once namespace MTL { struct CounterResultTimestamp { uint64_t 
timestamp; } _MTL_PACKED; struct CounterResultStageUtilization { uint64_t totalCycles; uint64_t vertexCycles; uint64_t tessellationCycles; uint64_t postTessellationVertexCycles; uint64_t fragmentCycles; uint64_t renderTargetCycles; } _MTL_PACKED; struct CounterResultStatistic { uint64_t tessellationInputPatches; uint64_t vertexInvocations; uint64_t postTessellationVertexInvocations; uint64_t clipperInvocations; uint64_t clipperPrimitivesOut; uint64_t fragmentInvocations; uint64_t fragmentsPassed; uint64_t computeKernelInvocations; } _MTL_PACKED; _MTL_CONST(NS::ErrorDomain, CounterErrorDomain); using CommonCounter = NS::String*; _MTL_CONST(CommonCounter, CommonCounterTimestamp); _MTL_CONST(CommonCounter, CommonCounterTessellationInputPatches); _MTL_CONST(CommonCounter, CommonCounterVertexInvocations); _MTL_CONST(CommonCounter, CommonCounterPostTessellationVertexInvocations); _MTL_CONST(CommonCounter, CommonCounterClipperInvocations); _MTL_CONST(CommonCounter, CommonCounterClipperPrimitivesOut); _MTL_CONST(CommonCounter, CommonCounterFragmentInvocations); _MTL_CONST(CommonCounter, CommonCounterFragmentsPassed); _MTL_CONST(CommonCounter, CommonCounterComputeKernelInvocations); _MTL_CONST(CommonCounter, CommonCounterTotalCycles); _MTL_CONST(CommonCounter, CommonCounterVertexCycles); _MTL_CONST(CommonCounter, CommonCounterTessellationCycles); _MTL_CONST(CommonCounter, CommonCounterPostTessellationVertexCycles); _MTL_CONST(CommonCounter, CommonCounterFragmentCycles); _MTL_CONST(CommonCounter, CommonCounterRenderTargetWriteCycles); using CommonCounterSet = NS::String*; _MTL_CONST(CommonCounterSet, CommonCounterSetTimestamp); _MTL_CONST(CommonCounterSet, CommonCounterSetStageUtilization); _MTL_CONST(CommonCounterSet, CommonCounterSetStatistic); class Counter : public NS::Referencing { public: NS::String* name() const; }; class CounterSet : public NS::Referencing { public: NS::String* name() const; NS::Array* counters() const; }; class CounterSampleBufferDescriptor : public 
NS::Copying { public: static class CounterSampleBufferDescriptor* alloc(); class CounterSampleBufferDescriptor* init(); class CounterSet* counterSet() const; void setCounterSet(const class CounterSet* counterSet); NS::String* label() const; void setLabel(const NS::String* label); MTL::StorageMode storageMode() const; void setStorageMode(MTL::StorageMode storageMode); NS::UInteger sampleCount() const; void setSampleCount(NS::UInteger sampleCount); }; class CounterSampleBuffer : public NS::Referencing { public: class Device* device() const; NS::String* label() const; NS::UInteger sampleCount() const; NS::Data* resolveCounterRange(NS::Range range); }; _MTL_ENUM(NS::Integer, CounterSampleBufferError) { CounterSampleBufferErrorOutOfMemory = 0, CounterSampleBufferErrorInvalid = 1, CounterSampleBufferErrorInternal = 2, }; static const NS::UInteger CounterErrorValue = static_cast(~0ULL); static const NS::UInteger CounterDontSample = static_cast(-1); } _MTL_PRIVATE_DEF_STR(NS::ErrorDomain, CounterErrorDomain); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTimestamp); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTessellationInputPatches); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterVertexInvocations); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterPostTessellationVertexInvocations); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterClipperInvocations); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterClipperPrimitivesOut); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentInvocations); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentsPassed); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterComputeKernelInvocations); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTotalCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterVertexCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterTessellationCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, 
CommonCounterPostTessellationVertexCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterFragmentCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounter, CommonCounterRenderTargetWriteCycles); _MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetTimestamp); _MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetStageUtilization); _MTL_PRIVATE_DEF_STR(MTL::CommonCounterSet, CommonCounterSetStatistic); _MTL_INLINE NS::String* MTL::Counter::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::String* MTL::CounterSet::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::Array* MTL::CounterSet::counters() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(counters)); } _MTL_INLINE MTL::CounterSampleBufferDescriptor* MTL::CounterSampleBufferDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCounterSampleBufferDescriptor)); } _MTL_INLINE MTL::CounterSampleBufferDescriptor* MTL::CounterSampleBufferDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CounterSet* MTL::CounterSampleBufferDescriptor::counterSet() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(counterSet)); } _MTL_INLINE void MTL::CounterSampleBufferDescriptor::setCounterSet(const MTL::CounterSet* counterSet) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCounterSet_), counterSet); } _MTL_INLINE NS::String* MTL::CounterSampleBufferDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::CounterSampleBufferDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::StorageMode MTL::CounterSampleBufferDescriptor::storageMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storageMode)); } _MTL_INLINE void MTL::CounterSampleBufferDescriptor::setStorageMode(MTL::StorageMode storageMode) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setStorageMode_), storageMode); } _MTL_INLINE NS::UInteger MTL::CounterSampleBufferDescriptor::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE void MTL::CounterSampleBufferDescriptor::setSampleCount(NS::UInteger sampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount); } _MTL_INLINE MTL::Device* MTL::CounterSampleBuffer::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::CounterSampleBuffer::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE NS::UInteger MTL::CounterSampleBuffer::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE NS::Data* MTL::CounterSampleBuffer::resolveCounterRange(NS::Range range) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resolveCounterRange_), range); } #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, CompareFunction) { CompareFunctionNever = 0, CompareFunctionLess = 1, CompareFunctionEqual = 2, CompareFunctionLessEqual = 3, CompareFunctionGreater = 4, CompareFunctionNotEqual = 5, CompareFunctionGreaterEqual = 6, CompareFunctionAlways = 7, }; _MTL_ENUM(NS::UInteger, StencilOperation) { StencilOperationKeep = 0, StencilOperationZero = 1, StencilOperationReplace = 2, StencilOperationIncrementClamp = 3, StencilOperationDecrementClamp = 4, StencilOperationInvert = 5, StencilOperationIncrementWrap = 6, StencilOperationDecrementWrap = 7, }; class StencilDescriptor : public NS::Copying { public: static class StencilDescriptor* alloc(); class StencilDescriptor* init(); MTL::CompareFunction stencilCompareFunction() const; void setStencilCompareFunction(MTL::CompareFunction stencilCompareFunction); MTL::StencilOperation stencilFailureOperation() const; void setStencilFailureOperation(MTL::StencilOperation stencilFailureOperation); MTL::StencilOperation depthFailureOperation() const; void 
setDepthFailureOperation(MTL::StencilOperation depthFailureOperation); MTL::StencilOperation depthStencilPassOperation() const; void setDepthStencilPassOperation(MTL::StencilOperation depthStencilPassOperation); uint32_t readMask() const; void setReadMask(uint32_t readMask); uint32_t writeMask() const; void setWriteMask(uint32_t writeMask); }; class DepthStencilDescriptor : public NS::Copying { public: static class DepthStencilDescriptor* alloc(); class DepthStencilDescriptor* init(); MTL::CompareFunction depthCompareFunction() const; void setDepthCompareFunction(MTL::CompareFunction depthCompareFunction); bool depthWriteEnabled() const; void setDepthWriteEnabled(bool depthWriteEnabled); class StencilDescriptor* frontFaceStencil() const; void setFrontFaceStencil(const class StencilDescriptor* frontFaceStencil); class StencilDescriptor* backFaceStencil() const; void setBackFaceStencil(const class StencilDescriptor* backFaceStencil); NS::String* label() const; void setLabel(const NS::String* label); }; class DepthStencilState : public NS::Referencing { public: NS::String* label() const; class Device* device() const; }; } _MTL_INLINE MTL::StencilDescriptor* MTL::StencilDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLStencilDescriptor)); } _MTL_INLINE MTL::StencilDescriptor* MTL::StencilDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CompareFunction MTL::StencilDescriptor::stencilCompareFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilCompareFunction)); } _MTL_INLINE void MTL::StencilDescriptor::setStencilCompareFunction(MTL::CompareFunction stencilCompareFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilCompareFunction_), stencilCompareFunction); } _MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::stencilFailureOperation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilFailureOperation)); } _MTL_INLINE void 
MTL::StencilDescriptor::setStencilFailureOperation(MTL::StencilOperation stencilFailureOperation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilFailureOperation_), stencilFailureOperation); } _MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::depthFailureOperation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthFailureOperation)); } _MTL_INLINE void MTL::StencilDescriptor::setDepthFailureOperation(MTL::StencilOperation depthFailureOperation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthFailureOperation_), depthFailureOperation); } _MTL_INLINE MTL::StencilOperation MTL::StencilDescriptor::depthStencilPassOperation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthStencilPassOperation)); } _MTL_INLINE void MTL::StencilDescriptor::setDepthStencilPassOperation(MTL::StencilOperation depthStencilPassOperation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStencilPassOperation_), depthStencilPassOperation); } _MTL_INLINE uint32_t MTL::StencilDescriptor::readMask() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(readMask)); } _MTL_INLINE void MTL::StencilDescriptor::setReadMask(uint32_t readMask) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setReadMask_), readMask); } _MTL_INLINE uint32_t MTL::StencilDescriptor::writeMask() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(writeMask)); } _MTL_INLINE void MTL::StencilDescriptor::setWriteMask(uint32_t writeMask) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setWriteMask_), writeMask); } _MTL_INLINE MTL::DepthStencilDescriptor* MTL::DepthStencilDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLDepthStencilDescriptor)); } _MTL_INLINE MTL::DepthStencilDescriptor* MTL::DepthStencilDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CompareFunction MTL::DepthStencilDescriptor::depthCompareFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthCompareFunction)); } _MTL_INLINE void 
MTL::DepthStencilDescriptor::setDepthCompareFunction(MTL::CompareFunction depthCompareFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthCompareFunction_), depthCompareFunction); } _MTL_INLINE bool MTL::DepthStencilDescriptor::depthWriteEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isDepthWriteEnabled)); } _MTL_INLINE void MTL::DepthStencilDescriptor::setDepthWriteEnabled(bool depthWriteEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthWriteEnabled_), depthWriteEnabled); } _MTL_INLINE MTL::StencilDescriptor* MTL::DepthStencilDescriptor::frontFaceStencil() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(frontFaceStencil)); } _MTL_INLINE void MTL::DepthStencilDescriptor::setFrontFaceStencil(const MTL::StencilDescriptor* frontFaceStencil) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFrontFaceStencil_), frontFaceStencil); } _MTL_INLINE MTL::StencilDescriptor* MTL::DepthStencilDescriptor::backFaceStencil() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(backFaceStencil)); } _MTL_INLINE void MTL::DepthStencilDescriptor::setBackFaceStencil(const MTL::StencilDescriptor* backFaceStencil) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBackFaceStencil_), backFaceStencil); } _MTL_INLINE NS::String* MTL::DepthStencilDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::DepthStencilDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE NS::String* MTL::DepthStencilState::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::Device* MTL::DepthStencilState::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } #pragma once #include #include namespace MTL { _MTL_ENUM(NS::Integer, IOCompressionMethod) { IOCompressionMethodZlib = 0, IOCompressionMethodLZFSE = 1, IOCompressionMethodLZ4 = 2, IOCompressionMethodLZMA = 3, 
IOCompressionMethodLZBitmap = 4, }; _MTL_ENUM(NS::UInteger, FeatureSet) { FeatureSet_iOS_GPUFamily1_v1 = 0, FeatureSet_iOS_GPUFamily2_v1 = 1, FeatureSet_iOS_GPUFamily1_v2 = 2, FeatureSet_iOS_GPUFamily2_v2 = 3, FeatureSet_iOS_GPUFamily3_v1 = 4, FeatureSet_iOS_GPUFamily1_v3 = 5, FeatureSet_iOS_GPUFamily2_v3 = 6, FeatureSet_iOS_GPUFamily3_v2 = 7, FeatureSet_iOS_GPUFamily1_v4 = 8, FeatureSet_iOS_GPUFamily2_v4 = 9, FeatureSet_iOS_GPUFamily3_v3 = 10, FeatureSet_iOS_GPUFamily4_v1 = 11, FeatureSet_iOS_GPUFamily1_v5 = 12, FeatureSet_iOS_GPUFamily2_v5 = 13, FeatureSet_iOS_GPUFamily3_v4 = 14, FeatureSet_iOS_GPUFamily4_v2 = 15, FeatureSet_iOS_GPUFamily5_v1 = 16, FeatureSet_macOS_GPUFamily1_v1 = 10000, FeatureSet_OSX_GPUFamily1_v1 = 10000, FeatureSet_macOS_GPUFamily1_v2 = 10001, FeatureSet_OSX_GPUFamily1_v2 = 10001, FeatureSet_macOS_ReadWriteTextureTier2 = 10002, FeatureSet_OSX_ReadWriteTextureTier2 = 10002, FeatureSet_macOS_GPUFamily1_v3 = 10003, FeatureSet_macOS_GPUFamily1_v4 = 10004, FeatureSet_macOS_GPUFamily2_v1 = 10005, FeatureSet_watchOS_GPUFamily1_v1 = 20000, FeatureSet_WatchOS_GPUFamily1_v1 = 20000, FeatureSet_watchOS_GPUFamily2_v1 = 20001, FeatureSet_WatchOS_GPUFamily2_v1 = 20001, FeatureSet_tvOS_GPUFamily1_v1 = 30000, FeatureSet_TVOS_GPUFamily1_v1 = 30000, FeatureSet_tvOS_GPUFamily1_v2 = 30001, FeatureSet_tvOS_GPUFamily1_v3 = 30002, FeatureSet_tvOS_GPUFamily2_v1 = 30003, FeatureSet_tvOS_GPUFamily1_v4 = 30004, FeatureSet_tvOS_GPUFamily2_v2 = 30005, }; _MTL_ENUM(NS::Integer, GPUFamily) { GPUFamilyApple1 = 1001, GPUFamilyApple2 = 1002, GPUFamilyApple3 = 1003, GPUFamilyApple4 = 1004, GPUFamilyApple5 = 1005, GPUFamilyApple6 = 1006, GPUFamilyApple7 = 1007, GPUFamilyApple8 = 1008, GPUFamilyApple9 = 1009, GPUFamilyMac1 = 2001, GPUFamilyMac2 = 2002, GPUFamilyCommon1 = 3001, GPUFamilyCommon2 = 3002, GPUFamilyCommon3 = 3003, GPUFamilyMacCatalyst1 = 4001, GPUFamilyMacCatalyst2 = 4002, GPUFamilyMetal3 = 5001, }; _MTL_ENUM(NS::UInteger, DeviceLocation) { DeviceLocationBuiltIn = 0, 
DeviceLocationSlot = 1, DeviceLocationExternal = 2, DeviceLocationUnspecified = NS::UIntegerMax, }; _MTL_OPTIONS(NS::UInteger, PipelineOption) { PipelineOptionNone = 0, PipelineOptionArgumentInfo = 1, PipelineOptionBindingInfo = 1, PipelineOptionBufferTypeInfo = 2, PipelineOptionFailOnBinaryArchiveMiss = 4, }; _MTL_ENUM(NS::UInteger, ReadWriteTextureTier) { ReadWriteTextureTierNone = 0, ReadWriteTextureTier1 = 1, ReadWriteTextureTier2 = 2, }; _MTL_ENUM(NS::UInteger, ArgumentBuffersTier) { ArgumentBuffersTier1 = 0, ArgumentBuffersTier2 = 1, }; _MTL_ENUM(NS::UInteger, SparseTextureRegionAlignmentMode) { SparseTextureRegionAlignmentModeOutward = 0, SparseTextureRegionAlignmentModeInward = 1, }; _MTL_ENUM(NS::Integer, SparsePageSize) { SparsePageSize16 = 101, SparsePageSize64 = 102, SparsePageSize256 = 103, }; struct AccelerationStructureSizes { NS::UInteger accelerationStructureSize; NS::UInteger buildScratchBufferSize; NS::UInteger refitScratchBufferSize; } _MTL_PACKED; _MTL_ENUM(NS::UInteger, CounterSamplingPoint) { CounterSamplingPointAtStageBoundary = 0, CounterSamplingPointAtDrawBoundary = 1, CounterSamplingPointAtDispatchBoundary = 2, CounterSamplingPointAtTileDispatchBoundary = 3, CounterSamplingPointAtBlitBoundary = 4, }; struct SizeAndAlign { NS::UInteger size; NS::UInteger align; } _MTL_PACKED; class ArgumentDescriptor : public NS::Copying { public: static class ArgumentDescriptor* alloc(); class ArgumentDescriptor* init(); static class ArgumentDescriptor* argumentDescriptor(); MTL::DataType dataType() const; void setDataType(MTL::DataType dataType); NS::UInteger index() const; void setIndex(NS::UInteger index); NS::UInteger arrayLength() const; void setArrayLength(NS::UInteger arrayLength); MTL::BindingAccess access() const; void setAccess(MTL::BindingAccess access); MTL::TextureType textureType() const; void setTextureType(MTL::TextureType textureType); NS::UInteger constantBlockAlignment() const; void setConstantBlockAlignment(NS::UInteger 
constantBlockAlignment); }; class Architecture : public NS::Copying { public: static class Architecture* alloc(); class Architecture* init(); NS::String* name() const; }; using DeviceNotificationName = NS::String*; _MTL_CONST(DeviceNotificationName, DeviceWasAddedNotification); _MTL_CONST(DeviceNotificationName, DeviceRemovalRequestedNotification); _MTL_CONST(DeviceNotificationName, DeviceWasRemovedNotification); _MTL_CONST(NS::ErrorUserInfoKey, CommandBufferEncoderInfoErrorKey); using DeviceNotificationHandlerBlock = void (^)(class Device* pDevice, DeviceNotificationName notifyName); using DeviceNotificationHandlerFunction = std::function; using AutoreleasedComputePipelineReflection = class ComputePipelineReflection*; using AutoreleasedRenderPipelineReflection = class RenderPipelineReflection*; using NewLibraryCompletionHandler = void (^)(class Library*, NS::Error*); using NewLibraryCompletionHandlerFunction = std::function; using NewRenderPipelineStateCompletionHandler = void (^)(class RenderPipelineState*, NS::Error*); using NewRenderPipelineStateCompletionHandlerFunction = std::function; using NewRenderPipelineStateWithReflectionCompletionHandler = void (^)(class RenderPipelineState*, class RenderPipelineReflection*, NS::Error*); using NewRenderPipelineStateWithReflectionCompletionHandlerFunction = std::function; using NewComputePipelineStateCompletionHandler = void (^)(class ComputePipelineState*, NS::Error*); using NewComputePipelineStateCompletionHandlerFunction = std::function; using NewComputePipelineStateWithReflectionCompletionHandler = void (^)(class ComputePipelineState*, class ComputePipelineReflection*, NS::Error*); using NewComputePipelineStateWithReflectionCompletionHandlerFunction = std::function; using Timestamp = std::uint64_t; MTL::Device* CreateSystemDefaultDevice(); NS::Array* CopyAllDevices(); NS::Array* CopyAllDevicesWithObserver(NS::Object** pOutObserver, DeviceNotificationHandlerBlock handler); NS::Array* 
CopyAllDevicesWithObserver(NS::Object** pOutObserver, const DeviceNotificationHandlerFunction& handler); void RemoveDeviceObserver(const NS::Object* pObserver); class Device : public NS::Referencing { public: void newLibrary(const NS::String* pSource, const class CompileOptions* pOptions, const NewLibraryCompletionHandlerFunction& completionHandler); void newLibrary(const class StitchedLibraryDescriptor* pDescriptor, const MTL::NewLibraryCompletionHandlerFunction& completionHandler); void newRenderPipelineState(const class RenderPipelineDescriptor* pDescriptor, const NewRenderPipelineStateCompletionHandlerFunction& completionHandler); void newRenderPipelineState(const class RenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler); void newRenderPipelineState(const class TileRenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler); void newComputePipelineState(const class Function* pFunction, const NewComputePipelineStateCompletionHandlerFunction& completionHandler); void newComputePipelineState(const class Function* pFunction, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler); void newComputePipelineState(const class ComputePipelineDescriptor* pDescriptor, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler); bool isHeadless() const; NS::String* name() const; uint64_t registryID() const; class Architecture* architecture() const; MTL::Size maxThreadsPerThreadgroup() const; bool lowPower() const; bool headless() const; bool removable() const; bool hasUnifiedMemory() const; uint64_t recommendedMaxWorkingSetSize() const; MTL::DeviceLocation location() const; NS::UInteger locationNumber() const; uint64_t maxTransferRate() const; bool 
depth24Stencil8PixelFormatSupported() const; MTL::ReadWriteTextureTier readWriteTextureSupport() const; MTL::ArgumentBuffersTier argumentBuffersSupport() const; bool rasterOrderGroupsSupported() const; bool supports32BitFloatFiltering() const; bool supports32BitMSAA() const; bool supportsQueryTextureLOD() const; bool supportsBCTextureCompression() const; bool supportsPullModelInterpolation() const; bool barycentricCoordsSupported() const; bool supportsShaderBarycentricCoordinates() const; NS::UInteger currentAllocatedSize() const; class LogState* newLogState(const class LogStateDescriptor* descriptor, NS::Error** error); class CommandQueue* newCommandQueue(); class CommandQueue* newCommandQueue(NS::UInteger maxCommandBufferCount); class CommandQueue* newCommandQueue(const class CommandQueueDescriptor* descriptor); MTL::SizeAndAlign heapTextureSizeAndAlign(const class TextureDescriptor* desc); MTL::SizeAndAlign heapBufferSizeAndAlign(NS::UInteger length, MTL::ResourceOptions options); class Heap* newHeap(const class HeapDescriptor* descriptor); class Buffer* newBuffer(NS::UInteger length, MTL::ResourceOptions options); class Buffer* newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options); class Buffer* newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options, void (^deallocator)(void*, NS::UInteger)); class DepthStencilState* newDepthStencilState(const class DepthStencilDescriptor* descriptor); class Texture* newTexture(const class TextureDescriptor* descriptor); class Texture* newTexture(const class TextureDescriptor* descriptor, const IOSurfaceRef iosurface, NS::UInteger plane); class Texture* newSharedTexture(const class TextureDescriptor* descriptor); class Texture* newSharedTexture(const class SharedTextureHandle* sharedHandle); class SamplerState* newSamplerState(const class SamplerDescriptor* descriptor); class Library* newDefaultLibrary(); class Library* newDefaultLibrary(const NS::Bundle* bundle, NS::Error** 
error); class Library* newLibrary(const NS::String* filepath, NS::Error** error); class Library* newLibrary(const NS::URL* url, NS::Error** error); class Library* newLibrary(const dispatch_data_t data, NS::Error** error); class Library* newLibrary(const NS::String* source, const class CompileOptions* options, NS::Error** error); void newLibrary(const NS::String* source, const class CompileOptions* options, const MTL::NewLibraryCompletionHandler completionHandler); class Library* newLibrary(const class StitchedLibraryDescriptor* descriptor, NS::Error** error); void newLibrary(const class StitchedLibraryDescriptor* descriptor, const MTL::NewLibraryCompletionHandler completionHandler); class RenderPipelineState* newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, NS::Error** error); class RenderPipelineState* newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error); void newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, const MTL::NewRenderPipelineStateCompletionHandler completionHandler); void newRenderPipelineState(const class RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler); class ComputePipelineState* newComputePipelineState(const class Function* computeFunction, NS::Error** error); class ComputePipelineState* newComputePipelineState(const class Function* computeFunction, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error); void newComputePipelineState(const class Function* computeFunction, const MTL::NewComputePipelineStateCompletionHandler completionHandler); void newComputePipelineState(const class Function* computeFunction, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler); 
class ComputePipelineState* newComputePipelineState(const class ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error); void newComputePipelineState(const class ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler); class Fence* newFence(); bool supportsFeatureSet(MTL::FeatureSet featureSet); bool supportsFamily(MTL::GPUFamily gpuFamily); bool supportsTextureSampleCount(NS::UInteger sampleCount); NS::UInteger minimumLinearTextureAlignmentForPixelFormat(MTL::PixelFormat format); NS::UInteger minimumTextureBufferAlignmentForPixelFormat(MTL::PixelFormat format); class RenderPipelineState* newRenderPipelineState(const class TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error); void newRenderPipelineState(const class TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler); class RenderPipelineState* newRenderPipelineState(const class MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error); void newRenderPipelineState(const class MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler); NS::UInteger maxThreadgroupMemoryLength() const; NS::UInteger maxArgumentBufferSamplerCount() const; bool programmableSamplePositionsSupported() const; void getDefaultSamplePositions(MTL::SamplePosition* positions, NS::UInteger count); class ArgumentEncoder* newArgumentEncoder(const NS::Array* arguments); bool supportsRasterizationRateMap(NS::UInteger layerCount); class RasterizationRateMap* 
newRasterizationRateMap(const class RasterizationRateMapDescriptor* descriptor); class IndirectCommandBuffer* newIndirectCommandBuffer(const class IndirectCommandBufferDescriptor* descriptor, NS::UInteger maxCount, MTL::ResourceOptions options); class Event* newEvent(); class SharedEvent* newSharedEvent(); class SharedEvent* newSharedEvent(const class SharedEventHandle* sharedEventHandle); uint64_t peerGroupID() const; uint32_t peerIndex() const; uint32_t peerCount() const; class IOFileHandle* newIOHandle(const NS::URL* url, NS::Error** error); class IOCommandQueue* newIOCommandQueue(const class IOCommandQueueDescriptor* descriptor, NS::Error** error); class IOFileHandle* newIOHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error); class IOFileHandle* newIOFileHandle(const NS::URL* url, NS::Error** error); class IOFileHandle* newIOFileHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error); MTL::Size sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount); NS::UInteger sparseTileSizeInBytes() const; void convertSparsePixelRegions(const MTL::Region* pixelRegions, MTL::Region* tileRegions, MTL::Size tileSize, MTL::SparseTextureRegionAlignmentMode mode, NS::UInteger numRegions); void convertSparseTileRegions(const MTL::Region* tileRegions, MTL::Region* pixelRegions, MTL::Size tileSize, NS::UInteger numRegions); NS::UInteger sparseTileSizeInBytes(MTL::SparsePageSize sparsePageSize); MTL::Size sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount, MTL::SparsePageSize sparsePageSize); NS::UInteger maxBufferLength() const; NS::Array* counterSets() const; class CounterSampleBuffer* newCounterSampleBuffer(const class CounterSampleBufferDescriptor* descriptor, NS::Error** error); void sampleTimestamps(MTL::Timestamp* cpuTimestamp, MTL::Timestamp* gpuTimestamp); class ArgumentEncoder* newArgumentEncoder(const class 
BufferBinding* bufferBinding); bool supportsCounterSampling(MTL::CounterSamplingPoint samplingPoint); bool supportsVertexAmplificationCount(NS::UInteger count); bool supportsDynamicLibraries() const; bool supportsRenderDynamicLibraries() const; class DynamicLibrary* newDynamicLibrary(const class Library* library, NS::Error** error); class DynamicLibrary* newDynamicLibrary(const NS::URL* url, NS::Error** error); class BinaryArchive* newBinaryArchive(const class BinaryArchiveDescriptor* descriptor, NS::Error** error); bool supportsRaytracing() const; MTL::AccelerationStructureSizes accelerationStructureSizes(const class AccelerationStructureDescriptor* descriptor); class AccelerationStructure* newAccelerationStructure(NS::UInteger size); class AccelerationStructure* newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor); MTL::SizeAndAlign heapAccelerationStructureSizeAndAlign(NS::UInteger size); MTL::SizeAndAlign heapAccelerationStructureSizeAndAlign(const class AccelerationStructureDescriptor* descriptor); bool supportsFunctionPointers() const; bool supportsFunctionPointersFromRender() const; bool supportsRaytracingFromRender() const; bool supportsPrimitiveMotionBlur() const; bool shouldMaximizeConcurrentCompilation() const; void setShouldMaximizeConcurrentCompilation(bool shouldMaximizeConcurrentCompilation); NS::UInteger maximumConcurrentCompilationTaskCount() const; class ResidencySet* newResidencySet(const class ResidencySetDescriptor* desc, NS::Error** error); }; } _MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLArgumentDescriptor)); } _MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::ArgumentDescriptor* MTL::ArgumentDescriptor::argumentDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLArgumentDescriptor), _MTL_PRIVATE_SEL(argumentDescriptor)); } _MTL_INLINE MTL::DataType 
// MTL::ArgumentDescriptor property accessors.
// Each wrapper forwards the property get/set to the wrapped Objective-C
// MTLArgumentDescriptor instance through Object::sendMessage.
// NOTE(review): upstream metal-cpp spells these calls with explicit template
// arguments (e.g. Object::sendMessage<MTL::DataType>(...)); the angle-bracketed
// template arguments appear to have been stripped from this copy during
// extraction -- verify against upstream MTLDevice.hpp before building.
MTL::ArgumentDescriptor::dataType() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(dataType));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setDataType(MTL::DataType dataType)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setDataType_), dataType);
}

_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::index() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(index));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setIndex(NS::UInteger index)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setIndex_), index);
}

_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::arrayLength() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(arrayLength));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setArrayLength(NS::UInteger arrayLength)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setArrayLength_), arrayLength);
}

_MTL_INLINE MTL::BindingAccess MTL::ArgumentDescriptor::access() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(access));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setAccess(MTL::BindingAccess access)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setAccess_), access);
}

_MTL_INLINE MTL::TextureType MTL::ArgumentDescriptor::textureType() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(textureType));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setTextureType(MTL::TextureType textureType)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setTextureType_), textureType);
}

_MTL_INLINE NS::UInteger MTL::ArgumentDescriptor::constantBlockAlignment() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(constantBlockAlignment));
}

_MTL_INLINE void MTL::ArgumentDescriptor::setConstantBlockAlignment(NS::UInteger constantBlockAlignment)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setConstantBlockAlignment_), constantBlockAlignment);
}

// MTL::Architecture allocation helpers: allocate and initialize the wrapped
// Objective-C MTLArchitecture object.
// NOTE(review): upstream uses NS::Object::alloc<Architecture>(...) /
// NS::Object::init<Architecture>() -- template arguments stripped here too.
_MTL_INLINE MTL::Architecture* MTL::Architecture::alloc()
{
    return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLArchitecture));
}

_MTL_INLINE MTL::Architecture* MTL::Architecture::init()
{
    return NS::Object::init();
}
// Returns the architecture's name string from the underlying MTLArchitecture.
_MTL_INLINE NS::String* MTL::Architecture::name() const
{
    return Object::sendMessage(this, _MTL_PRIVATE_SEL(name));
}

// Definitions of the device-notification name constants and the error-info
// key declared earlier with _MTL_CONST.
_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceWasAddedNotification);
_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceRemovalRequestedNotification);
_MTL_PRIVATE_DEF_WEAK_CONST(MTL::DeviceNotificationName, DeviceWasRemovedNotification);
_MTL_PRIVATE_DEF_CONST(NS::ErrorUserInfoKey, CommandBufferEncoderInfoErrorKey);

// Shims over the C entry points of Metal.framework; emitted only in the one
// translation unit that defines MTL_PRIVATE_IMPLEMENTATION.
#if defined(MTL_PRIVATE_IMPLEMENTATION)

extern "C" MTL::Device* MTLCreateSystemDefaultDevice();
extern "C" NS::Array* MTLCopyAllDevices();
extern "C" NS::Array* MTLCopyAllDevicesWithObserver(NS::Object**, MTL::DeviceNotificationHandlerBlock);
extern "C" void MTLRemoveDeviceObserver(const NS::Object*);

// NOTE(review): the include target was lost during extraction (upstream
// metal-cpp includes <TargetConditionals.h> here, which provides the
// TARGET_OS_OSX macro tested below) -- restore it before building.
#include

_NS_EXPORT MTL::Device* MTL::CreateSystemDefaultDevice()
{
    return ::MTLCreateSystemDefaultDevice();
}

_NS_EXPORT NS::Array* MTL::CopyAllDevices()
{
    // Guarded by deployment target: forwarded only when the minimum OS is
    // iOS 18.0+ or macOS 10.11+; otherwise reports no devices.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 180000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
    return ::MTLCopyAllDevices();
#else
    return nullptr;
#endif // __IPHONE_18
}

_NS_EXPORT NS::Array* MTL::CopyAllDevicesWithObserver(NS::Object** pOutObserver, DeviceNotificationHandlerBlock handler)
{
    // Device add/remove observation is only forwarded on macOS builds.
#if TARGET_OS_OSX
    return ::MTLCopyAllDevicesWithObserver(pOutObserver, handler);
#else
    (void)pOutObserver;
    (void)handler;
    return nullptr;
#endif // TARGET_OS_OSX
}

_NS_EXPORT NS::Array* MTL::CopyAllDevicesWithObserver(NS::Object** pOutObserver, const DeviceNotificationHandlerFunction& handler)
{
    // Adapts the std::function overload to the block-based overload: the
    // handler is copied into __block storage so the Objective-C block can
    // invoke it when a notification fires.
    __block DeviceNotificationHandlerFunction function = handler;
    return CopyAllDevicesWithObserver(pOutObserver, ^(Device* pDevice, DeviceNotificationName pNotificationName) { function(pDevice, pNotificationName); });
}

_NS_EXPORT void MTL::RemoveDeviceObserver(const NS::Object* pObserver)
{
    (void)pObserver; // silences the unused-parameter warning on non-macOS builds
#if TARGET_OS_OSX
    ::MTLRemoveDeviceObserver(pObserver);
#endif // TARGET_OS_OSX
}

#endif // MTL_PRIVATE_IMPLEMENTATION

// Continues on the next source line: Device::newLibrary(source, options,
// std::function completion handler) adapter.
_MTL_INLINE void MTL::Device::newLibrary(const
NS::String* pSource, const CompileOptions* pOptions, const NewLibraryCompletionHandlerFunction& completionHandler) { __block NewLibraryCompletionHandlerFunction blockCompletionHandler = completionHandler; newLibrary(pSource, pOptions, ^(Library* pLibrary, NS::Error* pError) { blockCompletionHandler(pLibrary, pError); }); } _MTL_INLINE void MTL::Device::newLibrary(const class StitchedLibraryDescriptor* pDescriptor, const MTL::NewLibraryCompletionHandlerFunction& completionHandler) { __block NewLibraryCompletionHandlerFunction blockCompletionHandler = completionHandler; newLibrary(pDescriptor, ^(Library* pLibrary, NS::Error* pError) { blockCompletionHandler(pLibrary, pError); }); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const RenderPipelineDescriptor* pDescriptor, const NewRenderPipelineStateCompletionHandlerFunction& completionHandler) { __block NewRenderPipelineStateCompletionHandlerFunction blockCompletionHandler = completionHandler; newRenderPipelineState(pDescriptor, ^(RenderPipelineState* pPipelineState, NS::Error* pError) { blockCompletionHandler(pPipelineState, pError); }); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const RenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler) { __block NewRenderPipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler; newRenderPipelineState(pDescriptor, options, ^(RenderPipelineState* pPipelineState, class RenderPipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); }); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const TileRenderPipelineDescriptor* pDescriptor, PipelineOption options, const NewRenderPipelineStateWithReflectionCompletionHandlerFunction& completionHandler) { __block NewRenderPipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler; 
newRenderPipelineState(pDescriptor, options, ^(RenderPipelineState* pPipelineState, class RenderPipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); }); } _MTL_INLINE void MTL::Device::newComputePipelineState(const class Function* pFunction, const NewComputePipelineStateCompletionHandlerFunction& completionHandler) { __block NewComputePipelineStateCompletionHandlerFunction blockCompletionHandler = completionHandler; newComputePipelineState(pFunction, ^(ComputePipelineState* pPipelineState, NS::Error* pError) { blockCompletionHandler(pPipelineState, pError); }); } _MTL_INLINE void MTL::Device::newComputePipelineState(const Function* pFunction, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler) { __block NewComputePipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler; newComputePipelineState(pFunction, options, ^(ComputePipelineState* pPipelineState, ComputePipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); }); } _MTL_INLINE void MTL::Device::newComputePipelineState(const ComputePipelineDescriptor* pDescriptor, PipelineOption options, const NewComputePipelineStateWithReflectionCompletionHandlerFunction& completionHandler) { __block NewComputePipelineStateWithReflectionCompletionHandlerFunction blockCompletionHandler = completionHandler; newComputePipelineState(pDescriptor, options, ^(ComputePipelineState* pPipelineState, ComputePipelineReflection* pReflection, NS::Error* pError) { blockCompletionHandler(pPipelineState, pReflection, pError); }); } _MTL_INLINE bool MTL::Device::isHeadless() const { return headless(); } _MTL_INLINE NS::String* MTL::Device::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE uint64_t MTL::Device::registryID() const { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(registryID)); } _MTL_INLINE MTL::Architecture* MTL::Device::architecture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(architecture)); } _MTL_INLINE MTL::Size MTL::Device::maxThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxThreadsPerThreadgroup)); } _MTL_INLINE bool MTL::Device::lowPower() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isLowPower)); } _MTL_INLINE bool MTL::Device::headless() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isHeadless)); } _MTL_INLINE bool MTL::Device::removable() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isRemovable)); } _MTL_INLINE bool MTL::Device::hasUnifiedMemory() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(hasUnifiedMemory)); } _MTL_INLINE uint64_t MTL::Device::recommendedMaxWorkingSetSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(recommendedMaxWorkingSetSize)); } _MTL_INLINE MTL::DeviceLocation MTL::Device::location() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(location)); } _MTL_INLINE NS::UInteger MTL::Device::locationNumber() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(locationNumber)); } _MTL_INLINE uint64_t MTL::Device::maxTransferRate() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTransferRate)); } _MTL_INLINE bool MTL::Device::depth24Stencil8PixelFormatSupported() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(isDepth24Stencil8PixelFormatSupported)); } _MTL_INLINE MTL::ReadWriteTextureTier MTL::Device::readWriteTextureSupport() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(readWriteTextureSupport)); } _MTL_INLINE MTL::ArgumentBuffersTier MTL::Device::argumentBuffersSupport() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(argumentBuffersSupport)); } _MTL_INLINE bool MTL::Device::rasterOrderGroupsSupported() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(areRasterOrderGroupsSupported)); } 
_MTL_INLINE bool MTL::Device::supports32BitFloatFiltering() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supports32BitFloatFiltering)); } _MTL_INLINE bool MTL::Device::supports32BitMSAA() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supports32BitMSAA)); } _MTL_INLINE bool MTL::Device::supportsQueryTextureLOD() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsQueryTextureLOD)); } _MTL_INLINE bool MTL::Device::supportsBCTextureCompression() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsBCTextureCompression)); } _MTL_INLINE bool MTL::Device::supportsPullModelInterpolation() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsPullModelInterpolation)); } _MTL_INLINE bool MTL::Device::barycentricCoordsSupported() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(areBarycentricCoordsSupported)); } _MTL_INLINE bool MTL::Device::supportsShaderBarycentricCoordinates() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsShaderBarycentricCoordinates)); } _MTL_INLINE NS::UInteger MTL::Device::currentAllocatedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(currentAllocatedSize)); } _MTL_INLINE MTL::LogState* MTL::Device::newLogState(const MTL::LogStateDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLogStateWithDescriptor_error_), descriptor, error); } _MTL_INLINE MTL::CommandQueue* MTL::Device::newCommandQueue() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCommandQueue)); } _MTL_INLINE MTL::CommandQueue* MTL::Device::newCommandQueue(NS::UInteger maxCommandBufferCount) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCommandQueueWithMaxCommandBufferCount_), maxCommandBufferCount); } _MTL_INLINE MTL::CommandQueue* MTL::Device::newCommandQueue(const MTL::CommandQueueDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCommandQueueWithDescriptor_), 
descriptor); } _MTL_INLINE MTL::SizeAndAlign MTL::Device::heapTextureSizeAndAlign(const MTL::TextureDescriptor* desc) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heapTextureSizeAndAlignWithDescriptor_), desc); } _MTL_INLINE MTL::SizeAndAlign MTL::Device::heapBufferSizeAndAlign(NS::UInteger length, MTL::ResourceOptions options) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heapBufferSizeAndAlignWithLength_options_), length, options); } _MTL_INLINE MTL::Heap* MTL::Device::newHeap(const MTL::HeapDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newHeapWithDescriptor_), descriptor); } _MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(NS::UInteger length, MTL::ResourceOptions options) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_), length, options); } _MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBufferWithBytes_length_options_), pointer, length, options); } _MTL_INLINE MTL::Buffer* MTL::Device::newBuffer(const void* pointer, NS::UInteger length, MTL::ResourceOptions options, void (^deallocator)(void*, NS::UInteger)) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBufferWithBytesNoCopy_length_options_deallocator_), pointer, length, options, deallocator); } _MTL_INLINE MTL::DepthStencilState* MTL::Device::newDepthStencilState(const MTL::DepthStencilDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newDepthStencilStateWithDescriptor_), descriptor); } _MTL_INLINE MTL::Texture* MTL::Device::newTexture(const MTL::TextureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_), descriptor); } _MTL_INLINE MTL::Texture* MTL::Device::newTexture(const MTL::TextureDescriptor* descriptor, const IOSurfaceRef iosurface, NS::UInteger plane) { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(newTextureWithDescriptor_iosurface_plane_), descriptor, iosurface, plane); } _MTL_INLINE MTL::Texture* MTL::Device::newSharedTexture(const MTL::TextureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedTextureWithDescriptor_), descriptor); } _MTL_INLINE MTL::Texture* MTL::Device::newSharedTexture(const MTL::SharedTextureHandle* sharedHandle) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedTextureWithHandle_), sharedHandle); } _MTL_INLINE MTL::SamplerState* MTL::Device::newSamplerState(const MTL::SamplerDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSamplerStateWithDescriptor_), descriptor); } _MTL_INLINE MTL::Library* MTL::Device::newDefaultLibrary() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newDefaultLibrary)); } _MTL_INLINE MTL::Library* MTL::Device::newDefaultLibrary(const NS::Bundle* bundle, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newDefaultLibraryWithBundle_error_), bundle, error); } _MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::String* filepath, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithFile_error_), filepath, error); } _MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithURL_error_), url, error); } _MTL_INLINE MTL::Library* MTL::Device::newLibrary(const dispatch_data_t data, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithData_error_), data, error); } _MTL_INLINE MTL::Library* MTL::Device::newLibrary(const NS::String* source, const MTL::CompileOptions* options, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithSource_options_error_), source, options, error); } _MTL_INLINE void MTL::Device::newLibrary(const NS::String* source, const MTL::CompileOptions* options, const 
MTL::NewLibraryCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithSource_options_completionHandler_), source, options, completionHandler); } _MTL_INLINE MTL::Library* MTL::Device::newLibrary(const MTL::StitchedLibraryDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithStitchedDescriptor_error_), descriptor, error); } _MTL_INLINE void MTL::Device::newLibrary(const MTL::StitchedLibraryDescriptor* descriptor, const MTL::NewLibraryCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newLibraryWithStitchedDescriptor_completionHandler_), descriptor, completionHandler); } _MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_error_), descriptor, error); } _MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_options_reflection_error_), descriptor, options, reflection, error); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, const MTL::NewRenderPipelineStateCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_completionHandler_), descriptor, completionHandler); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::RenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithDescriptor_options_completionHandler_), 
descriptor, options, completionHandler); } _MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_error_), computeFunction, error); } _MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_options_reflection_error_), computeFunction, options, reflection, error); } _MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, const MTL::NewComputePipelineStateCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_completionHandler_), computeFunction, completionHandler); } _MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::Function* computeFunction, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithFunction_options_completionHandler_), computeFunction, options, completionHandler); } _MTL_INLINE MTL::ComputePipelineState* MTL::Device::newComputePipelineState(const MTL::ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedComputePipelineReflection* reflection, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newComputePipelineStateWithDescriptor_options_reflection_error_), descriptor, options, reflection, error); } _MTL_INLINE void MTL::Device::newComputePipelineState(const MTL::ComputePipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewComputePipelineStateWithReflectionCompletionHandler completionHandler) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(newComputePipelineStateWithDescriptor_options_completionHandler_), descriptor, options, completionHandler); } _MTL_INLINE MTL::Fence* MTL::Device::newFence() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newFence)); } _MTL_INLINE bool MTL::Device::supportsFeatureSet(MTL::FeatureSet featureSet) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsFeatureSet_), featureSet); } _MTL_INLINE bool MTL::Device::supportsFamily(MTL::GPUFamily gpuFamily) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsFamily_), gpuFamily); } _MTL_INLINE bool MTL::Device::supportsTextureSampleCount(NS::UInteger sampleCount) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsTextureSampleCount_), sampleCount); } _MTL_INLINE NS::UInteger MTL::Device::minimumLinearTextureAlignmentForPixelFormat(MTL::PixelFormat format) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(minimumLinearTextureAlignmentForPixelFormat_), format); } _MTL_INLINE NS::UInteger MTL::Device::minimumTextureBufferAlignmentForPixelFormat(MTL::PixelFormat format) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(minimumTextureBufferAlignmentForPixelFormat_), format); } _MTL_INLINE MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithTileDescriptor_options_reflection_error_), descriptor, options, reflection, error); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::TileRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithTileDescriptor_options_completionHandler_), descriptor, options, completionHandler); } _MTL_INLINE 
MTL::RenderPipelineState* MTL::Device::newRenderPipelineState(const MTL::MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::AutoreleasedRenderPipelineReflection* reflection, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithMeshDescriptor_options_reflection_error_), descriptor, options, reflection, error); } _MTL_INLINE void MTL::Device::newRenderPipelineState(const MTL::MeshRenderPipelineDescriptor* descriptor, MTL::PipelineOption options, const MTL::NewRenderPipelineStateWithReflectionCompletionHandler completionHandler) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithMeshDescriptor_options_completionHandler_), descriptor, options, completionHandler); } _MTL_INLINE NS::UInteger MTL::Device::maxThreadgroupMemoryLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxThreadgroupMemoryLength)); } _MTL_INLINE NS::UInteger MTL::Device::maxArgumentBufferSamplerCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxArgumentBufferSamplerCount)); } _MTL_INLINE bool MTL::Device::programmableSamplePositionsSupported() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(areProgrammableSamplePositionsSupported)); } _MTL_INLINE void MTL::Device::getDefaultSamplePositions(MTL::SamplePosition* positions, NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(getDefaultSamplePositions_count_), positions, count); } _MTL_INLINE MTL::ArgumentEncoder* MTL::Device::newArgumentEncoder(const NS::Array* arguments) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithArguments_), arguments); } _MTL_INLINE bool MTL::Device::supportsRasterizationRateMap(NS::UInteger layerCount) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsRasterizationRateMapWithLayerCount_), layerCount); } _MTL_INLINE MTL::RasterizationRateMap* MTL::Device::newRasterizationRateMap(const MTL::RasterizationRateMapDescriptor* descriptor) { 
return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRasterizationRateMapWithDescriptor_), descriptor); } _MTL_INLINE MTL::IndirectCommandBuffer* MTL::Device::newIndirectCommandBuffer(const MTL::IndirectCommandBufferDescriptor* descriptor, NS::UInteger maxCount, MTL::ResourceOptions options) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIndirectCommandBufferWithDescriptor_maxCommandCount_options_), descriptor, maxCount, options); } _MTL_INLINE MTL::Event* MTL::Device::newEvent() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newEvent)); } _MTL_INLINE MTL::SharedEvent* MTL::Device::newSharedEvent() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedEvent)); } _MTL_INLINE MTL::SharedEvent* MTL::Device::newSharedEvent(const MTL::SharedEventHandle* sharedEventHandle) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedEventWithHandle_), sharedEventHandle); } _MTL_INLINE uint64_t MTL::Device::peerGroupID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(peerGroupID)); } _MTL_INLINE uint32_t MTL::Device::peerIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(peerIndex)); } _MTL_INLINE uint32_t MTL::Device::peerCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(peerCount)); } _MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOHandle(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIOHandleWithURL_error_), url, error); } _MTL_INLINE MTL::IOCommandQueue* MTL::Device::newIOCommandQueue(const MTL::IOCommandQueueDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIOCommandQueueWithDescriptor_error_), descriptor, error); } _MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIOHandleWithURL_compressionMethod_error_), url, compressionMethod, error); } _MTL_INLINE MTL::IOFileHandle* 
MTL::Device::newIOFileHandle(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIOFileHandleWithURL_error_), url, error); } _MTL_INLINE MTL::IOFileHandle* MTL::Device::newIOFileHandle(const NS::URL* url, MTL::IOCompressionMethod compressionMethod, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIOFileHandleWithURL_compressionMethod_error_), url, compressionMethod, error); } _MTL_INLINE MTL::Size MTL::Device::sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_), textureType, pixelFormat, sampleCount); } _MTL_INLINE NS::UInteger MTL::Device::sparseTileSizeInBytes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sparseTileSizeInBytes)); } _MTL_INLINE void MTL::Device::convertSparsePixelRegions(const MTL::Region* pixelRegions, MTL::Region* tileRegions, MTL::Size tileSize, MTL::SparseTextureRegionAlignmentMode mode, NS::UInteger numRegions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(convertSparsePixelRegions_toTileRegions_withTileSize_alignmentMode_numRegions_), pixelRegions, tileRegions, tileSize, mode, numRegions); } _MTL_INLINE void MTL::Device::convertSparseTileRegions(const MTL::Region* tileRegions, MTL::Region* pixelRegions, MTL::Size tileSize, NS::UInteger numRegions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(convertSparseTileRegions_toPixelRegions_withTileSize_numRegions_), tileRegions, pixelRegions, tileSize, numRegions); } _MTL_INLINE NS::UInteger MTL::Device::sparseTileSizeInBytes(MTL::SparsePageSize sparsePageSize) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sparseTileSizeInBytesForSparsePageSize_), sparsePageSize); } _MTL_INLINE MTL::Size MTL::Device::sparseTileSize(MTL::TextureType textureType, MTL::PixelFormat pixelFormat, NS::UInteger sampleCount, MTL::SparsePageSize sparsePageSize) { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(sparseTileSizeWithTextureType_pixelFormat_sampleCount_sparsePageSize_), textureType, pixelFormat, sampleCount, sparsePageSize); } _MTL_INLINE NS::UInteger MTL::Device::maxBufferLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxBufferLength)); } _MTL_INLINE NS::Array* MTL::Device::counterSets() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(counterSets)); } _MTL_INLINE MTL::CounterSampleBuffer* MTL::Device::newCounterSampleBuffer(const MTL::CounterSampleBufferDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newCounterSampleBufferWithDescriptor_error_), descriptor, error); } _MTL_INLINE void MTL::Device::sampleTimestamps(MTL::Timestamp* cpuTimestamp, MTL::Timestamp* gpuTimestamp) { Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleTimestamps_gpuTimestamp_), cpuTimestamp, gpuTimestamp); } _MTL_INLINE MTL::ArgumentEncoder* MTL::Device::newArgumentEncoder(const MTL::BufferBinding* bufferBinding) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferBinding_), bufferBinding); } _MTL_INLINE bool MTL::Device::supportsCounterSampling(MTL::CounterSamplingPoint samplingPoint) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsCounterSampling_), samplingPoint); } _MTL_INLINE bool MTL::Device::supportsVertexAmplificationCount(NS::UInteger count) { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsVertexAmplificationCount_), count); } _MTL_INLINE bool MTL::Device::supportsDynamicLibraries() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsDynamicLibraries)); } _MTL_INLINE bool MTL::Device::supportsRenderDynamicLibraries() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsRenderDynamicLibraries)); } _MTL_INLINE MTL::DynamicLibrary* MTL::Device::newDynamicLibrary(const MTL::Library* library, NS::Error** error) { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(newDynamicLibrary_error_), library, error); } _MTL_INLINE MTL::DynamicLibrary* MTL::Device::newDynamicLibrary(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newDynamicLibraryWithURL_error_), url, error); } _MTL_INLINE MTL::BinaryArchive* MTL::Device::newBinaryArchive(const MTL::BinaryArchiveDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBinaryArchiveWithDescriptor_error_), descriptor, error); } _MTL_INLINE bool MTL::Device::supportsRaytracing() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsRaytracing)); } _MTL_INLINE MTL::AccelerationStructureSizes MTL::Device::accelerationStructureSizes(const MTL::AccelerationStructureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(accelerationStructureSizesWithDescriptor_), descriptor); } _MTL_INLINE MTL::AccelerationStructure* MTL::Device::newAccelerationStructure(NS::UInteger size) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_), size); } _MTL_INLINE MTL::AccelerationStructure* MTL::Device::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_), descriptor); } _MTL_INLINE MTL::SizeAndAlign MTL::Device::heapAccelerationStructureSizeAndAlign(NS::UInteger size) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heapAccelerationStructureSizeAndAlignWithSize_), size); } _MTL_INLINE MTL::SizeAndAlign MTL::Device::heapAccelerationStructureSizeAndAlign(const MTL::AccelerationStructureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(heapAccelerationStructureSizeAndAlignWithDescriptor_), descriptor); } _MTL_INLINE bool MTL::Device::supportsFunctionPointers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsFunctionPointers)); } _MTL_INLINE bool 
MTL::Device::supportsFunctionPointersFromRender() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsFunctionPointersFromRender)); } _MTL_INLINE bool MTL::Device::supportsRaytracingFromRender() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsRaytracingFromRender)); } _MTL_INLINE bool MTL::Device::supportsPrimitiveMotionBlur() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportsPrimitiveMotionBlur)); } _MTL_INLINE bool MTL::Device::shouldMaximizeConcurrentCompilation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shouldMaximizeConcurrentCompilation)); } _MTL_INLINE void MTL::Device::setShouldMaximizeConcurrentCompilation(bool shouldMaximizeConcurrentCompilation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setShouldMaximizeConcurrentCompilation_), shouldMaximizeConcurrentCompilation); } _MTL_INLINE NS::UInteger MTL::Device::maximumConcurrentCompilationTaskCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maximumConcurrentCompilationTaskCount)); } _MTL_INLINE MTL::ResidencySet* MTL::Device::newResidencySet(const MTL::ResidencySetDescriptor* desc, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newResidencySetWithDescriptor_error_), desc, error); } #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, DynamicLibraryError) { DynamicLibraryErrorNone = 0, DynamicLibraryErrorInvalidFile = 1, DynamicLibraryErrorCompilationFailure = 2, DynamicLibraryErrorUnresolvedInstallName = 3, DynamicLibraryErrorDependencyLoadFailure = 4, DynamicLibraryErrorUnsupported = 5, }; class DynamicLibrary : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; NS::String* installName() const; bool serializeToURL(const NS::URL* url, NS::Error** error); }; } _MTL_INLINE NS::String* MTL::DynamicLibrary::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void 
MTL::DynamicLibrary::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::DynamicLibrary::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::DynamicLibrary::installName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(installName)); } _MTL_INLINE bool MTL::DynamicLibrary::serializeToURL(const NS::URL* url, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(serializeToURL_error_), url, error); } #pragma once namespace MTL { class Event : public NS::Referencing { public: class Device* device() const; NS::String* label() const; void setLabel(const NS::String* label); }; class SharedEventListener : public NS::Referencing { public: static class SharedEventListener* alloc(); MTL::SharedEventListener* init(); MTL::SharedEventListener* init(const dispatch_queue_t dispatchQueue); dispatch_queue_t dispatchQueue() const; }; using SharedEventNotificationBlock = void (^)(class SharedEvent* pEvent, std::uint64_t value); using SharedEventNotificationFunction = std::function; class SharedEvent : public NS::Referencing { public: void notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationBlock block); void notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationFunction& function); class SharedEventHandle* newSharedEventHandle(); bool waitUntilSignaledValue(uint64_t value, uint64_t milliseconds); uint64_t signaledValue() const; void setSignaledValue(uint64_t signaledValue); }; class SharedEventHandle : public NS::SecureCoding { public: static class SharedEventHandle* alloc(); class SharedEventHandle* init(); NS::String* label() const; }; } _MTL_INLINE MTL::Device* MTL::Event::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::Event::label() const { return 
Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Event::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLSharedEventListener)); } _MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::init() { return NS::Object::init(); } _MTL_INLINE MTL::SharedEventListener* MTL::SharedEventListener::init(const dispatch_queue_t dispatchQueue) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithDispatchQueue_), dispatchQueue); } _MTL_INLINE dispatch_queue_t MTL::SharedEventListener::dispatchQueue() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchQueue)); } _MTL_INLINE void MTL::SharedEvent::notifyListener(const MTL::SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationBlock block) { Object::sendMessage(this, _MTL_PRIVATE_SEL(notifyListener_atValue_block_), listener, value, block); } _MTL_INLINE void MTL::SharedEvent::notifyListener(const class SharedEventListener* listener, uint64_t value, const MTL::SharedEventNotificationFunction& function) { __block MTL::SharedEventNotificationFunction callback = function; notifyListener(listener, value, ^void(class SharedEvent* pEvent, std::uint64_t value){ callback(pEvent, value); }); } _MTL_INLINE MTL::SharedEventHandle* MTL::SharedEvent::newSharedEventHandle() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newSharedEventHandle)); } _MTL_INLINE bool MTL::SharedEvent::waitUntilSignaledValue(uint64_t value, uint64_t milliseconds) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(waitUntilSignaledValue_timeoutMS_), value, milliseconds); } _MTL_INLINE uint64_t MTL::SharedEvent::signaledValue() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(signaledValue)); } _MTL_INLINE void MTL::SharedEvent::setSignaledValue(uint64_t signaledValue) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setSignaledValue_), signaledValue); } _MTL_INLINE MTL::SharedEventHandle* MTL::SharedEventHandle::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLSharedEventHandle)); } _MTL_INLINE MTL::SharedEventHandle* MTL::SharedEventHandle::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::SharedEventHandle::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } #pragma once namespace MTL { class Fence : public NS::Referencing { public: class Device* device() const; NS::String* label() const; void setLabel(const NS::String* label); }; } _MTL_INLINE MTL::Device* MTL::Fence::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::Fence::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Fence::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } #pragma once namespace MTL { class FunctionConstantValues : public NS::Copying { public: static class FunctionConstantValues* alloc(); class FunctionConstantValues* init(); void setConstantValue(const void* value, MTL::DataType type, NS::UInteger index); void setConstantValues(const void* values, MTL::DataType type, NS::Range range); void setConstantValue(const void* value, MTL::DataType type, const NS::String* name); void reset(); }; } _MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionConstantValues::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionConstantValues)); } _MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionConstantValues::init() { return NS::Object::init(); } _MTL_INLINE void MTL::FunctionConstantValues::setConstantValue(const void* value, MTL::DataType type, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setConstantValue_type_atIndex_), value, type, index); } _MTL_INLINE void MTL::FunctionConstantValues::setConstantValues(const void* values, MTL::DataType type, NS::Range range) 
{ Object::sendMessage(this, _MTL_PRIVATE_SEL(setConstantValues_type_withRange_), values, type, range); } _MTL_INLINE void MTL::FunctionConstantValues::setConstantValue(const void* value, MTL::DataType type, const NS::String* name) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setConstantValue_type_withName_), value, type, name); } _MTL_INLINE void MTL::FunctionConstantValues::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, FunctionOptions) { FunctionOptionNone = 0, FunctionOptionCompileToBinary = 1, FunctionOptionStoreFunctionInMetalScript = 2, FunctionOptionStoreFunctionInMetalPipelinesScript = 2, FunctionOptionFailOnBinaryArchiveMiss = 4, }; class FunctionDescriptor : public NS::Copying { public: static class FunctionDescriptor* alloc(); class FunctionDescriptor* init(); static class FunctionDescriptor* functionDescriptor(); NS::String* name() const; void setName(const NS::String* name); NS::String* specializedName() const; void setSpecializedName(const NS::String* specializedName); class FunctionConstantValues* constantValues() const; void setConstantValues(const class FunctionConstantValues* constantValues); MTL::FunctionOptions options() const; void setOptions(MTL::FunctionOptions options); NS::Array* binaryArchives() const; void setBinaryArchives(const NS::Array* binaryArchives); }; class IntersectionFunctionDescriptor : public NS::Copying { public: static class IntersectionFunctionDescriptor* alloc(); class IntersectionFunctionDescriptor* init(); }; } _MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionDescriptor)); } _MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::FunctionDescriptor* MTL::FunctionDescriptor::functionDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLFunctionDescriptor), _MTL_PRIVATE_SEL(functionDescriptor)); } 
_MTL_INLINE NS::String* MTL::FunctionDescriptor::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE void MTL::FunctionDescriptor::setName(const NS::String* name) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setName_), name); } _MTL_INLINE NS::String* MTL::FunctionDescriptor::specializedName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(specializedName)); } _MTL_INLINE void MTL::FunctionDescriptor::setSpecializedName(const NS::String* specializedName) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSpecializedName_), specializedName); } _MTL_INLINE MTL::FunctionConstantValues* MTL::FunctionDescriptor::constantValues() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(constantValues)); } _MTL_INLINE void MTL::FunctionDescriptor::setConstantValues(const MTL::FunctionConstantValues* constantValues) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setConstantValues_), constantValues); } _MTL_INLINE MTL::FunctionOptions MTL::FunctionDescriptor::options() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(options)); } _MTL_INLINE void MTL::FunctionDescriptor::setOptions(MTL::FunctionOptions options) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOptions_), options); } _MTL_INLINE NS::Array* MTL::FunctionDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); } _MTL_INLINE void MTL::FunctionDescriptor::setBinaryArchives(const NS::Array* binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); } _MTL_INLINE MTL::IntersectionFunctionDescriptor* MTL::IntersectionFunctionDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLIntersectionFunctionDescriptor)); } _MTL_INLINE MTL::IntersectionFunctionDescriptor* MTL::IntersectionFunctionDescriptor::init() { return NS::Object::init(); } #pragma once #pragma once #include namespace MTL { _MTL_ENUM(NS::UInteger, PatchType) { PatchTypeNone = 0, PatchTypeTriangle = 1, 
PatchTypeQuad = 2, }; class VertexAttribute : public NS::Referencing { public: static class VertexAttribute* alloc(); class VertexAttribute* init(); NS::String* name() const; NS::UInteger attributeIndex() const; MTL::DataType attributeType() const; bool active() const; bool patchData() const; bool patchControlPointData() const; }; class Attribute : public NS::Referencing { public: static class Attribute* alloc(); class Attribute* init(); NS::String* name() const; NS::UInteger attributeIndex() const; MTL::DataType attributeType() const; bool active() const; bool patchData() const; bool patchControlPointData() const; }; _MTL_ENUM(NS::UInteger, FunctionType) { FunctionTypeVertex = 1, FunctionTypeFragment = 2, FunctionTypeKernel = 3, FunctionTypeVisible = 5, FunctionTypeIntersection = 6, FunctionTypeMesh = 7, FunctionTypeObject = 8, }; class FunctionConstant : public NS::Referencing { public: static class FunctionConstant* alloc(); class FunctionConstant* init(); NS::String* name() const; MTL::DataType type() const; NS::UInteger index() const; bool required() const; }; using AutoreleasedArgument = class Argument*; class Function : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; MTL::FunctionType functionType() const; MTL::PatchType patchType() const; NS::Integer patchControlPointCount() const; NS::Array* vertexAttributes() const; NS::Array* stageInputAttributes() const; NS::String* name() const; NS::Dictionary* functionConstantsDictionary() const; class ArgumentEncoder* newArgumentEncoder(NS::UInteger bufferIndex); class ArgumentEncoder* newArgumentEncoder(NS::UInteger bufferIndex, const MTL::AutoreleasedArgument* reflection); MTL::FunctionOptions options() const; }; _MTL_ENUM(NS::UInteger, LanguageVersion) { LanguageVersion1_0 = 65536, LanguageVersion1_1 = 65537, LanguageVersion1_2 = 65538, LanguageVersion2_0 = 131072, LanguageVersion2_1 = 131073, LanguageVersion2_2 = 131074, 
LanguageVersion2_3 = 131075, LanguageVersion2_4 = 131076, LanguageVersion3_0 = 196608, LanguageVersion3_1 = 196609, LanguageVersion3_2 = 196610, }; _MTL_ENUM(NS::Integer, LibraryType) { LibraryTypeExecutable = 0, LibraryTypeDynamic = 1, }; _MTL_ENUM(NS::Integer, LibraryOptimizationLevel) { LibraryOptimizationLevelDefault = 0, LibraryOptimizationLevelSize = 1, }; _MTL_ENUM(NS::Integer, CompileSymbolVisibility) { CompileSymbolVisibilityDefault = 0, CompileSymbolVisibilityHidden = 1, }; _MTL_ENUM(NS::Integer, MathMode) { MathModeSafe = 0, MathModeRelaxed = 1, MathModeFast = 2, }; _MTL_ENUM(NS::Integer, MathFloatingPointFunctions) { MathFloatingPointFunctionsFast = 0, MathFloatingPointFunctionsPrecise = 1, }; class CompileOptions : public NS::Copying { public: static class CompileOptions* alloc(); class CompileOptions* init(); NS::Dictionary* preprocessorMacros() const; void setPreprocessorMacros(const NS::Dictionary* preprocessorMacros); bool fastMathEnabled() const; void setFastMathEnabled(bool fastMathEnabled); MTL::MathMode mathMode() const; void setMathMode(MTL::MathMode mathMode); MTL::MathFloatingPointFunctions mathFloatingPointFunctions() const; void setMathFloatingPointFunctions(MTL::MathFloatingPointFunctions mathFloatingPointFunctions); MTL::LanguageVersion languageVersion() const; void setLanguageVersion(MTL::LanguageVersion languageVersion); MTL::LibraryType libraryType() const; void setLibraryType(MTL::LibraryType libraryType); NS::String* installName() const; void setInstallName(const NS::String* installName); NS::Array* libraries() const; void setLibraries(const NS::Array* libraries); bool preserveInvariance() const; void setPreserveInvariance(bool preserveInvariance); MTL::LibraryOptimizationLevel optimizationLevel() const; void setOptimizationLevel(MTL::LibraryOptimizationLevel optimizationLevel); MTL::CompileSymbolVisibility compileSymbolVisibility() const; void setCompileSymbolVisibility(MTL::CompileSymbolVisibility compileSymbolVisibility); bool 
allowReferencingUndefinedSymbols() const; void setAllowReferencingUndefinedSymbols(bool allowReferencingUndefinedSymbols); NS::UInteger maxTotalThreadsPerThreadgroup() const; void setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup); bool enableLogging() const; void setEnableLogging(bool enableLogging); }; _MTL_ENUM(NS::UInteger, LibraryError) { LibraryErrorUnsupported = 1, LibraryErrorInternal = 2, LibraryErrorCompileFailure = 3, LibraryErrorCompileWarning = 4, LibraryErrorFunctionNotFound = 5, LibraryErrorFileNotFound = 6, }; class Library : public NS::Referencing { public: void newFunction(const NS::String* pFunctionName, const class FunctionConstantValues* pConstantValues, const std::function& completionHandler); void newFunction(const class FunctionDescriptor* pDescriptor, const std::function& completionHandler); void newIntersectionFunction(const class IntersectionFunctionDescriptor* pDescriptor, const std::function& completionHandler); NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; class Function* newFunction(const NS::String* functionName); class Function* newFunction(const NS::String* name, const class FunctionConstantValues* constantValues, NS::Error** error); void newFunction(const NS::String* name, const class FunctionConstantValues* constantValues, void (^completionHandler)(MTL::Function*, NS::Error*)); void newFunction(const class FunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*)); class Function* newFunction(const class FunctionDescriptor* descriptor, NS::Error** error); void newIntersectionFunction(const class IntersectionFunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*)); class Function* newIntersectionFunction(const class IntersectionFunctionDescriptor* descriptor, NS::Error** error); NS::Array* functionNames() const; MTL::LibraryType type() const; NS::String* installName() const; }; } _MTL_INLINE 
MTL::VertexAttribute* MTL::VertexAttribute::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexAttribute)); } _MTL_INLINE MTL::VertexAttribute* MTL::VertexAttribute::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::VertexAttribute::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::UInteger MTL::VertexAttribute::attributeIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributeIndex)); } _MTL_INLINE MTL::DataType MTL::VertexAttribute::attributeType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributeType)); } _MTL_INLINE bool MTL::VertexAttribute::active() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isActive)); } _MTL_INLINE bool MTL::VertexAttribute::patchData() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isPatchData)); } _MTL_INLINE bool MTL::VertexAttribute::patchControlPointData() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isPatchControlPointData)); } _MTL_INLINE MTL::Attribute* MTL::Attribute::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLAttribute)); } _MTL_INLINE MTL::Attribute* MTL::Attribute::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::Attribute::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::UInteger MTL::Attribute::attributeIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributeIndex)); } _MTL_INLINE MTL::DataType MTL::Attribute::attributeType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributeType)); } _MTL_INLINE bool MTL::Attribute::active() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isActive)); } _MTL_INLINE bool MTL::Attribute::patchData() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isPatchData)); } _MTL_INLINE bool MTL::Attribute::patchControlPointData() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isPatchControlPointData)); } _MTL_INLINE 
// Inline implementations for MTL::FunctionConstant (reflection of a [[function_constant]]) and the
// read-only MTL::Function accessors. patchControlPointCount returns NS::Integer (signed; -1 means
// "not a post-tessellation vertex function" per Metal docs — TODO confirm against upstream header).
MTL::FunctionConstant* MTL::FunctionConstant::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionConstant)); } _MTL_INLINE MTL::FunctionConstant* MTL::FunctionConstant::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::FunctionConstant::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE MTL::DataType MTL::FunctionConstant::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE NS::UInteger MTL::FunctionConstant::index() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(index)); } _MTL_INLINE bool MTL::FunctionConstant::required() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(required)); } _MTL_INLINE NS::String* MTL::Function::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Function::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::Function::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::FunctionType MTL::Function::functionType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionType)); } _MTL_INLINE MTL::PatchType MTL::Function::patchType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(patchType)); } _MTL_INLINE NS::Integer MTL::Function::patchControlPointCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(patchControlPointCount)); } _MTL_INLINE NS::Array* MTL::Function::vertexAttributes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexAttributes)); } _MTL_INLINE NS::Array* MTL::Function::stageInputAttributes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stageInputAttributes)); } _MTL_INLINE NS::String* MTL::Function::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE NS::Dictionary* MTL::Function::functionConstantsDictionary() const { return
// Continuation: MTL::Function argument-encoder factories (newArgumentEncoder returns an owned
// object per the Cocoa "new" naming rule), then MTL::CompileOptions property accessors.
Object::sendMessage(this, _MTL_PRIVATE_SEL(functionConstantsDictionary)); } _MTL_INLINE MTL::ArgumentEncoder* MTL::Function::newArgumentEncoder(NS::UInteger bufferIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferIndex_), bufferIndex); } _MTL_INLINE MTL::ArgumentEncoder* MTL::Function::newArgumentEncoder(NS::UInteger bufferIndex, const MTL::AutoreleasedArgument* reflection) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newArgumentEncoderWithBufferIndex_reflection_), bufferIndex, reflection); } _MTL_INLINE MTL::FunctionOptions MTL::Function::options() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(options)); } _MTL_INLINE MTL::CompileOptions* MTL::CompileOptions::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLCompileOptions)); } _MTL_INLINE MTL::CompileOptions* MTL::CompileOptions::init() { return NS::Object::init(); } _MTL_INLINE NS::Dictionary* MTL::CompileOptions::preprocessorMacros() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(preprocessorMacros)); } _MTL_INLINE void MTL::CompileOptions::setPreprocessorMacros(const NS::Dictionary* preprocessorMacros) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPreprocessorMacros_), preprocessorMacros); } _MTL_INLINE bool MTL::CompileOptions::fastMathEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fastMathEnabled)); } _MTL_INLINE void MTL::CompileOptions::setFastMathEnabled(bool fastMathEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFastMathEnabled_), fastMathEnabled); } _MTL_INLINE MTL::MathMode MTL::CompileOptions::mathMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mathMode)); } _MTL_INLINE void MTL::CompileOptions::setMathMode(MTL::MathMode mathMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMathMode_), mathMode); } _MTL_INLINE MTL::MathFloatingPointFunctions MTL::CompileOptions::mathFloatingPointFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mathFloatingPointFunctions));
// MTL::CompileOptions accessors (continued): math/floating-point modes, language version, library
// type, install name, linked libraries, invariance, and optimization level — all selector forwards.
} _MTL_INLINE void MTL::CompileOptions::setMathFloatingPointFunctions(MTL::MathFloatingPointFunctions mathFloatingPointFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMathFloatingPointFunctions_), mathFloatingPointFunctions); } _MTL_INLINE MTL::LanguageVersion MTL::CompileOptions::languageVersion() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(languageVersion)); } _MTL_INLINE void MTL::CompileOptions::setLanguageVersion(MTL::LanguageVersion languageVersion) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLanguageVersion_), languageVersion); } _MTL_INLINE MTL::LibraryType MTL::CompileOptions::libraryType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(libraryType)); } _MTL_INLINE void MTL::CompileOptions::setLibraryType(MTL::LibraryType libraryType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLibraryType_), libraryType); } _MTL_INLINE NS::String* MTL::CompileOptions::installName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(installName)); } _MTL_INLINE void MTL::CompileOptions::setInstallName(const NS::String* installName) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInstallName_), installName); } _MTL_INLINE NS::Array* MTL::CompileOptions::libraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(libraries)); } _MTL_INLINE void MTL::CompileOptions::setLibraries(const NS::Array* libraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLibraries_), libraries); } _MTL_INLINE bool MTL::CompileOptions::preserveInvariance() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(preserveInvariance)); } _MTL_INLINE void MTL::CompileOptions::setPreserveInvariance(bool preserveInvariance) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPreserveInvariance_), preserveInvariance); } _MTL_INLINE MTL::LibraryOptimizationLevel MTL::CompileOptions::optimizationLevel() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(optimizationLevel)); } _MTL_INLINE void
// Remaining MTL::CompileOptions setters, then the first MTL::Library::newFunction overload that
// bridges a std::function completion handler into an Objective-C block (__block copy keeps the
// callable alive for the asynchronous callback).
MTL::CompileOptions::setOptimizationLevel(MTL::LibraryOptimizationLevel optimizationLevel) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOptimizationLevel_), optimizationLevel); } _MTL_INLINE MTL::CompileSymbolVisibility MTL::CompileOptions::compileSymbolVisibility() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(compileSymbolVisibility)); } _MTL_INLINE void MTL::CompileOptions::setCompileSymbolVisibility(MTL::CompileSymbolVisibility compileSymbolVisibility) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCompileSymbolVisibility_), compileSymbolVisibility); } _MTL_INLINE bool MTL::CompileOptions::allowReferencingUndefinedSymbols() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allowReferencingUndefinedSymbols)); } _MTL_INLINE void MTL::CompileOptions::setAllowReferencingUndefinedSymbols(bool allowReferencingUndefinedSymbols) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAllowReferencingUndefinedSymbols_), allowReferencingUndefinedSymbols); } _MTL_INLINE NS::UInteger MTL::CompileOptions::maxTotalThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup)); } _MTL_INLINE void MTL::CompileOptions::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup); } _MTL_INLINE bool MTL::CompileOptions::enableLogging() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(enableLogging)); } _MTL_INLINE void MTL::CompileOptions::setEnableLogging(bool enableLogging) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEnableLogging_), enableLogging); } _MTL_INLINE void MTL::Library::newFunction(const NS::String* pFunctionName, const FunctionConstantValues* pConstantValues, const std::function& completionHandler) { __block std::function blockCompletionHandler = completionHandler; newFunction(pFunctionName, pConstantValues, ^(Function* pFunction, NS::Error* pError) {
// MTL::Library std::function->block bridges for newFunction / newIntersectionFunction, followed by
// the direct selector-forwarding implementations (sync and block-based overloads).
blockCompletionHandler(pFunction, pError); }); } _MTL_INLINE void MTL::Library::newFunction(const FunctionDescriptor* pDescriptor, const std::function& completionHandler) { __block std::function blockCompletionHandler = completionHandler; newFunction(pDescriptor, ^(Function* pFunction, NS::Error* pError) { blockCompletionHandler(pFunction, pError); }); } _MTL_INLINE void MTL::Library::newIntersectionFunction(const IntersectionFunctionDescriptor* pDescriptor, const std::function& completionHandler) { __block std::function blockCompletionHandler = completionHandler; newIntersectionFunction(pDescriptor, ^(Function* pFunction, NS::Error* pError) { blockCompletionHandler(pFunction, pError); }); } _MTL_INLINE NS::String* MTL::Library::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Library::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::Library::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::Function* MTL::Library::newFunction(const NS::String* functionName) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newFunctionWithName_), functionName); } _MTL_INLINE MTL::Function* MTL::Library::newFunction(const NS::String* name, const MTL::FunctionConstantValues* constantValues, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newFunctionWithName_constantValues_error_), name, constantValues, error); } _MTL_INLINE void MTL::Library::newFunction(const NS::String* name, const MTL::FunctionConstantValues* constantValues, void (^completionHandler)(MTL::Function*, NS::Error*)) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newFunctionWithName_constantValues_completionHandler_), name, constantValues, completionHandler); } _MTL_INLINE void MTL::Library::newFunction(const MTL::FunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*)) {
// Remaining MTL::Library forwards, then the MTL::FunctionHandle wrapper (handle to a visible/
// intersection function) and its inline accessors. The trailing "#pragma once" marks the start of
// the next concatenated header (MTLFunctionLog) in this single-header bundle.
Object::sendMessage(this, _MTL_PRIVATE_SEL(newFunctionWithDescriptor_completionHandler_), descriptor, completionHandler); } _MTL_INLINE MTL::Function* MTL::Library::newFunction(const MTL::FunctionDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newFunctionWithDescriptor_error_), descriptor, error); } _MTL_INLINE void MTL::Library::newIntersectionFunction(const MTL::IntersectionFunctionDescriptor* descriptor, void (^completionHandler)(MTL::Function*, NS::Error*)) { Object::sendMessage(this, _MTL_PRIVATE_SEL(newIntersectionFunctionWithDescriptor_completionHandler_), descriptor, completionHandler); } _MTL_INLINE MTL::Function* MTL::Library::newIntersectionFunction(const MTL::IntersectionFunctionDescriptor* descriptor, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIntersectionFunctionWithDescriptor_error_), descriptor, error); } _MTL_INLINE NS::Array* MTL::Library::functionNames() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionNames)); } _MTL_INLINE MTL::LibraryType MTL::Library::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE NS::String* MTL::Library::installName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(installName)); } namespace MTL { class FunctionHandle : public NS::Referencing { public: MTL::FunctionType functionType() const; NS::String* name() const; class Device* device() const; }; } _MTL_INLINE MTL::FunctionType MTL::FunctionHandle::functionType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionType)); } _MTL_INLINE NS::String* MTL::FunctionHandle::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE MTL::Device* MTL::FunctionHandle::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, FunctionLogType) { FunctionLogTypeValidation = 0, }; class LogContainer : public NS::Referencing {
// MTLFunctionLog wrappers: LogContainer (empty marker protocol), FunctionLogDebugLocation
// (function name / URL / line / column of a logged shader event) and FunctionLog, plus their inline
// selector forwards. The trailing "#pragma once" begins the MTLFunctionStitching header.
public: }; class FunctionLogDebugLocation : public NS::Referencing { public: NS::String* functionName() const; NS::URL* URL() const; NS::UInteger line() const; NS::UInteger column() const; }; class FunctionLog : public NS::Referencing { public: MTL::FunctionLogType type() const; NS::String* encoderLabel() const; class Function* function() const; class FunctionLogDebugLocation* debugLocation() const; }; } _MTL_INLINE NS::String* MTL::FunctionLogDebugLocation::functionName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionName)); } _MTL_INLINE NS::URL* MTL::FunctionLogDebugLocation::URL() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(URL)); } _MTL_INLINE NS::UInteger MTL::FunctionLogDebugLocation::line() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(line)); } _MTL_INLINE NS::UInteger MTL::FunctionLogDebugLocation::column() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(column)); } _MTL_INLINE MTL::FunctionLogType MTL::FunctionLog::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE NS::String* MTL::FunctionLog::encoderLabel() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(encoderLabel)); } _MTL_INLINE MTL::Function* MTL::FunctionLog::function() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(function)); } _MTL_INLINE MTL::FunctionLogDebugLocation* MTL::FunctionLog::debugLocation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(debugLocation)); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, StitchedLibraryOptions) { StitchedLibraryOptionNone = 0, StitchedLibraryOptionFailOnBinaryArchiveMiss = 1, StitchedLibraryOptionStoreLibraryInMetalScript = 2, StitchedLibraryOptionStoreLibraryInMetalPipelinesScript = 2 }; class FunctionStitchingAttribute : public NS::Referencing { public: }; class FunctionStitchingAttributeAlwaysInline : public NS::Referencing { public: static class FunctionStitchingAttributeAlwaysInline* alloc(); class
FunctionStitchingAttributeAlwaysInline* init(); }; class FunctionStitchingNode : public NS::Copying { public: }; class FunctionStitchingInputNode : public NS::Referencing { public: static class FunctionStitchingInputNode* alloc(); class FunctionStitchingInputNode* init(); NS::UInteger argumentIndex() const; void setArgumentIndex(NS::UInteger argumentIndex); MTL::FunctionStitchingInputNode* init(NS::UInteger argument); }; class FunctionStitchingFunctionNode : public NS::Referencing { public: static class FunctionStitchingFunctionNode* alloc(); class FunctionStitchingFunctionNode* init(); NS::String* name() const; void setName(const NS::String* name); NS::Array* arguments() const; void setArguments(const NS::Array* arguments); NS::Array* controlDependencies() const; void setControlDependencies(const NS::Array* controlDependencies); MTL::FunctionStitchingFunctionNode* init(const NS::String* name, const NS::Array* arguments, const NS::Array* controlDependencies); }; class FunctionStitchingGraph : public NS::Copying { public: static class FunctionStitchingGraph* alloc(); class FunctionStitchingGraph* init(); NS::String* functionName() const; void setFunctionName(const NS::String* functionName); NS::Array* nodes() const; void setNodes(const NS::Array* nodes); class FunctionStitchingFunctionNode* outputNode() const; void setOutputNode(const class FunctionStitchingFunctionNode* outputNode); NS::Array* attributes() const; void setAttributes(const NS::Array* attributes); MTL::FunctionStitchingGraph* init(const NS::String* functionName, const NS::Array* nodes, const class FunctionStitchingFunctionNode* outputNode, const NS::Array* attributes); }; class StitchedLibraryDescriptor : public NS::Copying { public: static class StitchedLibraryDescriptor* alloc(); class StitchedLibraryDescriptor* init(); NS::Array* functionGraphs() const; void setFunctionGraphs(const NS::Array* functionGraphs); NS::Array* functions() const; void setFunctions(const NS::Array* functions); NS::Array* 
// Tail of StitchedLibraryDescriptor declaration, then inline implementations for the stitching
// attribute and node types — alloc/init via the private ObjC class, accessors via selector forwards.
binaryArchives() const; void setBinaryArchives(const NS::Array* binaryArchives); MTL::StitchedLibraryOptions options() const; void setOptions(MTL::StitchedLibraryOptions options); }; } _MTL_INLINE MTL::FunctionStitchingAttributeAlwaysInline* MTL::FunctionStitchingAttributeAlwaysInline::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionStitchingAttributeAlwaysInline)); } _MTL_INLINE MTL::FunctionStitchingAttributeAlwaysInline* MTL::FunctionStitchingAttributeAlwaysInline::init() { return NS::Object::init(); } _MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionStitchingInputNode)); } _MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::FunctionStitchingInputNode::argumentIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(argumentIndex)); } _MTL_INLINE void MTL::FunctionStitchingInputNode::setArgumentIndex(NS::UInteger argumentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setArgumentIndex_), argumentIndex); } _MTL_INLINE MTL::FunctionStitchingInputNode* MTL::FunctionStitchingInputNode::init(NS::UInteger argument) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithArgumentIndex_), argument); } _MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionStitchingFunctionNode)); } _MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::FunctionStitchingFunctionNode::name() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(name)); } _MTL_INLINE void MTL::FunctionStitchingFunctionNode::setName(const NS::String* name) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setName_), name); } _MTL_INLINE NS::Array* MTL::FunctionStitchingFunctionNode::arguments() const { return
// FunctionStitchingFunctionNode accessors and designated init, then FunctionStitchingGraph
// alloc/init and property forwards.
Object::sendMessage(this, _MTL_PRIVATE_SEL(arguments)); } _MTL_INLINE void MTL::FunctionStitchingFunctionNode::setArguments(const NS::Array* arguments) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setArguments_), arguments); } _MTL_INLINE NS::Array* MTL::FunctionStitchingFunctionNode::controlDependencies() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(controlDependencies)); } _MTL_INLINE void MTL::FunctionStitchingFunctionNode::setControlDependencies(const NS::Array* controlDependencies) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setControlDependencies_), controlDependencies); } _MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingFunctionNode::init(const NS::String* name, const NS::Array* arguments, const NS::Array* controlDependencies) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithName_arguments_controlDependencies_), name, arguments, controlDependencies); } _MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLFunctionStitchingGraph)); } _MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::FunctionStitchingGraph::functionName() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionName)); } _MTL_INLINE void MTL::FunctionStitchingGraph::setFunctionName(const NS::String* functionName) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctionName_), functionName); } _MTL_INLINE NS::Array* MTL::FunctionStitchingGraph::nodes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(nodes)); } _MTL_INLINE void MTL::FunctionStitchingGraph::setNodes(const NS::Array* nodes) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setNodes_), nodes); } _MTL_INLINE MTL::FunctionStitchingFunctionNode* MTL::FunctionStitchingGraph::outputNode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(outputNode)); } _MTL_INLINE void
// FunctionStitchingGraph tail (outputNode/attributes/designated init), then
// StitchedLibraryDescriptor alloc/init and its array-property forwards.
MTL::FunctionStitchingGraph::setOutputNode(const MTL::FunctionStitchingFunctionNode* outputNode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOutputNode_), outputNode); } _MTL_INLINE NS::Array* MTL::FunctionStitchingGraph::attributes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributes)); } _MTL_INLINE void MTL::FunctionStitchingGraph::setAttributes(const NS::Array* attributes) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAttributes_), attributes); } _MTL_INLINE MTL::FunctionStitchingGraph* MTL::FunctionStitchingGraph::init(const NS::String* functionName, const NS::Array* nodes, const MTL::FunctionStitchingFunctionNode* outputNode, const NS::Array* attributes) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithFunctionName_nodes_outputNode_attributes_), functionName, nodes, outputNode, attributes); } _MTL_INLINE MTL::StitchedLibraryDescriptor* MTL::StitchedLibraryDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLStitchedLibraryDescriptor)); } _MTL_INLINE MTL::StitchedLibraryDescriptor* MTL::StitchedLibraryDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::functionGraphs() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionGraphs)); } _MTL_INLINE void MTL::StitchedLibraryDescriptor::setFunctionGraphs(const NS::Array* functionGraphs) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctionGraphs_), functionGraphs); } _MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::functions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functions)); } _MTL_INLINE void MTL::StitchedLibraryDescriptor::setFunctions(const NS::Array* functions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctions_), functions); } _MTL_INLINE NS::Array* MTL::StitchedLibraryDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); } _MTL_INLINE void MTL::StitchedLibraryDescriptor::setBinaryArchives(const NS::Array*
// StitchedLibraryDescriptor tail, then the MTLHeap header: HeapType enum, HeapDescriptor
// (size/storage/cache/sparse-page/hazard-tracking configuration) and the Heap interface wrapper.
binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); } _MTL_INLINE MTL::StitchedLibraryOptions MTL::StitchedLibraryDescriptor::options() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(options)); } _MTL_INLINE void MTL::StitchedLibraryDescriptor::setOptions(MTL::StitchedLibraryOptions options) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOptions_), options); } #pragma once namespace MTL { _MTL_ENUM(NS::Integer, HeapType) { HeapTypeAutomatic = 0, HeapTypePlacement = 1, HeapTypeSparse = 2, }; class HeapDescriptor : public NS::Copying { public: static class HeapDescriptor* alloc(); class HeapDescriptor* init(); NS::UInteger size() const; void setSize(NS::UInteger size); MTL::StorageMode storageMode() const; void setStorageMode(MTL::StorageMode storageMode); MTL::CPUCacheMode cpuCacheMode() const; void setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode); MTL::SparsePageSize sparsePageSize() const; void setSparsePageSize(MTL::SparsePageSize sparsePageSize); MTL::HazardTrackingMode hazardTrackingMode() const; void setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode); MTL::ResourceOptions resourceOptions() const; void setResourceOptions(MTL::ResourceOptions resourceOptions); MTL::HeapType type() const; void setType(MTL::HeapType type); }; class Heap : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); class Device* device() const; MTL::StorageMode storageMode() const; MTL::CPUCacheMode cpuCacheMode() const; MTL::HazardTrackingMode hazardTrackingMode() const; MTL::ResourceOptions resourceOptions() const; NS::UInteger size() const; NS::UInteger usedSize() const; NS::UInteger currentAllocatedSize() const; NS::UInteger maxAvailableSize(NS::UInteger alignment); class Buffer* newBuffer(NS::UInteger length, MTL::ResourceOptions options); class Texture* newTexture(const class TextureDescriptor* descriptor); MTL::PurgeableState setPurgeableState(MTL::PurgeableState
// Heap declaration tail: offset-based (placement-heap) buffer/texture factories and the
// acceleration-structure factories; then HeapDescriptor inline property forwards.
state); MTL::HeapType type() const; class Buffer* newBuffer(NS::UInteger length, MTL::ResourceOptions options, NS::UInteger offset); class Texture* newTexture(const class TextureDescriptor* descriptor, NS::UInteger offset); class AccelerationStructure* newAccelerationStructure(NS::UInteger size); class AccelerationStructure* newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor); class AccelerationStructure* newAccelerationStructure(NS::UInteger size, NS::UInteger offset); class AccelerationStructure* newAccelerationStructure(const class AccelerationStructureDescriptor* descriptor, NS::UInteger offset); }; } _MTL_INLINE MTL::HeapDescriptor* MTL::HeapDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLHeapDescriptor)); } _MTL_INLINE MTL::HeapDescriptor* MTL::HeapDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::HeapDescriptor::size() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(size)); } _MTL_INLINE void MTL::HeapDescriptor::setSize(NS::UInteger size) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSize_), size); } _MTL_INLINE MTL::StorageMode MTL::HeapDescriptor::storageMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storageMode)); } _MTL_INLINE void MTL::HeapDescriptor::setStorageMode(MTL::StorageMode storageMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStorageMode_), storageMode); } _MTL_INLINE MTL::CPUCacheMode MTL::HeapDescriptor::cpuCacheMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(cpuCacheMode)); } _MTL_INLINE void MTL::HeapDescriptor::setCpuCacheMode(MTL::CPUCacheMode cpuCacheMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCpuCacheMode_), cpuCacheMode); } _MTL_INLINE MTL::SparsePageSize MTL::HeapDescriptor::sparsePageSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sparsePageSize)); } _MTL_INLINE void MTL::HeapDescriptor::setSparsePageSize(MTL::SparsePageSize sparsePageSize) { Object::sendMessage(this,
// HeapDescriptor tail (hazard tracking / resource options / type), then the read-only Heap
// property forwards (label is the only settable Heap property here).
_MTL_PRIVATE_SEL(setSparsePageSize_), sparsePageSize); } _MTL_INLINE MTL::HazardTrackingMode MTL::HeapDescriptor::hazardTrackingMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(hazardTrackingMode)); } _MTL_INLINE void MTL::HeapDescriptor::setHazardTrackingMode(MTL::HazardTrackingMode hazardTrackingMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setHazardTrackingMode_), hazardTrackingMode); } _MTL_INLINE MTL::ResourceOptions MTL::HeapDescriptor::resourceOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceOptions)); } _MTL_INLINE void MTL::HeapDescriptor::setResourceOptions(MTL::ResourceOptions resourceOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResourceOptions_), resourceOptions); } _MTL_INLINE MTL::HeapType MTL::HeapDescriptor::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE void MTL::HeapDescriptor::setType(MTL::HeapType type) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setType_), type); } _MTL_INLINE NS::String* MTL::Heap::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::Heap::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Device* MTL::Heap::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::StorageMode MTL::Heap::storageMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storageMode)); } _MTL_INLINE MTL::CPUCacheMode MTL::Heap::cpuCacheMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(cpuCacheMode)); } _MTL_INLINE MTL::HazardTrackingMode MTL::Heap::hazardTrackingMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(hazardTrackingMode)); } _MTL_INLINE MTL::ResourceOptions MTL::Heap::resourceOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resourceOptions)); } _MTL_INLINE NS::UInteger MTL::Heap::size() const { return Object::sendMessage(this,
// Heap resource factories: the "new"-prefixed methods return owned (+1) objects per the Cocoa
// naming rule; the offset overloads map to the ...offset: selectors used with placement heaps.
_MTL_PRIVATE_SEL(size)); } _MTL_INLINE NS::UInteger MTL::Heap::usedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(usedSize)); } _MTL_INLINE NS::UInteger MTL::Heap::currentAllocatedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(currentAllocatedSize)); } _MTL_INLINE NS::UInteger MTL::Heap::maxAvailableSize(NS::UInteger alignment) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxAvailableSizeWithAlignment_), alignment); } _MTL_INLINE MTL::Buffer* MTL::Heap::newBuffer(NS::UInteger length, MTL::ResourceOptions options) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_), length, options); } _MTL_INLINE MTL::Texture* MTL::Heap::newTexture(const MTL::TextureDescriptor* descriptor) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_), descriptor); } _MTL_INLINE MTL::PurgeableState MTL::Heap::setPurgeableState(MTL::PurgeableState state) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(setPurgeableState_), state); } _MTL_INLINE MTL::HeapType MTL::Heap::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE MTL::Buffer* MTL::Heap::newBuffer(NS::UInteger length, MTL::ResourceOptions options, NS::UInteger offset) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newBufferWithLength_options_offset_), length, options, offset); } _MTL_INLINE MTL::Texture* MTL::Heap::newTexture(const MTL::TextureDescriptor* descriptor, NS::UInteger offset) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newTextureWithDescriptor_offset_), descriptor, offset); } _MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(NS::UInteger size) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_), size); } _MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor) { return Object::sendMessage(this,
// Heap tail (acceleration-structure factories), then the start of the MTLIndirectCommandBuffer
// header: IndirectCommandType option bits, the packed ExecutionRange struct, and the
// IndirectCommandBufferDescriptor declaration.
_MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_), descriptor); } _MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(NS::UInteger size, NS::UInteger offset) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithSize_offset_), size, offset); } _MTL_INLINE MTL::AccelerationStructure* MTL::Heap::newAccelerationStructure(const MTL::AccelerationStructureDescriptor* descriptor, NS::UInteger offset) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newAccelerationStructureWithDescriptor_offset_), descriptor, offset); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, IndirectCommandType) { IndirectCommandTypeDraw = 1, IndirectCommandTypeDrawIndexed = 2, IndirectCommandTypeDrawPatches = 4, IndirectCommandTypeDrawIndexedPatches = 8, IndirectCommandTypeConcurrentDispatch = 32, IndirectCommandTypeConcurrentDispatchThreads = 64, IndirectCommandTypeDrawMeshThreadgroups = 128, IndirectCommandTypeDrawMeshThreads = 256, }; struct IndirectCommandBufferExecutionRange { uint32_t location; uint32_t length; } _MTL_PACKED; class IndirectCommandBufferDescriptor : public NS::Copying { public: static class IndirectCommandBufferDescriptor* alloc(); class IndirectCommandBufferDescriptor* init(); MTL::IndirectCommandType commandTypes() const; void setCommandTypes(MTL::IndirectCommandType commandTypes); bool inheritPipelineState() const; void setInheritPipelineState(bool inheritPipelineState); bool inheritBuffers() const; void setInheritBuffers(bool inheritBuffers); NS::UInteger maxVertexBufferBindCount() const; void setMaxVertexBufferBindCount(NS::UInteger maxVertexBufferBindCount); NS::UInteger maxFragmentBufferBindCount() const; void setMaxFragmentBufferBindCount(NS::UInteger maxFragmentBufferBindCount); NS::UInteger maxKernelBufferBindCount() const; void setMaxKernelBufferBindCount(NS::UInteger maxKernelBufferBindCount); NS::UInteger maxKernelThreadgroupMemoryBindCount() const; void
// IndirectCommandBufferDescriptor declaration tail (mesh/object bind counts, ray-tracing and
// dynamic-attribute-stride support flags), the IndirectCommandBuffer wrapper, and the first
// descriptor inline forwards.
setMaxKernelThreadgroupMemoryBindCount(NS::UInteger maxKernelThreadgroupMemoryBindCount); NS::UInteger maxObjectBufferBindCount() const; void setMaxObjectBufferBindCount(NS::UInteger maxObjectBufferBindCount); NS::UInteger maxMeshBufferBindCount() const; void setMaxMeshBufferBindCount(NS::UInteger maxMeshBufferBindCount); NS::UInteger maxObjectThreadgroupMemoryBindCount() const; void setMaxObjectThreadgroupMemoryBindCount(NS::UInteger maxObjectThreadgroupMemoryBindCount); bool supportRayTracing() const; void setSupportRayTracing(bool supportRayTracing); bool supportDynamicAttributeStride() const; void setSupportDynamicAttributeStride(bool supportDynamicAttributeStride); }; class IndirectCommandBuffer : public NS::Referencing { public: NS::UInteger size() const; MTL::ResourceID gpuResourceID() const; void reset(NS::Range range); class IndirectRenderCommand* indirectRenderCommand(NS::UInteger commandIndex); class IndirectComputeCommand* indirectComputeCommand(NS::UInteger commandIndex); }; } _MTL_INLINE MTL::IndirectCommandBufferDescriptor* MTL::IndirectCommandBufferDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLIndirectCommandBufferDescriptor)); } _MTL_INLINE MTL::IndirectCommandBufferDescriptor* MTL::IndirectCommandBufferDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::IndirectCommandType MTL::IndirectCommandBufferDescriptor::commandTypes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandTypes)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setCommandTypes(MTL::IndirectCommandType commandTypes) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCommandTypes_), commandTypes); } _MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::inheritPipelineState() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(inheritPipelineState)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setInheritPipelineState(bool inheritPipelineState) { Object::sendMessage(this,
_MTL_PRIVATE_SEL(setInheritPipelineState_), inheritPipelineState); } _MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::inheritBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(inheritBuffers)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setInheritBuffers(bool inheritBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInheritBuffers_), inheritBuffers); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxVertexBufferBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxVertexBufferBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxVertexBufferBindCount(NS::UInteger maxVertexBufferBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxVertexBufferBindCount_), maxVertexBufferBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxFragmentBufferBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxFragmentBufferBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxFragmentBufferBindCount(NS::UInteger maxFragmentBufferBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxFragmentBufferBindCount_), maxFragmentBufferBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxKernelBufferBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxKernelBufferBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxKernelBufferBindCount(NS::UInteger maxKernelBufferBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxKernelBufferBindCount_), maxKernelBufferBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxKernelThreadgroupMemoryBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxKernelThreadgroupMemoryBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxKernelThreadgroupMemoryBindCount(NS::UInteger maxKernelThreadgroupMemoryBindCount) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setMaxKernelThreadgroupMemoryBindCount_), maxKernelThreadgroupMemoryBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxObjectBufferBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxObjectBufferBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxObjectBufferBindCount(NS::UInteger maxObjectBufferBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxObjectBufferBindCount_), maxObjectBufferBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxMeshBufferBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxMeshBufferBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxMeshBufferBindCount(NS::UInteger maxMeshBufferBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxMeshBufferBindCount_), maxMeshBufferBindCount); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBufferDescriptor::maxObjectThreadgroupMemoryBindCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxObjectThreadgroupMemoryBindCount)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setMaxObjectThreadgroupMemoryBindCount(NS::UInteger maxObjectThreadgroupMemoryBindCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxObjectThreadgroupMemoryBindCount_), maxObjectThreadgroupMemoryBindCount); } _MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::supportRayTracing() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportRayTracing)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setSupportRayTracing(bool supportRayTracing) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportRayTracing_), supportRayTracing); } _MTL_INLINE bool MTL::IndirectCommandBufferDescriptor::supportDynamicAttributeStride() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportDynamicAttributeStride)); } _MTL_INLINE void MTL::IndirectCommandBufferDescriptor::setSupportDynamicAttributeStride(bool 
supportDynamicAttributeStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportDynamicAttributeStride_), supportDynamicAttributeStride); } _MTL_INLINE NS::UInteger MTL::IndirectCommandBuffer::size() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(size)); } _MTL_INLINE MTL::ResourceID MTL::IndirectCommandBuffer::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } _MTL_INLINE void MTL::IndirectCommandBuffer::reset(NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(resetWithRange_), range); } _MTL_INLINE MTL::IndirectRenderCommand* MTL::IndirectCommandBuffer::indirectRenderCommand(NS::UInteger commandIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indirectRenderCommandAtIndex_), commandIndex); } _MTL_INLINE MTL::IndirectComputeCommand* MTL::IndirectCommandBuffer::indirectComputeCommand(NS::UInteger commandIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(indirectComputeCommandAtIndex_), commandIndex); } #pragma once #pragma once #pragma once namespace MTL { struct ClearColor { static ClearColor Make(double red, double green, double blue, double alpha); ClearColor() = default; ClearColor(double red, double green, double blue, double alpha); double red; double green; double blue; double alpha; } _MTL_PACKED; _MTL_ENUM(NS::UInteger, LoadAction) { LoadActionDontCare = 0, LoadActionLoad = 1, LoadActionClear = 2, }; _MTL_ENUM(NS::UInteger, StoreAction) { StoreActionDontCare = 0, StoreActionStore = 1, StoreActionMultisampleResolve = 2, StoreActionStoreAndMultisampleResolve = 3, StoreActionUnknown = 4, StoreActionCustomSampleDepthStore = 5, }; _MTL_OPTIONS(NS::UInteger, StoreActionOptions) { StoreActionOptionNone = 0, StoreActionOptionCustomSamplePositions = 1, StoreActionOptionValidMask = 1, }; class RenderPassAttachmentDescriptor : public NS::Copying { public: static class RenderPassAttachmentDescriptor* alloc(); class RenderPassAttachmentDescriptor* init(); class Texture* texture() 
// NOTE(review): MTLRenderPass section continued — attachment/descriptor class
// declarations followed by their inline Objective-C selector-forwarding
// wrappers (Object::sendMessage with _MTL_PRIVATE_SEL, alloc via
// _MTL_PRIVATE_CLS). Auto-generated metal-cpp code, kept byte-identical;
// comments only. The leading fragment completes the
// RenderPassAttachmentDescriptor declaration begun in the previous chunk.
const; void setTexture(const class Texture* texture); NS::UInteger level() const; void setLevel(NS::UInteger level); NS::UInteger slice() const; void setSlice(NS::UInteger slice); NS::UInteger depthPlane() const; void setDepthPlane(NS::UInteger depthPlane); class Texture* resolveTexture() const; void setResolveTexture(const class Texture* resolveTexture); NS::UInteger resolveLevel() const; void setResolveLevel(NS::UInteger resolveLevel); NS::UInteger resolveSlice() const; void setResolveSlice(NS::UInteger resolveSlice); NS::UInteger resolveDepthPlane() const; void setResolveDepthPlane(NS::UInteger resolveDepthPlane); MTL::LoadAction loadAction() const; void setLoadAction(MTL::LoadAction loadAction); MTL::StoreAction storeAction() const; void setStoreAction(MTL::StoreAction storeAction); MTL::StoreActionOptions storeActionOptions() const; void setStoreActionOptions(MTL::StoreActionOptions storeActionOptions); }; class RenderPassColorAttachmentDescriptor : public NS::Copying { public: static class RenderPassColorAttachmentDescriptor* alloc(); class RenderPassColorAttachmentDescriptor* init(); MTL::ClearColor clearColor() const; void setClearColor(MTL::ClearColor clearColor); }; _MTL_ENUM(NS::UInteger, MultisampleDepthResolveFilter) { MultisampleDepthResolveFilterSample0 = 0, MultisampleDepthResolveFilterMin = 1, MultisampleDepthResolveFilterMax = 2, }; class RenderPassDepthAttachmentDescriptor : public NS::Copying { public: static class RenderPassDepthAttachmentDescriptor* alloc(); class RenderPassDepthAttachmentDescriptor* init(); double clearDepth() const; void setClearDepth(double clearDepth); MTL::MultisampleDepthResolveFilter depthResolveFilter() const; void setDepthResolveFilter(MTL::MultisampleDepthResolveFilter depthResolveFilter); }; _MTL_ENUM(NS::UInteger, MultisampleStencilResolveFilter) { MultisampleStencilResolveFilterSample0 = 0, MultisampleStencilResolveFilterDepthResolvedSample = 1, }; class RenderPassStencilAttachmentDescriptor : public NS::Copying {
public: static class RenderPassStencilAttachmentDescriptor* alloc(); class RenderPassStencilAttachmentDescriptor* init(); uint32_t clearStencil() const; void setClearStencil(uint32_t clearStencil); MTL::MultisampleStencilResolveFilter stencilResolveFilter() const; void setStencilResolveFilter(MTL::MultisampleStencilResolveFilter stencilResolveFilter); }; class RenderPassColorAttachmentDescriptorArray : public NS::Referencing { public: static class RenderPassColorAttachmentDescriptorArray* alloc(); class RenderPassColorAttachmentDescriptorArray* init(); class RenderPassColorAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class RenderPassColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class RenderPassSampleBufferAttachmentDescriptor : public NS::Copying { public: static class RenderPassSampleBufferAttachmentDescriptor* alloc(); class RenderPassSampleBufferAttachmentDescriptor* init(); class CounterSampleBuffer* sampleBuffer() const; void setSampleBuffer(const class CounterSampleBuffer* sampleBuffer); NS::UInteger startOfVertexSampleIndex() const; void setStartOfVertexSampleIndex(NS::UInteger startOfVertexSampleIndex); NS::UInteger endOfVertexSampleIndex() const; void setEndOfVertexSampleIndex(NS::UInteger endOfVertexSampleIndex); NS::UInteger startOfFragmentSampleIndex() const; void setStartOfFragmentSampleIndex(NS::UInteger startOfFragmentSampleIndex); NS::UInteger endOfFragmentSampleIndex() const; void setEndOfFragmentSampleIndex(NS::UInteger endOfFragmentSampleIndex); }; class RenderPassSampleBufferAttachmentDescriptorArray : public NS::Referencing { public: static class RenderPassSampleBufferAttachmentDescriptorArray* alloc(); class RenderPassSampleBufferAttachmentDescriptorArray* init(); class RenderPassSampleBufferAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class RenderPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class
RenderPassDescriptor : public NS::Copying { public: static class RenderPassDescriptor* alloc(); class RenderPassDescriptor* init(); static class RenderPassDescriptor* renderPassDescriptor(); class RenderPassColorAttachmentDescriptorArray* colorAttachments() const; class RenderPassDepthAttachmentDescriptor* depthAttachment() const; void setDepthAttachment(const class RenderPassDepthAttachmentDescriptor* depthAttachment); class RenderPassStencilAttachmentDescriptor* stencilAttachment() const; void setStencilAttachment(const class RenderPassStencilAttachmentDescriptor* stencilAttachment); class Buffer* visibilityResultBuffer() const; void setVisibilityResultBuffer(const class Buffer* visibilityResultBuffer); NS::UInteger renderTargetArrayLength() const; void setRenderTargetArrayLength(NS::UInteger renderTargetArrayLength); NS::UInteger imageblockSampleLength() const; void setImageblockSampleLength(NS::UInteger imageblockSampleLength); NS::UInteger threadgroupMemoryLength() const; void setThreadgroupMemoryLength(NS::UInteger threadgroupMemoryLength); NS::UInteger tileWidth() const; void setTileWidth(NS::UInteger tileWidth); NS::UInteger tileHeight() const; void setTileHeight(NS::UInteger tileHeight); NS::UInteger defaultRasterSampleCount() const; void setDefaultRasterSampleCount(NS::UInteger defaultRasterSampleCount); NS::UInteger renderTargetWidth() const; void setRenderTargetWidth(NS::UInteger renderTargetWidth); NS::UInteger renderTargetHeight() const; void setRenderTargetHeight(NS::UInteger renderTargetHeight); void setSamplePositions(const MTL::SamplePosition* positions, NS::UInteger count); NS::UInteger getSamplePositions(MTL::SamplePosition* positions, NS::UInteger count); class RasterizationRateMap* rasterizationRateMap() const; void setRasterizationRateMap(const class RasterizationRateMap* rasterizationRateMap); class RenderPassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const; }; } _MTL_INLINE MTL::ClearColor MTL::ClearColor::Make(double
red, double green, double blue, double alpha) { return ClearColor(red, green, blue, alpha); } _MTL_INLINE MTL::ClearColor::ClearColor(double _red, double _green, double _blue, double _alpha) : red(_red) , green(_green) , blue(_blue) , alpha(_alpha) { } _MTL_INLINE MTL::RenderPassAttachmentDescriptor* MTL::RenderPassAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPassAttachmentDescriptor* MTL::RenderPassAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::Texture* MTL::RenderPassAttachmentDescriptor::texture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(texture)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setTexture(const MTL::Texture* texture) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTexture_), texture); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::level() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(level)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setLevel(NS::UInteger level) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLevel_), level); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::slice() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(slice)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setSlice(NS::UInteger slice) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSlice_), slice); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::depthPlane() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthPlane)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setDepthPlane(NS::UInteger depthPlane) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthPlane_), depthPlane); } _MTL_INLINE MTL::Texture* MTL::RenderPassAttachmentDescriptor::resolveTexture() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resolveTexture)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveTexture(const
MTL::Texture* resolveTexture) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResolveTexture_), resolveTexture); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveLevel() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resolveLevel)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveLevel(NS::UInteger resolveLevel) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResolveLevel_), resolveLevel); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveSlice() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resolveSlice)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveSlice(NS::UInteger resolveSlice) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResolveSlice_), resolveSlice); } _MTL_INLINE NS::UInteger MTL::RenderPassAttachmentDescriptor::resolveDepthPlane() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(resolveDepthPlane)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setResolveDepthPlane(NS::UInteger resolveDepthPlane) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setResolveDepthPlane_), resolveDepthPlane); } _MTL_INLINE MTL::LoadAction MTL::RenderPassAttachmentDescriptor::loadAction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(loadAction)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setLoadAction(MTL::LoadAction loadAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLoadAction_), loadAction); } _MTL_INLINE MTL::StoreAction MTL::RenderPassAttachmentDescriptor::storeAction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storeAction)); } _MTL_INLINE void MTL::RenderPassAttachmentDescriptor::setStoreAction(MTL::StoreAction storeAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStoreAction_), storeAction); } _MTL_INLINE MTL::StoreActionOptions MTL::RenderPassAttachmentDescriptor::storeActionOptions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(storeActionOptions)); } _MTL_INLINE void
MTL::RenderPassAttachmentDescriptor::setStoreActionOptions(MTL::StoreActionOptions storeActionOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStoreActionOptions_), storeActionOptions); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassColorAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::ClearColor MTL::RenderPassColorAttachmentDescriptor::clearColor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(clearColor)); } _MTL_INLINE void MTL::RenderPassColorAttachmentDescriptor::setClearColor(MTL::ClearColor clearColor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setClearColor_), clearColor); } _MTL_INLINE MTL::RenderPassDepthAttachmentDescriptor* MTL::RenderPassDepthAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassDepthAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPassDepthAttachmentDescriptor* MTL::RenderPassDepthAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE double MTL::RenderPassDepthAttachmentDescriptor::clearDepth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(clearDepth)); } _MTL_INLINE void MTL::RenderPassDepthAttachmentDescriptor::setClearDepth(double clearDepth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setClearDepth_), clearDepth); } _MTL_INLINE MTL::MultisampleDepthResolveFilter MTL::RenderPassDepthAttachmentDescriptor::depthResolveFilter() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthResolveFilter)); } _MTL_INLINE void MTL::RenderPassDepthAttachmentDescriptor::setDepthResolveFilter(MTL::MultisampleDepthResolveFilter depthResolveFilter) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthResolveFilter_), depthResolveFilter); } _MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor*
MTL::RenderPassStencilAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassStencilAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor* MTL::RenderPassStencilAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE uint32_t MTL::RenderPassStencilAttachmentDescriptor::clearStencil() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(clearStencil)); } _MTL_INLINE void MTL::RenderPassStencilAttachmentDescriptor::setClearStencil(uint32_t clearStencil) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setClearStencil_), clearStencil); } _MTL_INLINE MTL::MultisampleStencilResolveFilter MTL::RenderPassStencilAttachmentDescriptor::stencilResolveFilter() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilResolveFilter)); } _MTL_INLINE void MTL::RenderPassStencilAttachmentDescriptor::setStencilResolveFilter(MTL::MultisampleStencilResolveFilter stencilResolveFilter) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilResolveFilter_), stencilResolveFilter); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassColorAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassColorAttachmentDescriptorArray)); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassColorAttachmentDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptor* MTL::RenderPassColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); } _MTL_INLINE void MTL::RenderPassColorAttachmentDescriptorArray::setObject(const MTL::RenderPassColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor*
MTL::RenderPassSampleBufferAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassSampleBufferAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor* MTL::RenderPassSampleBufferAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::CounterSampleBuffer* MTL::RenderPassSampleBufferAttachmentDescriptor::sampleBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBuffer)); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer); } _MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::startOfVertexSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfVertexSampleIndex)); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setStartOfVertexSampleIndex(NS::UInteger startOfVertexSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfVertexSampleIndex_), startOfVertexSampleIndex); } _MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::endOfVertexSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfVertexSampleIndex)); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setEndOfVertexSampleIndex(NS::UInteger endOfVertexSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfVertexSampleIndex_), endOfVertexSampleIndex); } _MTL_INLINE NS::UInteger MTL::RenderPassSampleBufferAttachmentDescriptor::startOfFragmentSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfFragmentSampleIndex)); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setStartOfFragmentSampleIndex(NS::UInteger startOfFragmentSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfFragmentSampleIndex_), startOfFragmentSampleIndex); } _MTL_INLINE NS::UInteger
MTL::RenderPassSampleBufferAttachmentDescriptor::endOfFragmentSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfFragmentSampleIndex)); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptor::setEndOfFragmentSampleIndex(NS::UInteger endOfFragmentSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfFragmentSampleIndex_), endOfFragmentSampleIndex); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* MTL::RenderPassSampleBufferAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassSampleBufferAttachmentDescriptorArray)); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* MTL::RenderPassSampleBufferAttachmentDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptor* MTL::RenderPassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); } _MTL_INLINE void MTL::RenderPassSampleBufferAttachmentDescriptorArray::setObject(const MTL::RenderPassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); } _MTL_INLINE MTL::RenderPassDescriptor* MTL::RenderPassDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPassDescriptor)); } _MTL_INLINE MTL::RenderPassDescriptor* MTL::RenderPassDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::RenderPassDescriptor* MTL::RenderPassDescriptor::renderPassDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLRenderPassDescriptor), _MTL_PRIVATE_SEL(renderPassDescriptor)); } _MTL_INLINE MTL::RenderPassColorAttachmentDescriptorArray* MTL::RenderPassDescriptor::colorAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(colorAttachments)); } _MTL_INLINE
MTL::RenderPassDepthAttachmentDescriptor* MTL::RenderPassDescriptor::depthAttachment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthAttachment)); } _MTL_INLINE void MTL::RenderPassDescriptor::setDepthAttachment(const MTL::RenderPassDepthAttachmentDescriptor* depthAttachment) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthAttachment_), depthAttachment); } _MTL_INLINE MTL::RenderPassStencilAttachmentDescriptor* MTL::RenderPassDescriptor::stencilAttachment() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilAttachment)); } _MTL_INLINE void MTL::RenderPassDescriptor::setStencilAttachment(const MTL::RenderPassStencilAttachmentDescriptor* stencilAttachment) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilAttachment_), stencilAttachment); } _MTL_INLINE MTL::Buffer* MTL::RenderPassDescriptor::visibilityResultBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(visibilityResultBuffer)); } _MTL_INLINE void MTL::RenderPassDescriptor::setVisibilityResultBuffer(const MTL::Buffer* visibilityResultBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibilityResultBuffer_), visibilityResultBuffer); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetArrayLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(renderTargetArrayLength)); } _MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetArrayLength(NS::UInteger renderTargetArrayLength) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRenderTargetArrayLength_), renderTargetArrayLength); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::imageblockSampleLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(imageblockSampleLength)); } _MTL_INLINE void MTL::RenderPassDescriptor::setImageblockSampleLength(NS::UInteger imageblockSampleLength) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setImageblockSampleLength_), imageblockSampleLength); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::threadgroupMemoryLength() const
{ return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupMemoryLength)); } _MTL_INLINE void MTL::RenderPassDescriptor::setThreadgroupMemoryLength(NS::UInteger threadgroupMemoryLength) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_), threadgroupMemoryLength); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::tileWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileWidth)); } _MTL_INLINE void MTL::RenderPassDescriptor::setTileWidth(NS::UInteger tileWidth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileWidth_), tileWidth); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::tileHeight() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileHeight)); } _MTL_INLINE void MTL::RenderPassDescriptor::setTileHeight(NS::UInteger tileHeight) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileHeight_), tileHeight); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::defaultRasterSampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(defaultRasterSampleCount)); } _MTL_INLINE void MTL::RenderPassDescriptor::setDefaultRasterSampleCount(NS::UInteger defaultRasterSampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDefaultRasterSampleCount_), defaultRasterSampleCount); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(renderTargetWidth)); } _MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetWidth(NS::UInteger renderTargetWidth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRenderTargetWidth_), renderTargetWidth); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::renderTargetHeight() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(renderTargetHeight)); } _MTL_INLINE void MTL::RenderPassDescriptor::setRenderTargetHeight(NS::UInteger renderTargetHeight) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRenderTargetHeight_), renderTargetHeight); } _MTL_INLINE void
// NOTE(review): final RenderPassDescriptor wrappers (sample positions,
// rasterization-rate map, counter-sample-buffer attachments), then the start
// of the MTLRenderCommandEncoder section. Auto-generated metal-cpp code, kept
// byte-identical; comments only.
MTL::RenderPassDescriptor::setSamplePositions(const MTL::SamplePosition* positions, NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSamplePositions_count_), positions, count); } _MTL_INLINE NS::UInteger MTL::RenderPassDescriptor::getSamplePositions(MTL::SamplePosition* positions, NS::UInteger count) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(getSamplePositions_count_), positions, count); } _MTL_INLINE MTL::RasterizationRateMap* MTL::RenderPassDescriptor::rasterizationRateMap() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rasterizationRateMap)); } _MTL_INLINE void MTL::RenderPassDescriptor::setRasterizationRateMap(const MTL::RasterizationRateMap* rasterizationRateMap) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterizationRateMap_), rasterizationRateMap); } _MTL_INLINE MTL::RenderPassSampleBufferAttachmentDescriptorArray* MTL::RenderPassDescriptor::sampleBufferAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBufferAttachments)); }
// --- MTLRenderCommandEncoder section: primitive/raster-state enums, the
// packed indirect-argument structs, and the RenderCommandEncoder class
// declaration. The declaration is truncated here (it continues, mid
// setColorStoreActionOptions parameter list, into the next chunk) — left
// untouched.
namespace MTL { _MTL_ENUM(NS::UInteger, PrimitiveType) { PrimitiveTypePoint = 0, PrimitiveTypeLine = 1, PrimitiveTypeLineStrip = 2, PrimitiveTypeTriangle = 3, PrimitiveTypeTriangleStrip = 4, }; _MTL_ENUM(NS::UInteger, VisibilityResultMode) { VisibilityResultModeDisabled = 0, VisibilityResultModeBoolean = 1, VisibilityResultModeCounting = 2, }; struct ScissorRect { NS::UInteger x; NS::UInteger y; NS::UInteger width; NS::UInteger height; } _MTL_PACKED; struct Viewport { double originX; double originY; double width; double height; double znear; double zfar; } _MTL_PACKED; _MTL_ENUM(NS::UInteger, CullMode) { CullModeNone = 0, CullModeFront = 1, CullModeBack = 2, }; _MTL_ENUM(NS::UInteger, Winding) { WindingClockwise = 0, WindingCounterClockwise = 1, }; _MTL_ENUM(NS::UInteger, DepthClipMode) { DepthClipModeClip = 0, DepthClipModeClamp = 1, }; _MTL_ENUM(NS::UInteger, TriangleFillMode) { TriangleFillModeFill = 0, TriangleFillModeLines = 1, }; struct
DrawPrimitivesIndirectArguments { uint32_t vertexCount; uint32_t instanceCount; uint32_t vertexStart; uint32_t baseInstance; } _MTL_PACKED; struct DrawIndexedPrimitivesIndirectArguments { uint32_t indexCount; uint32_t instanceCount; uint32_t indexStart; int32_t baseVertex; uint32_t baseInstance; } _MTL_PACKED; struct VertexAmplificationViewMapping { uint32_t viewportArrayIndexOffset; uint32_t renderTargetArrayIndexOffset; } _MTL_PACKED; struct DrawPatchIndirectArguments { uint32_t patchCount; uint32_t instanceCount; uint32_t patchStart; uint32_t baseInstance; } _MTL_PACKED; struct QuadTessellationFactorsHalf { uint16_t edgeTessellationFactor[4]; uint16_t insideTessellationFactor[2]; } _MTL_PACKED; struct TriangleTessellationFactorsHalf { uint16_t edgeTessellationFactor[3]; uint16_t insideTessellationFactor; } _MTL_PACKED; _MTL_OPTIONS(NS::UInteger, RenderStages) { RenderStageVertex = 1, RenderStageFragment = 2, RenderStageTile = 4, RenderStageObject = 8, RenderStageMesh = 16, }; class RenderCommandEncoder : public NS::Referencing { public: void setRenderPipelineState(const class RenderPipelineState* pipelineState); void setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setVertexBufferOffset(NS::UInteger offset, NS::UInteger index); void setVertexBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range); void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void setVertexBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range); void setVertexBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index); void setVertexTexture(const class Texture* texture, NS::UInteger index); void
setVertexTextures(const class Texture* const textures[], NS::Range range); void setVertexSamplerState(const class SamplerState* sampler, NS::UInteger index); void setVertexSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setVertexSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void setVertexSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range); void setVertexVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex); void setVertexVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range); void setVertexIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex); void setVertexIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range); void setVertexAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex); void setViewport(MTL::Viewport viewport); void setViewports(const MTL::Viewport* viewports, NS::UInteger count); void setFrontFacingWinding(MTL::Winding frontFacingWinding); void setVertexAmplificationCount(NS::UInteger count, const MTL::VertexAmplificationViewMapping* viewMappings); void setCullMode(MTL::CullMode cullMode); void setDepthClipMode(MTL::DepthClipMode depthClipMode); void setDepthBias(float depthBias, float slopeScale, float clamp); void setScissorRect(MTL::ScissorRect rect); void setScissorRects(const MTL::ScissorRect* scissorRects, NS::UInteger count); void setTriangleFillMode(MTL::TriangleFillMode fillMode); void setFragmentBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setFragmentBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setFragmentBufferOffset(NS::UInteger offset, NS::UInteger
index); void setFragmentBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range); void setFragmentTexture(const class Texture* texture, NS::UInteger index); void setFragmentTextures(const class Texture* const textures[], NS::Range range); void setFragmentSamplerState(const class SamplerState* sampler, NS::UInteger index); void setFragmentSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setFragmentSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void setFragmentSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range); void setFragmentVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex); void setFragmentVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range); void setFragmentIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex); void setFragmentIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range); void setFragmentAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex); void setBlendColor(float red, float green, float blue, float alpha); void setDepthStencilState(const class DepthStencilState* depthStencilState); void setStencilReferenceValue(uint32_t referenceValue); void setStencilReferenceValues(uint32_t frontReferenceValue, uint32_t backReferenceValue); void setVisibilityResultMode(MTL::VisibilityResultMode mode, NS::UInteger offset); void setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex); void setDepthStoreAction(MTL::StoreAction storeAction); void setStencilStoreAction(MTL::StoreAction storeAction); void setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions,
NS::UInteger colorAttachmentIndex); void setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions); void setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions); void setObjectBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setObjectBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setObjectBufferOffset(NS::UInteger offset, NS::UInteger index); void setObjectBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range); void setObjectTexture(const class Texture* texture, NS::UInteger index); void setObjectTextures(const class Texture* const textures[], NS::Range range); void setObjectSamplerState(const class SamplerState* sampler, NS::UInteger index); void setObjectSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setObjectSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void setObjectSamplerStates(const class SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range); void setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index); void setMeshBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setMeshBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setMeshBufferOffset(NS::UInteger offset, NS::UInteger index); void setMeshBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range); void setMeshTexture(const class Texture* texture, NS::UInteger index); void setMeshTextures(const class Texture* const textures[], NS::Range range); void setMeshSamplerState(const class SamplerState* sampler, NS::UInteger index); void setMeshSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setMeshSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void 
setMeshSamplerStates(const class SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range); void drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup); void drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup); void drawMeshThreadgroups(const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup); void drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount); void drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount); void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount); void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset); void drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance); void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance); void drawPrimitives(MTL::PrimitiveType primitiveType, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset); void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset); void textureBarrier(); void 
updateFence(const class Fence* fence, MTL::RenderStages stages); void waitForFence(const class Fence* fence, MTL::RenderStages stages); void setTessellationFactorBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride); void setTessellationFactorScale(float scale); void drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance); void drawPatches(NS::UInteger numberOfPatchControlPoints, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset); void drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance); void drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset); NS::UInteger tileWidth() const; NS::UInteger tileHeight() const; void setTileBytes(const void* bytes, NS::UInteger length, NS::UInteger index); void setTileBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setTileBufferOffset(NS::UInteger offset, NS::UInteger index); void setTileBuffers(const class Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range); void setTileTexture(const class Texture* texture, NS::UInteger index); void setTileTextures(const class Texture* const textures[], NS::Range range); void setTileSamplerState(const class SamplerState* sampler, NS::UInteger 
index); void setTileSamplerStates(const class SamplerState* const samplers[], NS::Range range); void setTileSamplerState(const class SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index); void setTileSamplerStates(const class SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range); void setTileVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex); void setTileVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range range); void setTileIntersectionFunctionTable(const class IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex); void setTileIntersectionFunctionTables(const class IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range); void setTileAccelerationStructure(const class AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex); void dispatchThreadsPerTile(MTL::Size threadsPerTile); void setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger offset, NS::UInteger index); void useResource(const class Resource* resource, MTL::ResourceUsage usage); void useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage); void useResource(const class Resource* resource, MTL::ResourceUsage usage, MTL::RenderStages stages); void useResources(const class Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage, MTL::RenderStages stages); void useHeap(const class Heap* heap); void useHeaps(const class Heap* const heaps[], NS::UInteger count); void useHeap(const class Heap* heap, MTL::RenderStages stages); void useHeaps(const class Heap* const heaps[], NS::UInteger count, MTL::RenderStages stages); void executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange); void executeCommandsInBuffer(const class IndirectCommandBuffer* indirectCommandbuffer, const 
class Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset); void memoryBarrier(MTL::BarrierScope scope, MTL::RenderStages after, MTL::RenderStages before); void memoryBarrier(const class Resource* const resources[], NS::UInteger count, MTL::RenderStages after, MTL::RenderStages before); void sampleCountersInBuffer(const class CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier); }; } _MTL_INLINE void MTL::RenderCommandEncoder::setRenderPipelineState(const MTL::RenderPipelineState* pipelineState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRenderPipelineState_), pipelineState); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBufferOffset(NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBufferOffset_atIndex_), offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, const NS::UInteger* strides, NS::Range range) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffers_offsets_attributeStrides_withRange_), buffers, offsets, strides, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBufferOffset(NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBufferOffset_attributeStride_atIndex_), offset, stride, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexBytes(const void* bytes, NS::UInteger length, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBytes_length_attributeStride_atIndex_), bytes, length, stride, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexTexture_atIndex_), texture, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexTextures(const MTL::Texture* const textures[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexTextures_withRange_), textures, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexSamplerState_atIndex_), sampler, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexSamplerStates_withRange_), samplers, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexVisibleFunctionTables_withBufferRange_), functionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setViewport(MTL::Viewport viewport) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setViewport_), viewport); } _MTL_INLINE void MTL::RenderCommandEncoder::setViewports(const MTL::Viewport* viewports, NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setViewports_count_), viewports, count); } _MTL_INLINE void 
MTL::RenderCommandEncoder::setFrontFacingWinding(MTL::Winding frontFacingWinding) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFrontFacingWinding_), frontFacingWinding); } _MTL_INLINE void MTL::RenderCommandEncoder::setVertexAmplificationCount(NS::UInteger count, const MTL::VertexAmplificationViewMapping* viewMappings) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexAmplificationCount_viewMappings_), count, viewMappings); } _MTL_INLINE void MTL::RenderCommandEncoder::setCullMode(MTL::CullMode cullMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCullMode_), cullMode); } _MTL_INLINE void MTL::RenderCommandEncoder::setDepthClipMode(MTL::DepthClipMode depthClipMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthClipMode_), depthClipMode); } _MTL_INLINE void MTL::RenderCommandEncoder::setDepthBias(float depthBias, float slopeScale, float clamp) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthBias_slopeScale_clamp_), depthBias, slopeScale, clamp); } _MTL_INLINE void MTL::RenderCommandEncoder::setScissorRect(MTL::ScissorRect rect) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setScissorRect_), rect); } _MTL_INLINE void MTL::RenderCommandEncoder::setScissorRects(const MTL::ScissorRect* scissorRects, NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setScissorRects_count_), scissorRects, count); } _MTL_INLINE void MTL::RenderCommandEncoder::setTriangleFillMode(MTL::TriangleFillMode fillMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTriangleFillMode_), fillMode); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBytes(const void* bytes, NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentBuffer_offset_atIndex_), buffer, offset, index); } 
// --- Fragment-stage resource binding ------------------------------------------------------------
// Each wrapper below is a thin inline shim that forwards its arguments unchanged to the
// corresponding Objective-C selector on the underlying MTLRenderCommandEncoder via
// Object::sendMessage. No validation or state is added on the C++ side; argument order
// matches the selector's parameter order exactly.

// Re-points the fragment buffer already bound at `index` to a new byte `offset`.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBufferOffset(NS::UInteger offset, NS::UInteger index)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentBufferOffset_atIndex_), offset, index);
}

// Binds a contiguous range of fragment buffers with per-buffer offsets.
// NOTE(review): `buffers`/`offsets` are raw arrays — presumably they must hold at least
// `range.length` entries; the wrapper itself performs no bounds checking.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentBuffers_offsets_withRange_), buffers, offsets, range);
}

// Binds a single texture to the fragment stage at `index`.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentTexture(const MTL::Texture* texture, NS::UInteger index)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentTexture_atIndex_), texture, index);
}

// Binds a contiguous range of textures to the fragment stage.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentTextures(const MTL::Texture* const textures[], NS::Range range)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentTextures_withRange_), textures, range);
}

// Binds a single sampler state to the fragment stage at `index`.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerState(const MTL::SamplerState* sampler, NS::UInteger index)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentSamplerState_atIndex_), sampler, index);
}

// Binds a contiguous range of sampler states to the fragment stage.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentSamplerStates_withRange_), samplers, range);
}

// Overload: binds a sampler while also overriding its LOD clamp range for this binding.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index);
}

// Overload: binds a range of samplers with per-sampler LOD min/max clamp arrays.
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range)
{
    Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range);
}
_MTL_INLINE void MTL::RenderCommandEncoder::setFragmentVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentVisibleFunctionTables_withBufferRange_), functionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setFragmentAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setBlendColor(float red, float green, float blue, float alpha) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBlendColorRed_green_blue_alpha_), red, green, blue, alpha); } _MTL_INLINE void MTL::RenderCommandEncoder::setDepthStencilState(const MTL::DepthStencilState* depthStencilState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStencilState_), depthStencilState); } _MTL_INLINE void MTL::RenderCommandEncoder::setStencilReferenceValue(uint32_t referenceValue) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilReferenceValue_), referenceValue); } _MTL_INLINE void MTL::RenderCommandEncoder::setStencilReferenceValues(uint32_t frontReferenceValue, uint32_t backReferenceValue) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilFrontReferenceValue_backReferenceValue_), frontReferenceValue, backReferenceValue); } _MTL_INLINE void MTL::RenderCommandEncoder::setVisibilityResultMode(MTL::VisibilityResultMode mode, NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibilityResultMode_offset_), mode, offset); } _MTL_INLINE void MTL::RenderCommandEncoder::setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setColorStoreAction_atIndex_), storeAction, colorAttachmentIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setDepthStoreAction(MTL::StoreAction storeAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStoreAction_), storeAction); } _MTL_INLINE void MTL::RenderCommandEncoder::setStencilStoreAction(MTL::StoreAction storeAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilStoreAction_), storeAction); } _MTL_INLINE void MTL::RenderCommandEncoder::setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setColorStoreActionOptions_atIndex_), storeActionOptions, colorAttachmentIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStoreActionOptions_), storeActionOptions); } _MTL_INLINE void MTL::RenderCommandEncoder::setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilStoreActionOptions_), storeActionOptions); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectBytes(const void* bytes, NS::UInteger length, NS::UInteger 
index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectBufferOffset(NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectBufferOffset_atIndex_), offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectTexture_atIndex_), texture, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectTextures(const MTL::Texture* const textures[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectTextures_withRange_), textures, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectSamplerState_atIndex_), sampler, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectSamplerStates_withRange_), samplers, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index); } _MTL_INLINE void 
MTL::RenderCommandEncoder::setObjectSamplerStates(const MTL::SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectThreadgroupMemoryLength_atIndex_), length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshBytes(const void* bytes, NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshBufferOffset(NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshBufferOffset_atIndex_), offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshTexture_atIndex_), texture, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshTextures(const MTL::Texture* const textures[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshTextures_withRange_), textures, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setMeshSamplerState_atIndex_), sampler, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshSamplerStates_withRange_), samplers, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setMeshSamplerStates(const MTL::SamplerState* const samplers[], const float* lodMinClamps, const float* lodMaxClamps, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range); } _MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadgroupsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup); } _MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup); } _MTL_INLINE void MTL::RenderCommandEncoder::drawMeshThreadgroups(const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup) { Object::sendMessage(this, 
// --- metal-cpp (Apple, vendored): MTL::RenderCommandEncoder inline wrappers ---
// Each _MTL_INLINE function below forwards its arguments unchanged to the Objective-C runtime
// via Object::sendMessage with the selector cached by _MTL_PRIVATE_SEL; no logic lives here.
// Selector spellings must match the Metal Objective-C API exactly — do not reformat or rename.
// NOTE(review): the next line continues a drawMeshThreadgroups(...) sendMessage call that opens
// before this chunk. Template-argument lists (e.g. sendMessage<void>, NS::Referencing<...>)
// appear stripped in this extraction — compare against upstream metal-cpp before editing.
_MTL_PRIVATE_SEL(drawMeshThreadgroupsWithIndirectBuffer_indirectBufferOffset_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), indirectBuffer, indirectBufferOffset, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_), primitiveType, vertexStart, vertexCount, instanceCount); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_), primitiveType, vertexStart, vertexCount); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_), primitiveType, vertexStart, vertexCount, instanceCount, baseInstance); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount, baseVertex, baseInstance); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPrimitives(MTL::PrimitiveType primitiveType, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPrimitives_indirectBuffer_indirectBufferOffset_), primitiveType, indirectBuffer, indirectBufferOffset); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexType_indexBuffer_indexBufferOffset_indirectBuffer_indirectBufferOffset_), primitiveType, indexType, indexBuffer, indexBufferOffset, indirectBuffer, indirectBufferOffset); } _MTL_INLINE void MTL::RenderCommandEncoder::textureBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(textureBarrier)); } _MTL_INLINE void MTL::RenderCommandEncoder::updateFence(const MTL::Fence* fence, MTL::RenderStages stages) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateFence_afterStages_), fence, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::waitForFence(const MTL::Fence* fence, MTL::RenderStages stages) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(waitForFence_beforeStages_), fence, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::setTessellationFactorBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationFactorBuffer_offset_instanceStride_), buffer, offset, instanceStride); } _MTL_INLINE void MTL::RenderCommandEncoder::setTessellationFactorScale(float scale) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationFactorScale_), scale); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, instanceCount, baseInstance); } _MTL_INLINE void MTL::RenderCommandEncoder::drawPatches(NS::UInteger numberOfPatchControlPoints, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPatches_patchIndexBuffer_patchIndexBufferOffset_indirectBuffer_indirectBufferOffset_), numberOfPatchControlPoints, patchIndexBuffer, patchIndexBufferOffset, indirectBuffer, indirectBufferOffset); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, instanceCount, baseInstance); } _MTL_INLINE void MTL::RenderCommandEncoder::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPatches_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_indirectBuffer_indirectBufferOffset_), numberOfPatchControlPoints, patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, indirectBuffer, indirectBufferOffset); } _MTL_INLINE NS::UInteger MTL::RenderCommandEncoder::tileWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileWidth)); } _MTL_INLINE NS::UInteger MTL::RenderCommandEncoder::tileHeight() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileHeight)); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileBytes(const void* bytes, NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileBytes_length_atIndex_), bytes, length, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileBufferOffset(NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileBufferOffset_atIndex_), offset, index); } _MTL_INLINE 
void MTL::RenderCommandEncoder::setTileBuffers(const MTL::Buffer* const buffers[], const NS::UInteger* offsets, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileTexture(const MTL::Texture* texture, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileTexture_atIndex_), texture, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileTextures(const MTL::Texture* const textures[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileTextures_withRange_), textures, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerState(const MTL::SamplerState* sampler, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileSamplerState_atIndex_), sampler, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerStates(const MTL::SamplerState* const samplers[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileSamplerStates_withRange_), samplers, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerState(const MTL::SamplerState* sampler, float lodMinClamp, float lodMaxClamp, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileSamplerState_lodMinClamp_lodMaxClamp_atIndex_), sampler, lodMinClamp, lodMaxClamp, index); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileSamplerStates(const MTL::SamplerState* const samplers[], const float lodMinClamps[], const float lodMaxClamps[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileSamplerStates_lodMinClamps_lodMaxClamps_withRange_), samplers, lodMinClamps, lodMaxClamps, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex); } _MTL_INLINE 
void MTL::RenderCommandEncoder::setTileVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileVisibleFunctionTables_withBufferRange_), functionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileIntersectionFunctionTable(const MTL::IntersectionFunctionTable* intersectionFunctionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileIntersectionFunctionTable_atBufferIndex_), intersectionFunctionTable, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileIntersectionFunctionTables(const MTL::IntersectionFunctionTable* const intersectionFunctionTables[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileIntersectionFunctionTables_withBufferRange_), intersectionFunctionTables, range); } _MTL_INLINE void MTL::RenderCommandEncoder::setTileAccelerationStructure(const MTL::AccelerationStructure* accelerationStructure, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileAccelerationStructure_atBufferIndex_), accelerationStructure, bufferIndex); } _MTL_INLINE void MTL::RenderCommandEncoder::dispatchThreadsPerTile(MTL::Size threadsPerTile) { Object::sendMessage(this, _MTL_PRIVATE_SEL(dispatchThreadsPerTile_), threadsPerTile); } _MTL_INLINE void MTL::RenderCommandEncoder::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_offset_atIndex_), length, offset, index); } _MTL_INLINE void MTL::RenderCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResource_usage_), resource, usage); } _MTL_INLINE void MTL::RenderCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResources_count_usage_), 
resources, count, usage); } _MTL_INLINE void MTL::RenderCommandEncoder::useResource(const MTL::Resource* resource, MTL::ResourceUsage usage, MTL::RenderStages stages) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResource_usage_stages_), resource, usage, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::useResources(const MTL::Resource* const resources[], NS::UInteger count, MTL::ResourceUsage usage, MTL::RenderStages stages) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useResources_count_usage_stages_), resources, count, usage, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::useHeap(const MTL::Heap* heap) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeap_), heap); } _MTL_INLINE void MTL::RenderCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeaps_count_), heaps, count); } _MTL_INLINE void MTL::RenderCommandEncoder::useHeap(const MTL::Heap* heap, MTL::RenderStages stages) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeap_stages_), heap, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::useHeaps(const MTL::Heap* const heaps[], NS::UInteger count, MTL::RenderStages stages) { Object::sendMessage(this, _MTL_PRIVATE_SEL(useHeaps_count_stages_), heaps, count, stages); } _MTL_INLINE void MTL::RenderCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandBuffer, NS::Range executionRange) { Object::sendMessage(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_withRange_), indirectCommandBuffer, executionRange); } _MTL_INLINE void MTL::RenderCommandEncoder::executeCommandsInBuffer(const MTL::IndirectCommandBuffer* indirectCommandbuffer, const MTL::Buffer* indirectRangeBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(executeCommandsInBuffer_indirectBuffer_indirectBufferOffset_), indirectCommandbuffer, indirectRangeBuffer, indirectBufferOffset); } _MTL_INLINE void 
// Below: the last RenderCommandEncoder barrier/counter-sampling wrappers, then the declarations
// of MTL::IndirectRenderCommand / MTL::IndirectComputeCommand — commands that are encoded into an
// indirect command buffer rather than executed immediately (declarations begin mid-line).
MTL::RenderCommandEncoder::memoryBarrier(MTL::BarrierScope scope, MTL::RenderStages after, MTL::RenderStages before) { Object::sendMessage(this, _MTL_PRIVATE_SEL(memoryBarrierWithScope_afterStages_beforeStages_), scope, after, before); } _MTL_INLINE void MTL::RenderCommandEncoder::memoryBarrier(const MTL::Resource* const resources[], NS::UInteger count, MTL::RenderStages after, MTL::RenderStages before) { Object::sendMessage(this, _MTL_PRIVATE_SEL(memoryBarrierWithResources_count_afterStages_beforeStages_), resources, count, after, before); } _MTL_INLINE void MTL::RenderCommandEncoder::sampleCountersInBuffer(const MTL::CounterSampleBuffer* sampleBuffer, NS::UInteger sampleIndex, bool barrier) { Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCountersInBuffer_atSampleIndex_withBarrier_), sampleBuffer, sampleIndex, barrier); } namespace MTL { class IndirectRenderCommand : public NS::Referencing { public: void setRenderPipelineState(const class RenderPipelineState* pipelineState); void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setFragmentBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setVertexBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const class Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride); void drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const class Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const class Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const class Buffer* buffer, NS::UInteger offset, 
NS::UInteger instanceStride); void drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance); void drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const class Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance); void setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index); void setObjectBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setMeshBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup); void drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup); void setBarrier(); void clearBarrier(); void reset(); }; class IndirectComputeCommand : public NS::Referencing { public: void setComputePipelineState(const class ComputePipelineState* pipelineState); void setKernelBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setKernelBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index); void concurrentDispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup); void concurrentDispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup); void setBarrier(); void clearBarrier(); void setImageblockWidth(NS::UInteger width, NS::UInteger height); void reset(); void setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index); void setStageInRegion(MTL::Region region); }; } _MTL_INLINE void MTL::IndirectRenderCommand::setRenderPipelineState(const MTL::RenderPipelineState* pipelineState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRenderPipelineState_), 
// --- metal-cpp (Apple, vendored): MTL::IndirectRenderCommand / MTL::IndirectComputeCommand ---
// Inline wrappers forwarding each call to Object::sendMessage with a cached ObjC selector.
// NOTE(review): the next line completes a setRenderPipelineState(...) call opened above; this
// section ends with the start of the intersection-function-table header (after #pragma once).
pipelineState); } _MTL_INLINE void MTL::IndirectRenderCommand::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IndirectRenderCommand::setFragmentBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IndirectRenderCommand::setVertexBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index); } _MTL_INLINE void MTL::IndirectRenderCommand::drawPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, instanceCount, baseInstance, buffer, offset, instanceStride); } _MTL_INLINE void MTL::IndirectRenderCommand::drawIndexedPatches(NS::UInteger numberOfPatchControlPoints, NS::UInteger patchStart, NS::UInteger patchCount, const MTL::Buffer* patchIndexBuffer, NS::UInteger patchIndexBufferOffset, const MTL::Buffer* controlPointIndexBuffer, NS::UInteger controlPointIndexBufferOffset, NS::UInteger instanceCount, NS::UInteger baseInstance, const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger instanceStride) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPatches_patchStart_patchCount_patchIndexBuffer_patchIndexBufferOffset_controlPointIndexBuffer_controlPointIndexBufferOffset_instanceCount_baseInstance_tessellationFactorBuffer_tessellationFactorBufferOffset_tessellationFactorBufferInstanceStride_), numberOfPatchControlPoints, patchStart, patchCount, patchIndexBuffer, patchIndexBufferOffset, controlPointIndexBuffer, controlPointIndexBufferOffset, instanceCount, baseInstance, buffer, offset, instanceStride); } _MTL_INLINE void MTL::IndirectRenderCommand::drawPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger vertexStart, NS::UInteger vertexCount, NS::UInteger instanceCount, NS::UInteger baseInstance) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance_), primitiveType, vertexStart, vertexCount, instanceCount, baseInstance); } _MTL_INLINE void MTL::IndirectRenderCommand::drawIndexedPrimitives(MTL::PrimitiveType primitiveType, NS::UInteger indexCount, MTL::IndexType indexType, const MTL::Buffer* indexBuffer, NS::UInteger indexBufferOffset, NS::UInteger instanceCount, NS::Integer baseVertex, NS::UInteger baseInstance) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance_), primitiveType, indexCount, indexType, indexBuffer, indexBufferOffset, instanceCount, baseVertex, baseInstance); } _MTL_INLINE void MTL::IndirectRenderCommand::setObjectThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectThreadgroupMemoryLength_atIndex_), length, index); } _MTL_INLINE void MTL::IndirectRenderCommand::setObjectBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IndirectRenderCommand::setMeshBuffer(const 
MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IndirectRenderCommand::drawMeshThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawMeshThreadgroups_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadgroupsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup); } _MTL_INLINE void MTL::IndirectRenderCommand::drawMeshThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerObjectThreadgroup, MTL::Size threadsPerMeshThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(drawMeshThreads_threadsPerObjectThreadgroup_threadsPerMeshThreadgroup_), threadsPerGrid, threadsPerObjectThreadgroup, threadsPerMeshThreadgroup); } _MTL_INLINE void MTL::IndirectRenderCommand::setBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBarrier)); } _MTL_INLINE void MTL::IndirectRenderCommand::clearBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(clearBarrier)); } _MTL_INLINE void MTL::IndirectRenderCommand::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } _MTL_INLINE void MTL::IndirectComputeCommand::setComputePipelineState(const MTL::ComputePipelineState* pipelineState) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setComputePipelineState_), pipelineState); } _MTL_INLINE void MTL::IndirectComputeCommand::setKernelBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setKernelBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IndirectComputeCommand::setKernelBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger stride, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setKernelBuffer_offset_attributeStride_atIndex_), buffer, offset, stride, index); } _MTL_INLINE void 
MTL::IndirectComputeCommand::concurrentDispatchThreadgroups(MTL::Size threadgroupsPerGrid, MTL::Size threadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(concurrentDispatchThreadgroups_threadsPerThreadgroup_), threadgroupsPerGrid, threadsPerThreadgroup); } _MTL_INLINE void MTL::IndirectComputeCommand::concurrentDispatchThreads(MTL::Size threadsPerGrid, MTL::Size threadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(concurrentDispatchThreads_threadsPerThreadgroup_), threadsPerGrid, threadsPerThreadgroup); } _MTL_INLINE void MTL::IndirectComputeCommand::setBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBarrier)); } _MTL_INLINE void MTL::IndirectComputeCommand::clearBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(clearBarrier)); } _MTL_INLINE void MTL::IndirectComputeCommand::setImageblockWidth(NS::UInteger width, NS::UInteger height) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setImageblockWidth_height_), width, height); } _MTL_INLINE void MTL::IndirectComputeCommand::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } _MTL_INLINE void MTL::IndirectComputeCommand::setThreadgroupMemoryLength(NS::UInteger length, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadgroupMemoryLength_atIndex_), length, index); } _MTL_INLINE void MTL::IndirectComputeCommand::setStageInRegion(MTL::Region region) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStageInRegion_), region); } #pragma once namespace MTL { _MTL_OPTIONS(NS::UInteger, IntersectionFunctionSignature) { IntersectionFunctionSignatureNone = 0, IntersectionFunctionSignatureInstancing = 1, IntersectionFunctionSignatureTriangleData = 2, IntersectionFunctionSignatureWorldSpaceData = 4, IntersectionFunctionSignatureInstanceMotion = 8, IntersectionFunctionSignaturePrimitiveMotion = 16, IntersectionFunctionSignatureExtendedLimits = 32, IntersectionFunctionSignatureMaxLevels = 64, IntersectionFunctionSignatureCurveData = 128, }; class 
// --- metal-cpp (Apple, vendored): MTLIntersectionFunctionTable section ---
// Declarations of IntersectionFunctionTableDescriptor / IntersectionFunctionTable followed by
// their inline Object::sendMessage wrappers. The next line completes a class declaration opened
// above ("class " on the previous line); this span ends at the start of the IOCommandBuffer
// header (after #pragma once, mid-line).
IntersectionFunctionTableDescriptor : public NS::Copying { public: static class IntersectionFunctionTableDescriptor* alloc(); class IntersectionFunctionTableDescriptor* init(); static class IntersectionFunctionTableDescriptor* intersectionFunctionTableDescriptor(); NS::UInteger functionCount() const; void setFunctionCount(NS::UInteger functionCount); }; class IntersectionFunctionTable : public NS::Referencing { public: void setBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger index); void setBuffers(const class Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range); MTL::ResourceID gpuResourceID() const; void setFunction(const class FunctionHandle* function, NS::UInteger index); void setFunctions(const class FunctionHandle* const functions[], NS::Range range); void setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index); void setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range); void setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index); void setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range); void setVisibleFunctionTable(const class VisibleFunctionTable* functionTable, NS::UInteger bufferIndex); void setVisibleFunctionTables(const class VisibleFunctionTable* const functionTables[], NS::Range bufferRange); }; } _MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLIntersectionFunctionTableDescriptor)); } _MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::IntersectionFunctionTableDescriptor* MTL::IntersectionFunctionTableDescriptor::intersectionFunctionTableDescriptor() { return 
Object::sendMessage(_MTL_PRIVATE_CLS(MTLIntersectionFunctionTableDescriptor), _MTL_PRIVATE_SEL(intersectionFunctionTableDescriptor)); } _MTL_INLINE NS::UInteger MTL::IntersectionFunctionTableDescriptor::functionCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionCount)); } _MTL_INLINE void MTL::IntersectionFunctionTableDescriptor::setFunctionCount(NS::UInteger functionCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctionCount_), functionCount); } _MTL_INLINE void MTL::IntersectionFunctionTable::setBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffer_offset_atIndex_), buffer, offset, index); } _MTL_INLINE void MTL::IntersectionFunctionTable::setBuffers(const MTL::Buffer* const buffers[], const NS::UInteger offsets[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBuffers_offsets_withRange_), buffers, offsets, range); } _MTL_INLINE MTL::ResourceID MTL::IntersectionFunctionTable::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } _MTL_INLINE void MTL::IntersectionFunctionTable::setFunction(const MTL::FunctionHandle* function, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunction_atIndex_), function, index); } _MTL_INLINE void MTL::IntersectionFunctionTable::setFunctions(const MTL::FunctionHandle* const functions[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctions_withRange_), functions, range); } _MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_atIndex_), signature, index); } _MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueTriangleIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setOpaqueTriangleIntersectionFunctionWithSignature_withRange_), signature, range); } _MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOpaqueCurveIntersectionFunctionWithSignature_atIndex_), signature, index); } _MTL_INLINE void MTL::IntersectionFunctionTable::setOpaqueCurveIntersectionFunction(MTL::IntersectionFunctionSignature signature, NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOpaqueCurveIntersectionFunctionWithSignature_withRange_), signature, range); } _MTL_INLINE void MTL::IntersectionFunctionTable::setVisibleFunctionTable(const MTL::VisibleFunctionTable* functionTable, NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibleFunctionTable_atBufferIndex_), functionTable, bufferIndex); } _MTL_INLINE void MTL::IntersectionFunctionTable::setVisibleFunctionTables(const MTL::VisibleFunctionTable* const functionTables[], NS::Range bufferRange) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVisibleFunctionTables_withBufferRange_), functionTables, bufferRange); } #pragma once namespace MTL { _MTL_ENUM(NS::Integer, IOStatus) { IOStatusPending = 0, IOStatusCancelled = 1, IOStatusError = 2, IOStatusComplete = 3, }; using IOCommandBufferHandler = void (^)(class IOCommandBuffer*); using IOCommandBufferHandlerFunction = std::function; class IOCommandBuffer : public NS::Referencing { public: void addCompletedHandler(const MTL::IOCommandBufferHandlerFunction& function); void addCompletedHandler(const MTL::IOCommandBufferHandler block); void loadBytes(const void* pointer, NS::UInteger size, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset); void loadBuffer(const class Buffer* buffer, NS::UInteger offset, NS::UInteger size, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset); void loadTexture(const class Texture* texture, 
// --- metal-cpp (Apple, vendored): MTLIOCommandBuffer section ---
// Remainder of the IOCommandBuffer declaration plus its inline wrappers. Of note below: the
// std::function overload of addCompletedHandler copies the function into a __block local so the
// Objective-C completion block can invoke it; everything else forwards via Object::sendMessage.
// NOTE(review): the next line continues the loadTexture(...) declaration opened above; this span
// ends at the start of the IOCommandQueue header (after #pragma once, mid-line).
NS::UInteger slice, NS::UInteger level, MTL::Size size, NS::UInteger sourceBytesPerRow, NS::UInteger sourceBytesPerImage, MTL::Origin destinationOrigin, const class IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset); void copyStatusToBuffer(const class Buffer* buffer, NS::UInteger offset); void commit(); void waitUntilCompleted(); void tryCancel(); void addBarrier(); void pushDebugGroup(const NS::String* string); void popDebugGroup(); void enqueue(); void wait(const class SharedEvent* event, uint64_t value); void signalEvent(const class SharedEvent* event, uint64_t value); NS::String* label() const; void setLabel(const NS::String* label); MTL::IOStatus status() const; NS::Error* error() const; }; } _MTL_INLINE void MTL::IOCommandBuffer::addCompletedHandler(const MTL::IOCommandBufferHandlerFunction& function) { __block IOCommandBufferHandlerFunction blockFunction = function; addCompletedHandler(^(IOCommandBuffer* pCommandBuffer) { blockFunction(pCommandBuffer); }); } _MTL_INLINE void MTL::IOCommandBuffer::addCompletedHandler(const MTL::IOCommandBufferHandler block) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addCompletedHandler_), block); } _MTL_INLINE void MTL::IOCommandBuffer::loadBytes(const void* pointer, NS::UInteger size, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(loadBytes_size_sourceHandle_sourceHandleOffset_), pointer, size, sourceHandle, sourceHandleOffset); } _MTL_INLINE void MTL::IOCommandBuffer::loadBuffer(const MTL::Buffer* buffer, NS::UInteger offset, NS::UInteger size, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(loadBuffer_offset_size_sourceHandle_sourceHandleOffset_), buffer, offset, size, sourceHandle, sourceHandleOffset); } _MTL_INLINE void MTL::IOCommandBuffer::loadTexture(const MTL::Texture* texture, NS::UInteger slice, NS::UInteger level, MTL::Size size, NS::UInteger sourceBytesPerRow, 
NS::UInteger sourceBytesPerImage, MTL::Origin destinationOrigin, const MTL::IOFileHandle* sourceHandle, NS::UInteger sourceHandleOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(loadTexture_slice_level_size_sourceBytesPerRow_sourceBytesPerImage_destinationOrigin_sourceHandle_sourceHandleOffset_), texture, slice, level, size, sourceBytesPerRow, sourceBytesPerImage, destinationOrigin, sourceHandle, sourceHandleOffset); } _MTL_INLINE void MTL::IOCommandBuffer::copyStatusToBuffer(const MTL::Buffer* buffer, NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(copyStatusToBuffer_offset_), buffer, offset); } _MTL_INLINE void MTL::IOCommandBuffer::commit() { Object::sendMessage(this, _MTL_PRIVATE_SEL(commit)); } _MTL_INLINE void MTL::IOCommandBuffer::waitUntilCompleted() { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitUntilCompleted)); } _MTL_INLINE void MTL::IOCommandBuffer::tryCancel() { Object::sendMessage(this, _MTL_PRIVATE_SEL(tryCancel)); } _MTL_INLINE void MTL::IOCommandBuffer::addBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(addBarrier)); } _MTL_INLINE void MTL::IOCommandBuffer::pushDebugGroup(const NS::String* string) { Object::sendMessage(this, _MTL_PRIVATE_SEL(pushDebugGroup_), string); } _MTL_INLINE void MTL::IOCommandBuffer::popDebugGroup() { Object::sendMessage(this, _MTL_PRIVATE_SEL(popDebugGroup)); } _MTL_INLINE void MTL::IOCommandBuffer::enqueue() { Object::sendMessage(this, _MTL_PRIVATE_SEL(enqueue)); } _MTL_INLINE void MTL::IOCommandBuffer::wait(const MTL::SharedEvent* event, uint64_t value) { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitForEvent_value_), event, value); } _MTL_INLINE void MTL::IOCommandBuffer::signalEvent(const MTL::SharedEvent* event, uint64_t value) { Object::sendMessage(this, _MTL_PRIVATE_SEL(signalEvent_value_), event, value); } _MTL_INLINE NS::String* MTL::IOCommandBuffer::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::IOCommandBuffer::setLabel(const 
NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::IOStatus MTL::IOCommandBuffer::status() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(status)); } _MTL_INLINE NS::Error* MTL::IOCommandBuffer::error() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(error)); } #pragma once namespace MTL { _MTL_ENUM(NS::Integer, IOPriority) { IOPriorityHigh = 0, IOPriorityNormal = 1, IOPriorityLow = 2, }; _MTL_ENUM(NS::Integer, IOCommandQueueType) { IOCommandQueueTypeConcurrent = 0, IOCommandQueueTypeSerial = 1, }; _MTL_CONST(NS::ErrorDomain, IOErrorDomain); _MTL_ENUM(NS::Integer, IOError) { IOErrorURLInvalid = 1, IOErrorInternal = 2, }; class IOCommandQueue : public NS::Referencing { public: void enqueueBarrier(); class IOCommandBuffer* commandBuffer(); class IOCommandBuffer* commandBufferWithUnretainedReferences(); NS::String* label() const; void setLabel(const NS::String* label); }; class IOScratchBuffer : public NS::Referencing { public: class Buffer* buffer() const; }; class IOScratchBufferAllocator : public NS::Referencing { public: class IOScratchBuffer* newScratchBuffer(NS::UInteger minimumSize); }; class IOCommandQueueDescriptor : public NS::Copying { public: static class IOCommandQueueDescriptor* alloc(); class IOCommandQueueDescriptor* init(); NS::UInteger maxCommandBufferCount() const; void setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount); MTL::IOPriority priority() const; void setPriority(MTL::IOPriority priority); MTL::IOCommandQueueType type() const; void setType(MTL::IOCommandQueueType type); NS::UInteger maxCommandsInFlight() const; void setMaxCommandsInFlight(NS::UInteger maxCommandsInFlight); class IOScratchBufferAllocator* scratchBufferAllocator() const; void setScratchBufferAllocator(const class IOScratchBufferAllocator* scratchBufferAllocator); }; class IOFileHandle : public NS::Referencing { public: NS::String* label() const; void setLabel(const NS::String* label); }; } 
_MTL_PRIVATE_DEF_WEAK_CONST(NS::ErrorDomain, IOErrorDomain); _MTL_INLINE void MTL::IOCommandQueue::enqueueBarrier() { Object::sendMessage(this, _MTL_PRIVATE_SEL(enqueueBarrier)); } _MTL_INLINE MTL::IOCommandBuffer* MTL::IOCommandQueue::commandBuffer() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandBuffer)); } _MTL_INLINE MTL::IOCommandBuffer* MTL::IOCommandQueue::commandBufferWithUnretainedReferences() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(commandBufferWithUnretainedReferences)); } _MTL_INLINE NS::String* MTL::IOCommandQueue::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::IOCommandQueue::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Buffer* MTL::IOScratchBuffer::buffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(buffer)); } _MTL_INLINE MTL::IOScratchBuffer* MTL::IOScratchBufferAllocator::newScratchBuffer(NS::UInteger minimumSize) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newScratchBufferWithMinimumSize_), minimumSize); } _MTL_INLINE MTL::IOCommandQueueDescriptor* MTL::IOCommandQueueDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLIOCommandQueueDescriptor)); } _MTL_INLINE MTL::IOCommandQueueDescriptor* MTL::IOCommandQueueDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::IOCommandQueueDescriptor::maxCommandBufferCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxCommandBufferCount)); } _MTL_INLINE void MTL::IOCommandQueueDescriptor::setMaxCommandBufferCount(NS::UInteger maxCommandBufferCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxCommandBufferCount_), maxCommandBufferCount); } _MTL_INLINE MTL::IOPriority MTL::IOCommandQueueDescriptor::priority() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(priority)); } _MTL_INLINE void MTL::IOCommandQueueDescriptor::setPriority(MTL::IOPriority priority) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setPriority_), priority); } _MTL_INLINE MTL::IOCommandQueueType MTL::IOCommandQueueDescriptor::type() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(type)); } _MTL_INLINE void MTL::IOCommandQueueDescriptor::setType(MTL::IOCommandQueueType type) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setType_), type); } _MTL_INLINE NS::UInteger MTL::IOCommandQueueDescriptor::maxCommandsInFlight() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxCommandsInFlight)); } _MTL_INLINE void MTL::IOCommandQueueDescriptor::setMaxCommandsInFlight(NS::UInteger maxCommandsInFlight) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxCommandsInFlight_), maxCommandsInFlight); } _MTL_INLINE MTL::IOScratchBufferAllocator* MTL::IOCommandQueueDescriptor::scratchBufferAllocator() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(scratchBufferAllocator)); } _MTL_INLINE void MTL::IOCommandQueueDescriptor::setScratchBufferAllocator(const MTL::IOScratchBufferAllocator* scratchBufferAllocator) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setScratchBufferAllocator_), scratchBufferAllocator); } _MTL_INLINE NS::String* MTL::IOFileHandle::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::IOFileHandle::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } #pragma once namespace MTL { using IOCompresionContext=void*; _MTL_ENUM(NS::Integer, IOCompressionStatus) { IOCompressionStatusComplete = 0, IOCompressionStatusError = 1, }; size_t IOCompressionContextDefaultChunkSize(); IOCompresionContext IOCreateCompressionContext(const char* path, IOCompressionMethod type, size_t chunkSize); void IOCompressionContextAppendData(IOCompresionContext context, const void* data, size_t size); IOCompressionStatus IOFlushAndDestroyCompressionContext(IOCompresionContext context); } #if defined(MTL_PRIVATE_IMPLEMENTATION) namespace MTL::Private { 
MTL_DEF_FUNC(MTLIOCompressionContextDefaultChunkSize, size_t (*)(void)); MTL_DEF_FUNC( MTLIOCreateCompressionContext, void* (*)(const char*, MTL::IOCompressionMethod, size_t) ); MTL_DEF_FUNC( MTLIOCompressionContextAppendData, void (*)(void*, const void*, size_t) ); MTL_DEF_FUNC( MTLIOFlushAndDestroyCompressionContext, MTL::IOCompressionStatus (*)(void*) ); } _NS_EXPORT size_t MTL::IOCompressionContextDefaultChunkSize() { return MTL::Private::MTLIOCompressionContextDefaultChunkSize(); } _NS_EXPORT void* MTL::IOCreateCompressionContext(const char* path, IOCompressionMethod type, size_t chunkSize) { if ( MTL::Private::MTLIOCreateCompressionContext ) { return MTL::Private::MTLIOCreateCompressionContext( path, type, chunkSize ); } return nullptr; } _NS_EXPORT void MTL::IOCompressionContextAppendData(void* context, const void* data, size_t size) { if ( MTL::Private::MTLIOCompressionContextAppendData ) { MTL::Private::MTLIOCompressionContextAppendData( context, data, size ); } } _NS_EXPORT MTL::IOCompressionStatus MTL::IOFlushAndDestroyCompressionContext(void* context) { if ( MTL::Private::MTLIOFlushAndDestroyCompressionContext ) { return MTL::Private::MTLIOFlushAndDestroyCompressionContext( context ); } return MTL::IOCompressionStatusError; } #endif #pragma once namespace MTL { class LinkedFunctions : public NS::Copying { public: static class LinkedFunctions* alloc(); class LinkedFunctions* init(); static class LinkedFunctions* linkedFunctions(); NS::Array* functions() const; void setFunctions(const NS::Array* functions); NS::Array* binaryFunctions() const; void setBinaryFunctions(const NS::Array* binaryFunctions); NS::Dictionary* groups() const; void setGroups(const NS::Dictionary* groups); NS::Array* privateFunctions() const; void setPrivateFunctions(const NS::Array* privateFunctions); }; } _MTL_INLINE MTL::LinkedFunctions* MTL::LinkedFunctions::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLLinkedFunctions)); } _MTL_INLINE MTL::LinkedFunctions* 
MTL::LinkedFunctions::init() { return NS::Object::init(); } _MTL_INLINE MTL::LinkedFunctions* MTL::LinkedFunctions::linkedFunctions() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLLinkedFunctions), _MTL_PRIVATE_SEL(linkedFunctions)); } _MTL_INLINE NS::Array* MTL::LinkedFunctions::functions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functions)); } _MTL_INLINE void MTL::LinkedFunctions::setFunctions(const NS::Array* functions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctions_), functions); } _MTL_INLINE NS::Array* MTL::LinkedFunctions::binaryFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryFunctions)); } _MTL_INLINE void MTL::LinkedFunctions::setBinaryFunctions(const NS::Array* binaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryFunctions_), binaryFunctions); } _MTL_INLINE NS::Dictionary* MTL::LinkedFunctions::groups() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(groups)); } _MTL_INLINE void MTL::LinkedFunctions::setGroups(const NS::Dictionary* groups) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setGroups_), groups); } _MTL_INLINE NS::Array* MTL::LinkedFunctions::privateFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(privateFunctions)); } _MTL_INLINE void MTL::LinkedFunctions::setPrivateFunctions(const NS::Array* privateFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPrivateFunctions_), privateFunctions); } #pragma once namespace MTL { _MTL_ENUM(NS::Integer, LogLevel) { LogLevelUndefined = 0, LogLevelDebug = 1, LogLevelInfo = 2, LogLevelNotice = 3, LogLevelError = 4, LogLevelFault = 5, }; using LogHandlerFunction = std::function; class LogState : public NS::Referencing { public: void addLogHandler(void (^block)(NS::String*, NS::String*, MTL::LogLevel, NS::String*)); void addLogHandler(const LogHandlerFunction& handler); }; class LogStateDescriptor : public NS::Copying { public: static class LogStateDescriptor* alloc(); class LogStateDescriptor* 
init(); MTL::LogLevel level() const; void setLevel(MTL::LogLevel level); NS::Integer bufferSize() const; void setBufferSize(NS::Integer bufferSize); }; _MTL_CONST(NS::ErrorDomain, LogStateErrorDomain); _MTL_ENUM(NS::UInteger, LogStateError) { LogStateErrorInvalidSize = 1, LogStateErrorInvalid = 2, }; } _MTL_PRIVATE_DEF_WEAK_CONST(NS::ErrorDomain, LogStateErrorDomain); _MTL_INLINE void MTL::LogState::addLogHandler(void (^block)(NS::String*, NS::String*, MTL::LogLevel, NS::String*)) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addLogHandler_), block); } _MTL_INLINE void MTL::LogState::addLogHandler(const MTL::LogHandlerFunction& handler) { __block LogHandlerFunction function = handler; addLogHandler(^void(NS::String* subsystem, NS::String* category, MTL::LogLevel logLevel, NS::String* message){ function(subsystem, category, logLevel, message); }); } _MTL_INLINE MTL::LogStateDescriptor* MTL::LogStateDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLLogStateDescriptor)); } _MTL_INLINE MTL::LogStateDescriptor* MTL::LogStateDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::LogLevel MTL::LogStateDescriptor::level() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(level)); } _MTL_INLINE void MTL::LogStateDescriptor::setLevel(MTL::LogLevel level) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLevel_), level); } _MTL_INLINE NS::Integer MTL::LogStateDescriptor::bufferSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferSize)); } _MTL_INLINE void MTL::LogStateDescriptor::setBufferSize(NS::Integer bufferSize) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBufferSize_), bufferSize); } #pragma once namespace MTL { class ParallelRenderCommandEncoder : public NS::Referencing { public: class RenderCommandEncoder* renderCommandEncoder(); void setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex); void setDepthStoreAction(MTL::StoreAction storeAction); void 
setStencilStoreAction(MTL::StoreAction storeAction); void setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex); void setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions); void setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions); }; } _MTL_INLINE MTL::RenderCommandEncoder* MTL::ParallelRenderCommandEncoder::renderCommandEncoder() { return Object::sendMessage(this, _MTL_PRIVATE_SEL(renderCommandEncoder)); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setColorStoreAction(MTL::StoreAction storeAction, NS::UInteger colorAttachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setColorStoreAction_atIndex_), storeAction, colorAttachmentIndex); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setDepthStoreAction(MTL::StoreAction storeAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStoreAction_), storeAction); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setStencilStoreAction(MTL::StoreAction storeAction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilStoreAction_), storeAction); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setColorStoreActionOptions(MTL::StoreActionOptions storeActionOptions, NS::UInteger colorAttachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setColorStoreActionOptions_atIndex_), storeActionOptions, colorAttachmentIndex); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setDepthStoreActionOptions(MTL::StoreActionOptions storeActionOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthStoreActionOptions_), storeActionOptions); } _MTL_INLINE void MTL::ParallelRenderCommandEncoder::setStencilStoreActionOptions(MTL::StoreActionOptions storeActionOptions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilStoreActionOptions_), storeActionOptions); } #pragma once namespace MTL { class RasterizationRateSampleArray : public NS::Referencing { public: static class 
RasterizationRateSampleArray* alloc(); class RasterizationRateSampleArray* init(); NS::Number* object(NS::UInteger index); void setObject(const NS::Number* value, NS::UInteger index); }; class RasterizationRateLayerDescriptor : public NS::Copying { public: static class RasterizationRateLayerDescriptor* alloc(); MTL::RasterizationRateLayerDescriptor* init(); MTL::RasterizationRateLayerDescriptor* init(MTL::Size sampleCount); MTL::RasterizationRateLayerDescriptor* init(MTL::Size sampleCount, const float* horizontal, const float* vertical); MTL::Size sampleCount() const; MTL::Size maxSampleCount() const; float* horizontalSampleStorage() const; float* verticalSampleStorage() const; class RasterizationRateSampleArray* horizontal() const; class RasterizationRateSampleArray* vertical() const; void setSampleCount(MTL::Size sampleCount); }; class RasterizationRateLayerArray : public NS::Referencing { public: static class RasterizationRateLayerArray* alloc(); class RasterizationRateLayerArray* init(); class RasterizationRateLayerDescriptor* object(NS::UInteger layerIndex); void setObject(const class RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex); }; class RasterizationRateMapDescriptor : public NS::Copying { public: static class RasterizationRateMapDescriptor* alloc(); class RasterizationRateMapDescriptor* init(); static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize); static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize, const class RasterizationRateLayerDescriptor* layer); static class RasterizationRateMapDescriptor* rasterizationRateMapDescriptor(MTL::Size screenSize, NS::UInteger layerCount, const class RasterizationRateLayerDescriptor* const* layers); class RasterizationRateLayerDescriptor* layer(NS::UInteger layerIndex); void setLayer(const class RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex); class RasterizationRateLayerArray* layers() const; 
MTL::Size screenSize() const; void setScreenSize(MTL::Size screenSize); NS::String* label() const; void setLabel(const NS::String* label); NS::UInteger layerCount() const; }; class RasterizationRateMap : public NS::Referencing { public: class Device* device() const; NS::String* label() const; MTL::Size screenSize() const; MTL::Size physicalGranularity() const; NS::UInteger layerCount() const; MTL::SizeAndAlign parameterBufferSizeAndAlign() const; void copyParameterDataToBuffer(const class Buffer* buffer, NS::UInteger offset); MTL::Size physicalSize(NS::UInteger layerIndex); MTL::Coordinate2D mapScreenToPhysicalCoordinates(MTL::Coordinate2D screenCoordinates, NS::UInteger layerIndex); MTL::Coordinate2D mapPhysicalToScreenCoordinates(MTL::Coordinate2D physicalCoordinates, NS::UInteger layerIndex); }; } _MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateSampleArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRasterizationRateSampleArray)); } _MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateSampleArray::init() { return NS::Object::init(); } _MTL_INLINE NS::Number* MTL::RasterizationRateSampleArray::object(NS::UInteger index) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index); } _MTL_INLINE void MTL::RasterizationRateSampleArray::setObject(const NS::Number* value, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), value, index); } _MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRasterizationRateLayerDescriptor)); } _MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init(MTL::Size sampleCount) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithSampleCount_), sampleCount); } 
_MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerDescriptor::init(MTL::Size sampleCount, const float* horizontal, const float* vertical) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initWithSampleCount_horizontal_vertical_), sampleCount, horizontal, vertical); } _MTL_INLINE MTL::Size MTL::RasterizationRateLayerDescriptor::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE MTL::Size MTL::RasterizationRateLayerDescriptor::maxSampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxSampleCount)); } _MTL_INLINE float* MTL::RasterizationRateLayerDescriptor::horizontalSampleStorage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(horizontalSampleStorage)); } _MTL_INLINE float* MTL::RasterizationRateLayerDescriptor::verticalSampleStorage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(verticalSampleStorage)); } _MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateLayerDescriptor::horizontal() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(horizontal)); } _MTL_INLINE MTL::RasterizationRateSampleArray* MTL::RasterizationRateLayerDescriptor::vertical() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertical)); } _MTL_INLINE void MTL::RasterizationRateLayerDescriptor::setSampleCount(MTL::Size sampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount); } _MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateLayerArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRasterizationRateLayerArray)); } _MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateLayerArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateLayerArray::object(NS::UInteger layerIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), layerIndex); } _MTL_INLINE void 
MTL::RasterizationRateLayerArray::setObject(const MTL::RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), layer, layerIndex); } _MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor)); } _MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_), screenSize); } _MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize, const MTL::RasterizationRateLayerDescriptor* layer) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_layer_), screenSize, layer); } _MTL_INLINE MTL::RasterizationRateMapDescriptor* MTL::RasterizationRateMapDescriptor::rasterizationRateMapDescriptor(MTL::Size screenSize, NS::UInteger layerCount, const MTL::RasterizationRateLayerDescriptor* const* layers) { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLRasterizationRateMapDescriptor), _MTL_PRIVATE_SEL(rasterizationRateMapDescriptorWithScreenSize_layerCount_layers_), screenSize, layerCount, layers); } _MTL_INLINE MTL::RasterizationRateLayerDescriptor* MTL::RasterizationRateMapDescriptor::layer(NS::UInteger layerIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layerAtIndex_), layerIndex); } _MTL_INLINE void MTL::RasterizationRateMapDescriptor::setLayer(const MTL::RasterizationRateLayerDescriptor* layer, NS::UInteger layerIndex) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(setLayer_atIndex_), layer, layerIndex); } _MTL_INLINE MTL::RasterizationRateLayerArray* MTL::RasterizationRateMapDescriptor::layers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layers)); } _MTL_INLINE MTL::Size MTL::RasterizationRateMapDescriptor::screenSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(screenSize)); } _MTL_INLINE void MTL::RasterizationRateMapDescriptor::setScreenSize(MTL::Size screenSize) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setScreenSize_), screenSize); } _MTL_INLINE NS::String* MTL::RasterizationRateMapDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::RasterizationRateMapDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE NS::UInteger MTL::RasterizationRateMapDescriptor::layerCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layerCount)); } _MTL_INLINE MTL::Device* MTL::RasterizationRateMap::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::String* MTL::RasterizationRateMap::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::Size MTL::RasterizationRateMap::screenSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(screenSize)); } _MTL_INLINE MTL::Size MTL::RasterizationRateMap::physicalGranularity() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(physicalGranularity)); } _MTL_INLINE NS::UInteger MTL::RasterizationRateMap::layerCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layerCount)); } _MTL_INLINE MTL::SizeAndAlign MTL::RasterizationRateMap::parameterBufferSizeAndAlign() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(parameterBufferSizeAndAlign)); } _MTL_INLINE void MTL::RasterizationRateMap::copyParameterDataToBuffer(const MTL::Buffer* buffer, NS::UInteger offset) { Object::sendMessage(this, 
_MTL_PRIVATE_SEL(copyParameterDataToBuffer_offset_), buffer, offset); } _MTL_INLINE MTL::Size MTL::RasterizationRateMap::physicalSize(NS::UInteger layerIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(physicalSizeForLayer_), layerIndex); } _MTL_INLINE MTL::Coordinate2D MTL::RasterizationRateMap::mapScreenToPhysicalCoordinates(MTL::Coordinate2D screenCoordinates, NS::UInteger layerIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mapScreenToPhysicalCoordinates_forLayer_), screenCoordinates, layerIndex); } _MTL_INLINE MTL::Coordinate2D MTL::RasterizationRateMap::mapPhysicalToScreenCoordinates(MTL::Coordinate2D physicalCoordinates, NS::UInteger layerIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(mapPhysicalToScreenCoordinates_forLayer_), physicalCoordinates, layerIndex); } #pragma once namespace MTL { _MTL_ENUM(NS::UInteger, BlendFactor) { BlendFactorZero = 0, BlendFactorOne = 1, BlendFactorSourceColor = 2, BlendFactorOneMinusSourceColor = 3, BlendFactorSourceAlpha = 4, BlendFactorOneMinusSourceAlpha = 5, BlendFactorDestinationColor = 6, BlendFactorOneMinusDestinationColor = 7, BlendFactorDestinationAlpha = 8, BlendFactorOneMinusDestinationAlpha = 9, BlendFactorSourceAlphaSaturated = 10, BlendFactorBlendColor = 11, BlendFactorOneMinusBlendColor = 12, BlendFactorBlendAlpha = 13, BlendFactorOneMinusBlendAlpha = 14, BlendFactorSource1Color = 15, BlendFactorOneMinusSource1Color = 16, BlendFactorSource1Alpha = 17, BlendFactorOneMinusSource1Alpha = 18, }; _MTL_ENUM(NS::UInteger, BlendOperation) { BlendOperationAdd = 0, BlendOperationSubtract = 1, BlendOperationReverseSubtract = 2, BlendOperationMin = 3, BlendOperationMax = 4, }; _MTL_OPTIONS(NS::UInteger, ColorWriteMask) { ColorWriteMaskNone = 0, ColorWriteMaskRed = 8, ColorWriteMaskGreen = 4, ColorWriteMaskBlue = 2, ColorWriteMaskAlpha = 1, ColorWriteMaskAll = 15, }; _MTL_ENUM(NS::UInteger, PrimitiveTopologyClass) { PrimitiveTopologyClassUnspecified = 0, PrimitiveTopologyClassPoint = 1, 
PrimitiveTopologyClassLine = 2, PrimitiveTopologyClassTriangle = 3, }; _MTL_ENUM(NS::UInteger, TessellationPartitionMode) { TessellationPartitionModePow2 = 0, TessellationPartitionModeInteger = 1, TessellationPartitionModeFractionalOdd = 2, TessellationPartitionModeFractionalEven = 3, }; _MTL_ENUM(NS::UInteger, TessellationFactorStepFunction) { TessellationFactorStepFunctionConstant = 0, TessellationFactorStepFunctionPerPatch = 1, TessellationFactorStepFunctionPerInstance = 2, TessellationFactorStepFunctionPerPatchAndPerInstance = 3, }; _MTL_ENUM(NS::UInteger, TessellationFactorFormat) { TessellationFactorFormatHalf = 0, }; _MTL_ENUM(NS::UInteger, TessellationControlPointIndexType) { TessellationControlPointIndexTypeNone = 0, TessellationControlPointIndexTypeUInt16 = 1, TessellationControlPointIndexTypeUInt32 = 2, }; class RenderPipelineColorAttachmentDescriptor : public NS::Copying { public: static class RenderPipelineColorAttachmentDescriptor* alloc(); class RenderPipelineColorAttachmentDescriptor* init(); MTL::PixelFormat pixelFormat() const; void setPixelFormat(MTL::PixelFormat pixelFormat); bool blendingEnabled() const; void setBlendingEnabled(bool blendingEnabled); MTL::BlendFactor sourceRGBBlendFactor() const; void setSourceRGBBlendFactor(MTL::BlendFactor sourceRGBBlendFactor); MTL::BlendFactor destinationRGBBlendFactor() const; void setDestinationRGBBlendFactor(MTL::BlendFactor destinationRGBBlendFactor); MTL::BlendOperation rgbBlendOperation() const; void setRgbBlendOperation(MTL::BlendOperation rgbBlendOperation); MTL::BlendFactor sourceAlphaBlendFactor() const; void setSourceAlphaBlendFactor(MTL::BlendFactor sourceAlphaBlendFactor); MTL::BlendFactor destinationAlphaBlendFactor() const; void setDestinationAlphaBlendFactor(MTL::BlendFactor destinationAlphaBlendFactor); MTL::BlendOperation alphaBlendOperation() const; void setAlphaBlendOperation(MTL::BlendOperation alphaBlendOperation); MTL::ColorWriteMask writeMask() const; void 
setWriteMask(MTL::ColorWriteMask writeMask); }; class RenderPipelineReflection : public NS::Referencing { public: static class RenderPipelineReflection* alloc(); class RenderPipelineReflection* init(); NS::Array* vertexBindings() const; NS::Array* fragmentBindings() const; NS::Array* tileBindings() const; NS::Array* objectBindings() const; NS::Array* meshBindings() const; NS::Array* vertexArguments() const; NS::Array* fragmentArguments() const; NS::Array* tileArguments() const; }; class RenderPipelineDescriptor : public NS::Copying { public: static class RenderPipelineDescriptor* alloc(); class RenderPipelineDescriptor* init(); NS::String* label() const; void setLabel(const NS::String* label); class Function* vertexFunction() const; void setVertexFunction(const class Function* vertexFunction); class Function* fragmentFunction() const; void setFragmentFunction(const class Function* fragmentFunction); class VertexDescriptor* vertexDescriptor() const; void setVertexDescriptor(const class VertexDescriptor* vertexDescriptor); NS::UInteger sampleCount() const; void setSampleCount(NS::UInteger sampleCount); NS::UInteger rasterSampleCount() const; void setRasterSampleCount(NS::UInteger rasterSampleCount); bool alphaToCoverageEnabled() const; void setAlphaToCoverageEnabled(bool alphaToCoverageEnabled); bool alphaToOneEnabled() const; void setAlphaToOneEnabled(bool alphaToOneEnabled); bool rasterizationEnabled() const; void setRasterizationEnabled(bool rasterizationEnabled); NS::UInteger maxVertexAmplificationCount() const; void setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount); class RenderPipelineColorAttachmentDescriptorArray* colorAttachments() const; MTL::PixelFormat depthAttachmentPixelFormat() const; void setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat); MTL::PixelFormat stencilAttachmentPixelFormat() const; void setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat); MTL::PrimitiveTopologyClass 
inputPrimitiveTopology() const; void setInputPrimitiveTopology(MTL::PrimitiveTopologyClass inputPrimitiveTopology); MTL::TessellationPartitionMode tessellationPartitionMode() const; void setTessellationPartitionMode(MTL::TessellationPartitionMode tessellationPartitionMode); NS::UInteger maxTessellationFactor() const; void setMaxTessellationFactor(NS::UInteger maxTessellationFactor); bool tessellationFactorScaleEnabled() const; void setTessellationFactorScaleEnabled(bool tessellationFactorScaleEnabled); MTL::TessellationFactorFormat tessellationFactorFormat() const; void setTessellationFactorFormat(MTL::TessellationFactorFormat tessellationFactorFormat); MTL::TessellationControlPointIndexType tessellationControlPointIndexType() const; void setTessellationControlPointIndexType(MTL::TessellationControlPointIndexType tessellationControlPointIndexType); MTL::TessellationFactorStepFunction tessellationFactorStepFunction() const; void setTessellationFactorStepFunction(MTL::TessellationFactorStepFunction tessellationFactorStepFunction); MTL::Winding tessellationOutputWindingOrder() const; void setTessellationOutputWindingOrder(MTL::Winding tessellationOutputWindingOrder); class PipelineBufferDescriptorArray* vertexBuffers() const; class PipelineBufferDescriptorArray* fragmentBuffers() const; bool supportIndirectCommandBuffers() const; void setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers); NS::Array* binaryArchives() const; void setBinaryArchives(const NS::Array* binaryArchives); NS::Array* vertexPreloadedLibraries() const; void setVertexPreloadedLibraries(const NS::Array* vertexPreloadedLibraries); NS::Array* fragmentPreloadedLibraries() const; void setFragmentPreloadedLibraries(const NS::Array* fragmentPreloadedLibraries); class LinkedFunctions* vertexLinkedFunctions() const; void setVertexLinkedFunctions(const class LinkedFunctions* vertexLinkedFunctions); class LinkedFunctions* fragmentLinkedFunctions() const; void setFragmentLinkedFunctions(const 
class LinkedFunctions* fragmentLinkedFunctions); bool supportAddingVertexBinaryFunctions() const; void setSupportAddingVertexBinaryFunctions(bool supportAddingVertexBinaryFunctions); bool supportAddingFragmentBinaryFunctions() const; void setSupportAddingFragmentBinaryFunctions(bool supportAddingFragmentBinaryFunctions); NS::UInteger maxVertexCallStackDepth() const; void setMaxVertexCallStackDepth(NS::UInteger maxVertexCallStackDepth); NS::UInteger maxFragmentCallStackDepth() const; void setMaxFragmentCallStackDepth(NS::UInteger maxFragmentCallStackDepth); void reset(); MTL::ShaderValidation shaderValidation() const; void setShaderValidation(MTL::ShaderValidation shaderValidation); }; class RenderPipelineFunctionsDescriptor : public NS::Copying { public: static class RenderPipelineFunctionsDescriptor* alloc(); class RenderPipelineFunctionsDescriptor* init(); NS::Array* vertexAdditionalBinaryFunctions() const; void setVertexAdditionalBinaryFunctions(const NS::Array* vertexAdditionalBinaryFunctions); NS::Array* fragmentAdditionalBinaryFunctions() const; void setFragmentAdditionalBinaryFunctions(const NS::Array* fragmentAdditionalBinaryFunctions); NS::Array* tileAdditionalBinaryFunctions() const; void setTileAdditionalBinaryFunctions(const NS::Array* tileAdditionalBinaryFunctions); }; class RenderPipelineState : public NS::Referencing { public: NS::String* label() const; class Device* device() const; NS::UInteger maxTotalThreadsPerThreadgroup() const; bool threadgroupSizeMatchesTileSize() const; NS::UInteger imageblockSampleLength() const; NS::UInteger imageblockMemoryLength(MTL::Size imageblockDimensions); bool supportIndirectCommandBuffers() const; NS::UInteger maxTotalThreadsPerObjectThreadgroup() const; NS::UInteger maxTotalThreadsPerMeshThreadgroup() const; NS::UInteger objectThreadExecutionWidth() const; NS::UInteger meshThreadExecutionWidth() const; NS::UInteger maxTotalThreadgroupsPerMeshGrid() const; MTL::ResourceID gpuResourceID() const; class 
FunctionHandle* functionHandle(const class Function* function, MTL::RenderStages stage); class VisibleFunctionTable* newVisibleFunctionTable(const class VisibleFunctionTableDescriptor* descriptor, MTL::RenderStages stage); class IntersectionFunctionTable* newIntersectionFunctionTable(const class IntersectionFunctionTableDescriptor* descriptor, MTL::RenderStages stage); class RenderPipelineState* newRenderPipelineState(const class RenderPipelineFunctionsDescriptor* additionalBinaryFunctions, NS::Error** error); MTL::ShaderValidation shaderValidation() const; }; class RenderPipelineColorAttachmentDescriptorArray : public NS::Referencing { public: static class RenderPipelineColorAttachmentDescriptorArray* alloc(); class RenderPipelineColorAttachmentDescriptorArray* init(); class RenderPipelineColorAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class RenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class TileRenderPipelineColorAttachmentDescriptor : public NS::Copying { public: static class TileRenderPipelineColorAttachmentDescriptor* alloc(); class TileRenderPipelineColorAttachmentDescriptor* init(); MTL::PixelFormat pixelFormat() const; void setPixelFormat(MTL::PixelFormat pixelFormat); }; class TileRenderPipelineColorAttachmentDescriptorArray : public NS::Referencing { public: static class TileRenderPipelineColorAttachmentDescriptorArray* alloc(); class TileRenderPipelineColorAttachmentDescriptorArray* init(); class TileRenderPipelineColorAttachmentDescriptor* object(NS::UInteger attachmentIndex); void setObject(const class TileRenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex); }; class TileRenderPipelineDescriptor : public NS::Copying { public: static class TileRenderPipelineDescriptor* alloc(); class TileRenderPipelineDescriptor* init(); NS::String* label() const; void setLabel(const NS::String* label); class Function* tileFunction() const; void 
setTileFunction(const class Function* tileFunction); NS::UInteger rasterSampleCount() const; void setRasterSampleCount(NS::UInteger rasterSampleCount); class TileRenderPipelineColorAttachmentDescriptorArray* colorAttachments() const; bool threadgroupSizeMatchesTileSize() const; void setThreadgroupSizeMatchesTileSize(bool threadgroupSizeMatchesTileSize); class PipelineBufferDescriptorArray* tileBuffers() const; NS::UInteger maxTotalThreadsPerThreadgroup() const; void setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup); NS::Array* binaryArchives() const; void setBinaryArchives(const NS::Array* binaryArchives); NS::Array* preloadedLibraries() const; void setPreloadedLibraries(const NS::Array* preloadedLibraries); class LinkedFunctions* linkedFunctions() const; void setLinkedFunctions(const class LinkedFunctions* linkedFunctions); bool supportAddingBinaryFunctions() const; void setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions); NS::UInteger maxCallStackDepth() const; void setMaxCallStackDepth(NS::UInteger maxCallStackDepth); void reset(); MTL::ShaderValidation shaderValidation() const; void setShaderValidation(MTL::ShaderValidation shaderValidation); }; class MeshRenderPipelineDescriptor : public NS::Copying { public: static class MeshRenderPipelineDescriptor* alloc(); class MeshRenderPipelineDescriptor* init(); NS::String* label() const; void setLabel(const NS::String* label); class Function* objectFunction() const; void setObjectFunction(const class Function* objectFunction); class Function* meshFunction() const; void setMeshFunction(const class Function* meshFunction); class Function* fragmentFunction() const; void setFragmentFunction(const class Function* fragmentFunction); NS::UInteger maxTotalThreadsPerObjectThreadgroup() const; void setMaxTotalThreadsPerObjectThreadgroup(NS::UInteger maxTotalThreadsPerObjectThreadgroup); NS::UInteger maxTotalThreadsPerMeshThreadgroup() const; void 
setMaxTotalThreadsPerMeshThreadgroup(NS::UInteger maxTotalThreadsPerMeshThreadgroup); bool objectThreadgroupSizeIsMultipleOfThreadExecutionWidth() const; void setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool objectThreadgroupSizeIsMultipleOfThreadExecutionWidth); bool meshThreadgroupSizeIsMultipleOfThreadExecutionWidth() const; void setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool meshThreadgroupSizeIsMultipleOfThreadExecutionWidth); NS::UInteger payloadMemoryLength() const; void setPayloadMemoryLength(NS::UInteger payloadMemoryLength); NS::UInteger maxTotalThreadgroupsPerMeshGrid() const; void setMaxTotalThreadgroupsPerMeshGrid(NS::UInteger maxTotalThreadgroupsPerMeshGrid); class PipelineBufferDescriptorArray* objectBuffers() const; class PipelineBufferDescriptorArray* meshBuffers() const; class PipelineBufferDescriptorArray* fragmentBuffers() const; NS::UInteger rasterSampleCount() const; void setRasterSampleCount(NS::UInteger rasterSampleCount); bool alphaToCoverageEnabled() const; void setAlphaToCoverageEnabled(bool alphaToCoverageEnabled); bool alphaToOneEnabled() const; void setAlphaToOneEnabled(bool alphaToOneEnabled); bool rasterizationEnabled() const; void setRasterizationEnabled(bool rasterizationEnabled); NS::UInteger maxVertexAmplificationCount() const; void setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount); class RenderPipelineColorAttachmentDescriptorArray* colorAttachments() const; MTL::PixelFormat depthAttachmentPixelFormat() const; void setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat); MTL::PixelFormat stencilAttachmentPixelFormat() const; void setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat); bool supportIndirectCommandBuffers() const; void setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers); NS::Array* binaryArchives() const; void setBinaryArchives(const NS::Array* binaryArchives); class LinkedFunctions* objectLinkedFunctions() 
const; void setObjectLinkedFunctions(const class LinkedFunctions* objectLinkedFunctions); class LinkedFunctions* meshLinkedFunctions() const; void setMeshLinkedFunctions(const class LinkedFunctions* meshLinkedFunctions); class LinkedFunctions* fragmentLinkedFunctions() const; void setFragmentLinkedFunctions(const class LinkedFunctions* fragmentLinkedFunctions); void reset(); MTL::ShaderValidation shaderValidation() const; void setShaderValidation(MTL::ShaderValidation shaderValidation); }; } _MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPipelineColorAttachmentDescriptor)); } _MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::PixelFormat MTL::RenderPipelineColorAttachmentDescriptor::pixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(pixelFormat)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat); } _MTL_INLINE bool MTL::RenderPipelineColorAttachmentDescriptor::blendingEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isBlendingEnabled)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setBlendingEnabled(bool blendingEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBlendingEnabled_), blendingEnabled); } _MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::sourceRGBBlendFactor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sourceRGBBlendFactor)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setSourceRGBBlendFactor(MTL::BlendFactor sourceRGBBlendFactor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSourceRGBBlendFactor_), sourceRGBBlendFactor); } _MTL_INLINE MTL::BlendFactor 
MTL::RenderPipelineColorAttachmentDescriptor::destinationRGBBlendFactor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(destinationRGBBlendFactor)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setDestinationRGBBlendFactor(MTL::BlendFactor destinationRGBBlendFactor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDestinationRGBBlendFactor_), destinationRGBBlendFactor); } _MTL_INLINE MTL::BlendOperation MTL::RenderPipelineColorAttachmentDescriptor::rgbBlendOperation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rgbBlendOperation)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setRgbBlendOperation(MTL::BlendOperation rgbBlendOperation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRgbBlendOperation_), rgbBlendOperation); } _MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::sourceAlphaBlendFactor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sourceAlphaBlendFactor)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setSourceAlphaBlendFactor(MTL::BlendFactor sourceAlphaBlendFactor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSourceAlphaBlendFactor_), sourceAlphaBlendFactor); } _MTL_INLINE MTL::BlendFactor MTL::RenderPipelineColorAttachmentDescriptor::destinationAlphaBlendFactor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(destinationAlphaBlendFactor)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setDestinationAlphaBlendFactor(MTL::BlendFactor destinationAlphaBlendFactor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDestinationAlphaBlendFactor_), destinationAlphaBlendFactor); } _MTL_INLINE MTL::BlendOperation MTL::RenderPipelineColorAttachmentDescriptor::alphaBlendOperation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(alphaBlendOperation)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setAlphaBlendOperation(MTL::BlendOperation alphaBlendOperation) { 
Object::sendMessage(this, _MTL_PRIVATE_SEL(setAlphaBlendOperation_), alphaBlendOperation); } _MTL_INLINE MTL::ColorWriteMask MTL::RenderPipelineColorAttachmentDescriptor::writeMask() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(writeMask)); } _MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptor::setWriteMask(MTL::ColorWriteMask writeMask) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setWriteMask_), writeMask); } _MTL_INLINE MTL::RenderPipelineReflection* MTL::RenderPipelineReflection::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPipelineReflection)); } _MTL_INLINE MTL::RenderPipelineReflection* MTL::RenderPipelineReflection::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::vertexBindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexBindings)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::fragmentBindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentBindings)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::tileBindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileBindings)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::objectBindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectBindings)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::meshBindings() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshBindings)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::vertexArguments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexArguments)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::fragmentArguments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentArguments)); } _MTL_INLINE NS::Array* MTL::RenderPipelineReflection::tileArguments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileArguments)); } _MTL_INLINE MTL::RenderPipelineDescriptor* MTL::RenderPipelineDescriptor::alloc() { return 
NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPipelineDescriptor)); } _MTL_INLINE MTL::RenderPipelineDescriptor* MTL::RenderPipelineDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::String* MTL::RenderPipelineDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE MTL::Function* MTL::RenderPipelineDescriptor::vertexFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexFunction)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexFunction(const MTL::Function* vertexFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexFunction_), vertexFunction); } _MTL_INLINE MTL::Function* MTL::RenderPipelineDescriptor::fragmentFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentFunction)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentFunction(const MTL::Function* fragmentFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentFunction_), fragmentFunction); } _MTL_INLINE MTL::VertexDescriptor* MTL::RenderPipelineDescriptor::vertexDescriptor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexDescriptor)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexDescriptor(const MTL::VertexDescriptor* vertexDescriptor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexDescriptor_), vertexDescriptor); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::sampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleCount)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setSampleCount(NS::UInteger sampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleCount_), sampleCount); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::rasterSampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rasterSampleCount)); } 
_MTL_INLINE void MTL::RenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::alphaToCoverageEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isAlphaToCoverageEnabled)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setAlphaToCoverageEnabled(bool alphaToCoverageEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAlphaToCoverageEnabled_), alphaToCoverageEnabled); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::alphaToOneEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isAlphaToOneEnabled)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setAlphaToOneEnabled(bool alphaToOneEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAlphaToOneEnabled_), alphaToOneEnabled); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::rasterizationEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isRasterizationEnabled)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setRasterizationEnabled(bool rasterizationEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterizationEnabled_), rasterizationEnabled); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxVertexAmplificationCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxVertexAmplificationCount)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxVertexAmplificationCount_), maxVertexAmplificationCount); } _MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineDescriptor::colorAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(colorAttachments)); } _MTL_INLINE MTL::PixelFormat MTL::RenderPipelineDescriptor::depthAttachmentPixelFormat() const { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(depthAttachmentPixelFormat)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthAttachmentPixelFormat_), depthAttachmentPixelFormat); } _MTL_INLINE MTL::PixelFormat MTL::RenderPipelineDescriptor::stencilAttachmentPixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilAttachmentPixelFormat)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilAttachmentPixelFormat_), stencilAttachmentPixelFormat); } _MTL_INLINE MTL::PrimitiveTopologyClass MTL::RenderPipelineDescriptor::inputPrimitiveTopology() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(inputPrimitiveTopology)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setInputPrimitiveTopology(MTL::PrimitiveTopologyClass inputPrimitiveTopology) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setInputPrimitiveTopology_), inputPrimitiveTopology); } _MTL_INLINE MTL::TessellationPartitionMode MTL::RenderPipelineDescriptor::tessellationPartitionMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tessellationPartitionMode)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationPartitionMode(MTL::TessellationPartitionMode tessellationPartitionMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationPartitionMode_), tessellationPartitionMode); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxTessellationFactor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTessellationFactor)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxTessellationFactor(NS::UInteger maxTessellationFactor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTessellationFactor_), maxTessellationFactor); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::tessellationFactorScaleEnabled() 
const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isTessellationFactorScaleEnabled)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorScaleEnabled(bool tessellationFactorScaleEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationFactorScaleEnabled_), tessellationFactorScaleEnabled); } _MTL_INLINE MTL::TessellationFactorFormat MTL::RenderPipelineDescriptor::tessellationFactorFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tessellationFactorFormat)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorFormat(MTL::TessellationFactorFormat tessellationFactorFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationFactorFormat_), tessellationFactorFormat); } _MTL_INLINE MTL::TessellationControlPointIndexType MTL::RenderPipelineDescriptor::tessellationControlPointIndexType() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tessellationControlPointIndexType)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationControlPointIndexType(MTL::TessellationControlPointIndexType tessellationControlPointIndexType) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationControlPointIndexType_), tessellationControlPointIndexType); } _MTL_INLINE MTL::TessellationFactorStepFunction MTL::RenderPipelineDescriptor::tessellationFactorStepFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tessellationFactorStepFunction)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setTessellationFactorStepFunction(MTL::TessellationFactorStepFunction tessellationFactorStepFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationFactorStepFunction_), tessellationFactorStepFunction); } _MTL_INLINE MTL::Winding MTL::RenderPipelineDescriptor::tessellationOutputWindingOrder() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tessellationOutputWindingOrder)); } _MTL_INLINE void 
MTL::RenderPipelineDescriptor::setTessellationOutputWindingOrder(MTL::Winding tessellationOutputWindingOrder) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTessellationOutputWindingOrder_), tessellationOutputWindingOrder); } _MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::RenderPipelineDescriptor::vertexBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexBuffers)); } _MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::RenderPipelineDescriptor::fragmentBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentBuffers)); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::supportIndirectCommandBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers); } _MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); } _MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::vertexPreloadedLibraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexPreloadedLibraries)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexPreloadedLibraries(const NS::Array* vertexPreloadedLibraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexPreloadedLibraries_), vertexPreloadedLibraries); } _MTL_INLINE NS::Array* MTL::RenderPipelineDescriptor::fragmentPreloadedLibraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentPreloadedLibraries)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentPreloadedLibraries(const NS::Array* 
fragmentPreloadedLibraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentPreloadedLibraries_), fragmentPreloadedLibraries); } _MTL_INLINE MTL::LinkedFunctions* MTL::RenderPipelineDescriptor::vertexLinkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexLinkedFunctions)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setVertexLinkedFunctions(const MTL::LinkedFunctions* vertexLinkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexLinkedFunctions_), vertexLinkedFunctions); } _MTL_INLINE MTL::LinkedFunctions* MTL::RenderPipelineDescriptor::fragmentLinkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentLinkedFunctions)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setFragmentLinkedFunctions(const MTL::LinkedFunctions* fragmentLinkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentLinkedFunctions_), fragmentLinkedFunctions); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::supportAddingVertexBinaryFunctions() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportAddingVertexBinaryFunctions)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setSupportAddingVertexBinaryFunctions(bool supportAddingVertexBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportAddingVertexBinaryFunctions_), supportAddingVertexBinaryFunctions); } _MTL_INLINE bool MTL::RenderPipelineDescriptor::supportAddingFragmentBinaryFunctions() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportAddingFragmentBinaryFunctions)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setSupportAddingFragmentBinaryFunctions(bool supportAddingFragmentBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportAddingFragmentBinaryFunctions_), supportAddingFragmentBinaryFunctions); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxVertexCallStackDepth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxVertexCallStackDepth)); 
} _MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxVertexCallStackDepth(NS::UInteger maxVertexCallStackDepth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxVertexCallStackDepth_), maxVertexCallStackDepth); } _MTL_INLINE NS::UInteger MTL::RenderPipelineDescriptor::maxFragmentCallStackDepth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxFragmentCallStackDepth)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setMaxFragmentCallStackDepth(NS::UInteger maxFragmentCallStackDepth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxFragmentCallStackDepth_), maxFragmentCallStackDepth); } _MTL_INLINE void MTL::RenderPipelineDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } _MTL_INLINE MTL::ShaderValidation MTL::RenderPipelineDescriptor::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); } _MTL_INLINE void MTL::RenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation); } _MTL_INLINE MTL::RenderPipelineFunctionsDescriptor* MTL::RenderPipelineFunctionsDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPipelineFunctionsDescriptor)); } _MTL_INLINE MTL::RenderPipelineFunctionsDescriptor* MTL::RenderPipelineFunctionsDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::vertexAdditionalBinaryFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(vertexAdditionalBinaryFunctions)); } _MTL_INLINE void MTL::RenderPipelineFunctionsDescriptor::setVertexAdditionalBinaryFunctions(const NS::Array* vertexAdditionalBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setVertexAdditionalBinaryFunctions_), vertexAdditionalBinaryFunctions); } _MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::fragmentAdditionalBinaryFunctions() const { return Object::sendMessage(this, 
_MTL_PRIVATE_SEL(fragmentAdditionalBinaryFunctions)); } _MTL_INLINE void MTL::RenderPipelineFunctionsDescriptor::setFragmentAdditionalBinaryFunctions(const NS::Array* fragmentAdditionalBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentAdditionalBinaryFunctions_), fragmentAdditionalBinaryFunctions); } _MTL_INLINE NS::Array* MTL::RenderPipelineFunctionsDescriptor::tileAdditionalBinaryFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileAdditionalBinaryFunctions)); } _MTL_INLINE void MTL::RenderPipelineFunctionsDescriptor::setTileAdditionalBinaryFunctions(const NS::Array* tileAdditionalBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileAdditionalBinaryFunctions_), tileAdditionalBinaryFunctions); } _MTL_INLINE NS::String* MTL::RenderPipelineState::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::Device* MTL::RenderPipelineState::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup)); } _MTL_INLINE bool MTL::RenderPipelineState::threadgroupSizeMatchesTileSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupSizeMatchesTileSize)); } _MTL_INLINE NS::UInteger MTL::RenderPipelineState::imageblockSampleLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(imageblockSampleLength)); } _MTL_INLINE NS::UInteger MTL::RenderPipelineState::imageblockMemoryLength(MTL::Size imageblockDimensions) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(imageblockMemoryLengthForDimensions_), imageblockDimensions); } _MTL_INLINE bool MTL::RenderPipelineState::supportIndirectCommandBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers)); } _MTL_INLINE NS::UInteger 
MTL::RenderPipelineState::maxTotalThreadsPerObjectThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerObjectThreadgroup)); }

// -------------------------------------------------------------------------
// MTL::RenderPipelineState — remaining inline accessors and factories.
// Every wrapper below forwards to the underlying Objective-C object via
// Object::sendMessage, using the selector named by _MTL_PRIVATE_SEL.
// -------------------------------------------------------------------------
_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadsPerMeshThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerMeshThreadgroup)); }
_MTL_INLINE NS::UInteger MTL::RenderPipelineState::objectThreadExecutionWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectThreadExecutionWidth)); }
_MTL_INLINE NS::UInteger MTL::RenderPipelineState::meshThreadExecutionWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshThreadExecutionWidth)); }
_MTL_INLINE NS::UInteger MTL::RenderPipelineState::maxTotalThreadgroupsPerMeshGrid() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadgroupsPerMeshGrid)); }
_MTL_INLINE MTL::ResourceID MTL::RenderPipelineState::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); }
// Function-handle / function-table factories; these dispatch the
// ...WithFunction:stage: / ...WithDescriptor:stage: family of selectors.
_MTL_INLINE MTL::FunctionHandle* MTL::RenderPipelineState::functionHandle(const MTL::Function* function, MTL::RenderStages stage) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionHandleWithFunction_stage_), function, stage); }
_MTL_INLINE MTL::VisibleFunctionTable* MTL::RenderPipelineState::newVisibleFunctionTable(const MTL::VisibleFunctionTableDescriptor* descriptor, MTL::RenderStages stage) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newVisibleFunctionTableWithDescriptor_stage_), descriptor, stage); }
_MTL_INLINE MTL::IntersectionFunctionTable* MTL::RenderPipelineState::newIntersectionFunctionTable(const MTL::IntersectionFunctionTableDescriptor* descriptor, MTL::RenderStages stage) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newIntersectionFunctionTableWithDescriptor_stage_), descriptor, stage); }
_MTL_INLINE MTL::RenderPipelineState* MTL::RenderPipelineState::newRenderPipelineState(const MTL::RenderPipelineFunctionsDescriptor* additionalBinaryFunctions, NS::Error** error) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(newRenderPipelineStateWithAdditionalBinaryFunctions_error_), additionalBinaryFunctions, error); }
_MTL_INLINE MTL::ShaderValidation MTL::RenderPipelineState::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); }

// ---- MTL::RenderPipelineColorAttachmentDescriptorArray: indexed subscript access ----
_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineColorAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLRenderPipelineColorAttachmentDescriptorArray)); }
_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::RenderPipelineColorAttachmentDescriptorArray::init() { return NS::Object::init(); }
_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptor* MTL::RenderPipelineColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); }
_MTL_INLINE void MTL::RenderPipelineColorAttachmentDescriptorArray::setObject(const MTL::RenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); }

// ---- MTL::TileRenderPipelineColorAttachmentDescriptor ----
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* MTL::TileRenderPipelineColorAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLTileRenderPipelineColorAttachmentDescriptor)); }
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* MTL::TileRenderPipelineColorAttachmentDescriptor::init() { return NS::Object::init(); }
_MTL_INLINE MTL::PixelFormat MTL::TileRenderPipelineColorAttachmentDescriptor::pixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(pixelFormat)); }
_MTL_INLINE void MTL::TileRenderPipelineColorAttachmentDescriptor::setPixelFormat(MTL::PixelFormat pixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPixelFormat_), pixelFormat); }

// ---- MTL::TileRenderPipelineColorAttachmentDescriptorArray: indexed subscript access ----
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineColorAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLTileRenderPipelineColorAttachmentDescriptorArray)); }
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineColorAttachmentDescriptorArray::init() { return NS::Object::init(); }
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptor* MTL::TileRenderPipelineColorAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); }
_MTL_INLINE void MTL::TileRenderPipelineColorAttachmentDescriptorArray::setObject(const MTL::TileRenderPipelineColorAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); }

// ---- MTL::TileRenderPipelineDescriptor: property getters/setters (continued below) ----
_MTL_INLINE MTL::TileRenderPipelineDescriptor* MTL::TileRenderPipelineDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLTileRenderPipelineDescriptor)); }
_MTL_INLINE MTL::TileRenderPipelineDescriptor* MTL::TileRenderPipelineDescriptor::init() { return NS::Object::init(); }
_MTL_INLINE NS::String* MTL::TileRenderPipelineDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); }
_MTL_INLINE MTL::Function* MTL::TileRenderPipelineDescriptor::tileFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileFunction)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setTileFunction(const MTL::Function* tileFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTileFunction_), tileFunction); }
_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::rasterSampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rasterSampleCount)); }
// ---- MTL::TileRenderPipelineDescriptor (continued) ----
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount); }
_MTL_INLINE MTL::TileRenderPipelineColorAttachmentDescriptorArray* MTL::TileRenderPipelineDescriptor::colorAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(colorAttachments)); }
_MTL_INLINE bool MTL::TileRenderPipelineDescriptor::threadgroupSizeMatchesTileSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(threadgroupSizeMatchesTileSize)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setThreadgroupSizeMatchesTileSize(bool threadgroupSizeMatchesTileSize) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setThreadgroupSizeMatchesTileSize_), threadgroupSizeMatchesTileSize); }
_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::TileRenderPipelineDescriptor::tileBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tileBuffers)); }
_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::maxTotalThreadsPerThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerThreadgroup)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setMaxTotalThreadsPerThreadgroup(NS::UInteger maxTotalThreadsPerThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerThreadgroup_), maxTotalThreadsPerThreadgroup); }
_MTL_INLINE NS::Array* MTL::TileRenderPipelineDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); }
_MTL_INLINE NS::Array* MTL::TileRenderPipelineDescriptor::preloadedLibraries() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(preloadedLibraries)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setPreloadedLibraries(const NS::Array* preloadedLibraries) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPreloadedLibraries_), preloadedLibraries); }
_MTL_INLINE MTL::LinkedFunctions* MTL::TileRenderPipelineDescriptor::linkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(linkedFunctions)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setLinkedFunctions(const MTL::LinkedFunctions* linkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLinkedFunctions_), linkedFunctions); }
// NOTE(review): this getter uses sendMessageSafe, unlike its sibling accessors;
// presumably it tolerates the selector being unavailable on older OS versions —
// confirm against the Object::sendMessageSafe definition earlier in this header.
_MTL_INLINE bool MTL::TileRenderPipelineDescriptor::supportAddingBinaryFunctions() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportAddingBinaryFunctions)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setSupportAddingBinaryFunctions(bool supportAddingBinaryFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportAddingBinaryFunctions_), supportAddingBinaryFunctions); }
_MTL_INLINE NS::UInteger MTL::TileRenderPipelineDescriptor::maxCallStackDepth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxCallStackDepth)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setMaxCallStackDepth(NS::UInteger maxCallStackDepth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxCallStackDepth_), maxCallStackDepth); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); }
_MTL_INLINE MTL::ShaderValidation MTL::TileRenderPipelineDescriptor::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); }
_MTL_INLINE void MTL::TileRenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation); }

// ---- MTL::MeshRenderPipelineDescriptor: object/mesh/fragment stage configuration ----
_MTL_INLINE MTL::MeshRenderPipelineDescriptor* MTL::MeshRenderPipelineDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLMeshRenderPipelineDescriptor)); }
_MTL_INLINE MTL::MeshRenderPipelineDescriptor* MTL::MeshRenderPipelineDescriptor::init() { return NS::Object::init(); }
_MTL_INLINE NS::String* MTL::MeshRenderPipelineDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); }
_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::objectFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectFunction)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectFunction(const MTL::Function* objectFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectFunction_), objectFunction); }
_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::meshFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshFunction)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMeshFunction(const MTL::Function* meshFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshFunction_), meshFunction); }
_MTL_INLINE MTL::Function* MTL::MeshRenderPipelineDescriptor::fragmentFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentFunction)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setFragmentFunction(const MTL::Function* fragmentFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentFunction_), fragmentFunction); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxTotalThreadsPerObjectThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerObjectThreadgroup)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadsPerObjectThreadgroup(NS::UInteger maxTotalThreadsPerObjectThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerObjectThreadgroup_), maxTotalThreadsPerObjectThreadgroup); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxTotalThreadsPerMeshThreadgroup() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadsPerMeshThreadgroup)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadsPerMeshThreadgroup(NS::UInteger maxTotalThreadsPerMeshThreadgroup) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadsPerMeshThreadgroup_), maxTotalThreadsPerMeshThreadgroup); }
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::objectThreadgroupSizeIsMultipleOfThreadExecutionWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectThreadgroupSizeIsMultipleOfThreadExecutionWidth)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool objectThreadgroupSizeIsMultipleOfThreadExecutionWidth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectThreadgroupSizeIsMultipleOfThreadExecutionWidth_), objectThreadgroupSizeIsMultipleOfThreadExecutionWidth); }
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::meshThreadgroupSizeIsMultipleOfThreadExecutionWidth() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshThreadgroupSizeIsMultipleOfThreadExecutionWidth)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth(bool meshThreadgroupSizeIsMultipleOfThreadExecutionWidth) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshThreadgroupSizeIsMultipleOfThreadExecutionWidth_), meshThreadgroupSizeIsMultipleOfThreadExecutionWidth); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::payloadMemoryLength() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(payloadMemoryLength)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setPayloadMemoryLength(NS::UInteger payloadMemoryLength) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setPayloadMemoryLength_), payloadMemoryLength); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxTotalThreadgroupsPerMeshGrid() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxTotalThreadgroupsPerMeshGrid)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxTotalThreadgroupsPerMeshGrid(NS::UInteger maxTotalThreadgroupsPerMeshGrid) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxTotalThreadgroupsPerMeshGrid_), maxTotalThreadgroupsPerMeshGrid); }
_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::objectBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectBuffers)); }
_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::meshBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshBuffers)); }
_MTL_INLINE MTL::PipelineBufferDescriptorArray* MTL::MeshRenderPipelineDescriptor::fragmentBuffers() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentBuffers)); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::rasterSampleCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rasterSampleCount)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setRasterSampleCount(NS::UInteger rasterSampleCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterSampleCount_), rasterSampleCount); }
// The boolean "isXxx" getters map the C++ name to the Objective-C "is"-prefixed selector.
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::alphaToCoverageEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isAlphaToCoverageEnabled)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setAlphaToCoverageEnabled(bool alphaToCoverageEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAlphaToCoverageEnabled_), alphaToCoverageEnabled); }
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::alphaToOneEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isAlphaToOneEnabled)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setAlphaToOneEnabled(bool alphaToOneEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setAlphaToOneEnabled_), alphaToOneEnabled); }
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::rasterizationEnabled() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(isRasterizationEnabled)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setRasterizationEnabled(bool rasterizationEnabled) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRasterizationEnabled_), rasterizationEnabled); }
_MTL_INLINE NS::UInteger MTL::MeshRenderPipelineDescriptor::maxVertexAmplificationCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxVertexAmplificationCount)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMaxVertexAmplificationCount(NS::UInteger maxVertexAmplificationCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxVertexAmplificationCount_), maxVertexAmplificationCount); }
_MTL_INLINE MTL::RenderPipelineColorAttachmentDescriptorArray* MTL::MeshRenderPipelineDescriptor::colorAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(colorAttachments)); }
_MTL_INLINE MTL::PixelFormat MTL::MeshRenderPipelineDescriptor::depthAttachmentPixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(depthAttachmentPixelFormat)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setDepthAttachmentPixelFormat(MTL::PixelFormat depthAttachmentPixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setDepthAttachmentPixelFormat_), depthAttachmentPixelFormat); }
_MTL_INLINE MTL::PixelFormat MTL::MeshRenderPipelineDescriptor::stencilAttachmentPixelFormat() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stencilAttachmentPixelFormat)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setStencilAttachmentPixelFormat(MTL::PixelFormat stencilAttachmentPixelFormat) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStencilAttachmentPixelFormat_), stencilAttachmentPixelFormat); }
// NOTE(review): sendMessageSafe here, as for supportAddingBinaryFunctions above —
// presumably availability-tolerant dispatch; confirm against Object::sendMessageSafe.
_MTL_INLINE bool MTL::MeshRenderPipelineDescriptor::supportIndirectCommandBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportIndirectCommandBuffers)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setSupportIndirectCommandBuffers(bool supportIndirectCommandBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportIndirectCommandBuffers_), supportIndirectCommandBuffers); }
_MTL_INLINE NS::Array* MTL::MeshRenderPipelineDescriptor::binaryArchives() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(binaryArchives)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setBinaryArchives(const NS::Array* binaryArchives) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBinaryArchives_), binaryArchives); }
_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::objectLinkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectLinkedFunctions)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setObjectLinkedFunctions(const MTL::LinkedFunctions* objectLinkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObjectLinkedFunctions_), objectLinkedFunctions); }
_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::meshLinkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(meshLinkedFunctions)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setMeshLinkedFunctions(const MTL::LinkedFunctions* meshLinkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMeshLinkedFunctions_), meshLinkedFunctions); }
_MTL_INLINE MTL::LinkedFunctions* MTL::MeshRenderPipelineDescriptor::fragmentLinkedFunctions() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(fragmentLinkedFunctions)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setFragmentLinkedFunctions(const MTL::LinkedFunctions* fragmentLinkedFunctions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFragmentLinkedFunctions_), fragmentLinkedFunctions); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); }
_MTL_INLINE MTL::ShaderValidation MTL::MeshRenderPipelineDescriptor::shaderValidation() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(shaderValidation)); }
_MTL_INLINE void MTL::MeshRenderPipelineDescriptor::setShaderValidation(MTL::ShaderValidation shaderValidation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setShaderValidation_), shaderValidation); }

// =========================================================================
// Residency-set API: declarations for ResidencySetDescriptor and ResidencySet.
// =========================================================================
#pragma once
namespace MTL
{
class ResidencySetDescriptor : public NS::Copying
{
public:
    static class ResidencySetDescriptor* alloc();
    class ResidencySetDescriptor* init();
    NS::String* label() const;
    void setLabel(const NS::String* label);
    NS::UInteger initialCapacity() const;
    void setInitialCapacity(NS::UInteger initialCapacity);
};
class ResidencySet : public NS::Referencing
{
public:
    class Device* device() const;
    NS::String* label() const;
    uint64_t allocatedSize() const;
    void requestResidency();
    void endResidency();
    void addAllocation(const class Allocation* allocation);
    void addAllocations(const class Allocation* const allocations[], NS::UInteger count);
    void removeAllocation(const class Allocation* allocation);
    void removeAllocations(const class Allocation* const allocations[], NS::UInteger count);
    void removeAllAllocations();
    bool containsAllocation(const class Allocation* anAllocation);
    NS::Array* allAllocations() const;
    NS::UInteger allocationCount() const;
    void commit();
};
}

// ---- MTL::ResidencySetDescriptor inline implementations ----
_MTL_INLINE MTL::ResidencySetDescriptor* MTL::ResidencySetDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLResidencySetDescriptor)); }
_MTL_INLINE MTL::ResidencySetDescriptor* MTL::ResidencySetDescriptor::init() { return NS::Object::init(); }
_MTL_INLINE NS::String* MTL::ResidencySetDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE void MTL::ResidencySetDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); }
_MTL_INLINE NS::UInteger MTL::ResidencySetDescriptor::initialCapacity() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(initialCapacity)); }
_MTL_INLINE void MTL::ResidencySetDescriptor::setInitialCapacity(NS::UInteger initialCapacity) {
Object::sendMessage(this, _MTL_PRIVATE_SEL(setInitialCapacity_), initialCapacity); }

// ---- MTL::ResidencySet inline implementations ----
_MTL_INLINE MTL::Device* MTL::ResidencySet::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); }
_MTL_INLINE NS::String* MTL::ResidencySet::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); }
_MTL_INLINE uint64_t MTL::ResidencySet::allocatedSize() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allocatedSize)); }
_MTL_INLINE void MTL::ResidencySet::requestResidency() { Object::sendMessage(this, _MTL_PRIVATE_SEL(requestResidency)); }
_MTL_INLINE void MTL::ResidencySet::endResidency() { Object::sendMessage(this, _MTL_PRIVATE_SEL(endResidency)); }
_MTL_INLINE void MTL::ResidencySet::addAllocation(const MTL::Allocation* allocation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addAllocation_), allocation); }
_MTL_INLINE void MTL::ResidencySet::addAllocations(const MTL::Allocation* const allocations[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(addAllocations_count_), allocations, count); }
_MTL_INLINE void MTL::ResidencySet::removeAllocation(const MTL::Allocation* allocation) { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeAllocation_), allocation); }
_MTL_INLINE void MTL::ResidencySet::removeAllocations(const MTL::Allocation* const allocations[], NS::UInteger count) { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeAllocations_count_), allocations, count); }
_MTL_INLINE void MTL::ResidencySet::removeAllAllocations() { Object::sendMessage(this, _MTL_PRIVATE_SEL(removeAllAllocations)); }
_MTL_INLINE bool MTL::ResidencySet::containsAllocation(const MTL::Allocation* anAllocation) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(containsAllocation_), anAllocation); }
_MTL_INLINE NS::Array* MTL::ResidencySet::allAllocations() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allAllocations)); }
_MTL_INLINE NS::UInteger MTL::ResidencySet::allocationCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(allocationCount)); }
_MTL_INLINE void MTL::ResidencySet::commit() { Object::sendMessage(this, _MTL_PRIVATE_SEL(commit)); }

// =========================================================================
// Sparse-texture resource-state API: mapping mode, indirect-map arguments,
// and the ResourceStateCommandEncoder interface.
// =========================================================================
#pragma once
namespace MTL
{
_MTL_ENUM(NS::UInteger, SparseTextureMappingMode) {
    SparseTextureMappingModeMap = 0,
    SparseTextureMappingModeUnmap = 1,
};
// Layout of one indirect map/unmap request; packed so it can be read
// directly from a GPU-visible buffer.
struct MapIndirectArguments
{
    uint32_t regionOriginX;
    uint32_t regionOriginY;
    uint32_t regionOriginZ;
    uint32_t regionSizeWidth;
    uint32_t regionSizeHeight;
    uint32_t regionSizeDepth;
    uint32_t mipMapLevel;
    uint32_t sliceId;
} _MTL_PACKED;
class ResourceStateCommandEncoder : public NS::Referencing
{
public:
    void updateTextureMappings(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region* regions, const NS::UInteger* mipLevels, const NS::UInteger* slices, NS::UInteger numRegions);
    void updateTextureMapping(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region region, const NS::UInteger mipLevel, const NS::UInteger slice);
    void updateTextureMapping(const class Texture* texture, const MTL::SparseTextureMappingMode mode, const class Buffer* indirectBuffer, NS::UInteger indirectBufferOffset);
    void updateFence(const class Fence* fence);
    void waitForFence(const class Fence* fence);
    void moveTextureMappingsFromTexture(const class Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const class Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin);
};
}

// ---- MTL::ResourceStateCommandEncoder inline implementations ----
_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMappings(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region* regions, const NS::UInteger* mipLevels, const NS::UInteger* slices, NS::UInteger numRegions) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateTextureMappings_mode_regions_mipLevels_slices_numRegions_), texture, mode, regions, mipLevels, slices, numRegions); }
_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMapping(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Region region, const NS::UInteger mipLevel, const NS::UInteger slice) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateTextureMapping_mode_region_mipLevel_slice_), texture, mode, region, mipLevel, slice); }
_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateTextureMapping(const MTL::Texture* texture, const MTL::SparseTextureMappingMode mode, const MTL::Buffer* indirectBuffer, NS::UInteger indirectBufferOffset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateTextureMapping_mode_indirectBuffer_indirectBufferOffset_), texture, mode, indirectBuffer, indirectBufferOffset); }
_MTL_INLINE void MTL::ResourceStateCommandEncoder::updateFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(updateFence_), fence); }
_MTL_INLINE void MTL::ResourceStateCommandEncoder::waitForFence(const MTL::Fence* fence) { Object::sendMessage(this, _MTL_PRIVATE_SEL(waitForFence_), fence); }
_MTL_INLINE void MTL::ResourceStateCommandEncoder::moveTextureMappingsFromTexture(const MTL::Texture* sourceTexture, NS::UInteger sourceSlice, NS::UInteger sourceLevel, MTL::Origin sourceOrigin, MTL::Size sourceSize, const MTL::Texture* destinationTexture, NS::UInteger destinationSlice, NS::UInteger destinationLevel, MTL::Origin destinationOrigin) { Object::sendMessage(this, _MTL_PRIVATE_SEL(moveTextureMappingsFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin_), sourceTexture, sourceSlice, sourceLevel, sourceOrigin, sourceSize, destinationTexture, destinationSlice, destinationLevel, destinationOrigin); }

// =========================================================================
// Resource-state pass API: sample-buffer attachment descriptors and the
// pass descriptor that aggregates them.
// =========================================================================
#pragma once
namespace MTL
{
class ResourceStatePassSampleBufferAttachmentDescriptor : public NS::Copying
{
public:
    static class ResourceStatePassSampleBufferAttachmentDescriptor* alloc();
    class ResourceStatePassSampleBufferAttachmentDescriptor* init();
    class CounterSampleBuffer* sampleBuffer() const;
    void setSampleBuffer(const class CounterSampleBuffer* sampleBuffer);
    NS::UInteger startOfEncoderSampleIndex() const;
    void setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex);
    NS::UInteger endOfEncoderSampleIndex() const;
    void setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex);
};
class ResourceStatePassSampleBufferAttachmentDescriptorArray : public NS::Referencing
{
public:
    static class ResourceStatePassSampleBufferAttachmentDescriptorArray* alloc();
    class ResourceStatePassSampleBufferAttachmentDescriptorArray* init();
    class ResourceStatePassSampleBufferAttachmentDescriptor* object(NS::UInteger attachmentIndex);
    void setObject(const class ResourceStatePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex);
};
class ResourceStatePassDescriptor : public NS::Copying
{
public:
    static class ResourceStatePassDescriptor* alloc();
    class ResourceStatePassDescriptor* init();
    static class ResourceStatePassDescriptor* resourceStatePassDescriptor();
    class ResourceStatePassSampleBufferAttachmentDescriptorArray* sampleBufferAttachments() const;
};
}

// ---- MTL::ResourceStatePass* inline implementations ----
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptor)); }
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::init() { return NS::Object::init(); }
_MTL_INLINE MTL::CounterSampleBuffer* MTL::ResourceStatePassSampleBufferAttachmentDescriptor::sampleBuffer() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBuffer)); }
_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setSampleBuffer(const MTL::CounterSampleBuffer* sampleBuffer) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSampleBuffer_), sampleBuffer); }
_MTL_INLINE NS::UInteger MTL::ResourceStatePassSampleBufferAttachmentDescriptor::startOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(startOfEncoderSampleIndex)); }
_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setStartOfEncoderSampleIndex(NS::UInteger startOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStartOfEncoderSampleIndex_), startOfEncoderSampleIndex); }
_MTL_INLINE NS::UInteger MTL::ResourceStatePassSampleBufferAttachmentDescriptor::endOfEncoderSampleIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(endOfEncoderSampleIndex)); }
_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptor::setEndOfEncoderSampleIndex(NS::UInteger endOfEncoderSampleIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setEndOfEncoderSampleIndex_), endOfEncoderSampleIndex); }
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLResourceStatePassSampleBufferAttachmentDescriptorArray)); }
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::init() { return NS::Object::init(); }
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptor* MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::object(NS::UInteger attachmentIndex) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), attachmentIndex); }
_MTL_INLINE void MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray::setObject(const MTL::ResourceStatePassSampleBufferAttachmentDescriptor* attachment, NS::UInteger attachmentIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attachment, attachmentIndex); }
_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLResourceStatePassDescriptor)); }
_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::init() { return NS::Object::init(); }
// Static convenience factory: messages the class object, not an instance.
_MTL_INLINE MTL::ResourceStatePassDescriptor* MTL::ResourceStatePassDescriptor::resourceStatePassDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLResourceStatePassDescriptor), _MTL_PRIVATE_SEL(resourceStatePassDescriptor)); }
_MTL_INLINE MTL::ResourceStatePassSampleBufferAttachmentDescriptorArray* MTL::ResourceStatePassDescriptor::sampleBufferAttachments() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sampleBufferAttachments)); }

// =========================================================================
// Sampler API: filtering/addressing enums, SamplerDescriptor and SamplerState.
// =========================================================================
#pragma once
namespace MTL
{
_MTL_ENUM(NS::UInteger, SamplerMinMagFilter) {
    SamplerMinMagFilterNearest = 0,
    SamplerMinMagFilterLinear = 1,
};
_MTL_ENUM(NS::UInteger, SamplerMipFilter) {
    SamplerMipFilterNotMipmapped = 0,
    SamplerMipFilterNearest = 1,
    SamplerMipFilterLinear = 2,
};
_MTL_ENUM(NS::UInteger, SamplerAddressMode) {
    SamplerAddressModeClampToEdge = 0,
    SamplerAddressModeMirrorClampToEdge = 1,
    SamplerAddressModeRepeat = 2,
    SamplerAddressModeMirrorRepeat = 3,
    SamplerAddressModeClampToZero = 4,
    SamplerAddressModeClampToBorderColor = 5,
};
_MTL_ENUM(NS::UInteger, SamplerBorderColor) {
    SamplerBorderColorTransparentBlack = 0,
    SamplerBorderColorOpaqueBlack = 1,
    SamplerBorderColorOpaqueWhite = 2,
};
class SamplerDescriptor : public NS::Copying
{
public:
    static class SamplerDescriptor* alloc();
    class SamplerDescriptor* init();
    MTL::SamplerMinMagFilter minFilter() const;
    void setMinFilter(MTL::SamplerMinMagFilter minFilter);
    MTL::SamplerMinMagFilter magFilter() const;
    void setMagFilter(MTL::SamplerMinMagFilter magFilter);
    MTL::SamplerMipFilter mipFilter() const;
    void setMipFilter(MTL::SamplerMipFilter mipFilter);
    NS::UInteger maxAnisotropy() const;
    void setMaxAnisotropy(NS::UInteger maxAnisotropy);
    MTL::SamplerAddressMode sAddressMode() const;
    void setSAddressMode(MTL::SamplerAddressMode sAddressMode);
    MTL::SamplerAddressMode
// ---------------------------------------------------------------------------------------------
// metal-cpp MTLSamplerDescriptor (tail): the remaining MTL::SamplerDescriptor property
// declarations, the MTL::SamplerState wrapper class, and the _MTL_INLINE accessor
// implementations. Every accessor forwards to the corresponding Objective-C selector through
// Object::sendMessage, so these wrappers carry no state of their own.
// ---------------------------------------------------------------------------------------------
tAddressMode() const; void setTAddressMode(MTL::SamplerAddressMode tAddressMode); MTL::SamplerAddressMode rAddressMode() const; void setRAddressMode(MTL::SamplerAddressMode rAddressMode); MTL::SamplerBorderColor borderColor() const; void setBorderColor(MTL::SamplerBorderColor borderColor); bool normalizedCoordinates() const; void setNormalizedCoordinates(bool normalizedCoordinates); float lodMinClamp() const; void setLodMinClamp(float lodMinClamp); float lodMaxClamp() const; void setLodMaxClamp(float lodMaxClamp); bool lodAverage() const; void setLodAverage(bool lodAverage); MTL::CompareFunction compareFunction() const; void setCompareFunction(MTL::CompareFunction compareFunction); bool supportArgumentBuffers() const; void setSupportArgumentBuffers(bool supportArgumentBuffers); NS::String* label() const; void setLabel(const NS::String* label); }; class SamplerState : public NS::Referencing { public: NS::String* label() const; class Device* device() const; MTL::ResourceID gpuResourceID() const; }; } _MTL_INLINE MTL::SamplerDescriptor* MTL::SamplerDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLSamplerDescriptor)); } _MTL_INLINE MTL::SamplerDescriptor* MTL::SamplerDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::SamplerMinMagFilter MTL::SamplerDescriptor::minFilter() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(minFilter)); } _MTL_INLINE void MTL::SamplerDescriptor::setMinFilter(MTL::SamplerMinMagFilter minFilter) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMinFilter_), minFilter); } _MTL_INLINE MTL::SamplerMinMagFilter MTL::SamplerDescriptor::magFilter() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(magFilter)); } _MTL_INLINE void MTL::SamplerDescriptor::setMagFilter(MTL::SamplerMinMagFilter magFilter) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMagFilter_), magFilter); } _MTL_INLINE MTL::SamplerMipFilter MTL::SamplerDescriptor::mipFilter() const { return Object::sendMessage(this,
// The mipFilter() call above continues below; the remaining accessors follow the exact same
// sendMessage-with-selector pattern.
_MTL_PRIVATE_SEL(mipFilter)); } _MTL_INLINE void MTL::SamplerDescriptor::setMipFilter(MTL::SamplerMipFilter mipFilter) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMipFilter_), mipFilter); } _MTL_INLINE NS::UInteger MTL::SamplerDescriptor::maxAnisotropy() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(maxAnisotropy)); } _MTL_INLINE void MTL::SamplerDescriptor::setMaxAnisotropy(NS::UInteger maxAnisotropy) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setMaxAnisotropy_), maxAnisotropy); } _MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::sAddressMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(sAddressMode)); } _MTL_INLINE void MTL::SamplerDescriptor::setSAddressMode(MTL::SamplerAddressMode sAddressMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSAddressMode_), sAddressMode); } _MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::tAddressMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(tAddressMode)); } _MTL_INLINE void MTL::SamplerDescriptor::setTAddressMode(MTL::SamplerAddressMode tAddressMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setTAddressMode_), tAddressMode); } _MTL_INLINE MTL::SamplerAddressMode MTL::SamplerDescriptor::rAddressMode() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(rAddressMode)); } _MTL_INLINE void MTL::SamplerDescriptor::setRAddressMode(MTL::SamplerAddressMode rAddressMode) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setRAddressMode_), rAddressMode); } _MTL_INLINE MTL::SamplerBorderColor MTL::SamplerDescriptor::borderColor() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(borderColor)); } _MTL_INLINE void MTL::SamplerDescriptor::setBorderColor(MTL::SamplerBorderColor borderColor) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBorderColor_), borderColor); } _MTL_INLINE bool MTL::SamplerDescriptor::normalizedCoordinates() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(normalizedCoordinates)); } _MTL_INLINE void
// NOTE(review): supportArgumentBuffers() below is the one getter that uses sendMessageSafe
// rather than sendMessage — presumably because that selector may be unavailable on older OS
// versions; confirm against upstream metal-cpp before changing it.
MTL::SamplerDescriptor::setNormalizedCoordinates(bool normalizedCoordinates) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setNormalizedCoordinates_), normalizedCoordinates); } _MTL_INLINE float MTL::SamplerDescriptor::lodMinClamp() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(lodMinClamp)); } _MTL_INLINE void MTL::SamplerDescriptor::setLodMinClamp(float lodMinClamp) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLodMinClamp_), lodMinClamp); } _MTL_INLINE float MTL::SamplerDescriptor::lodMaxClamp() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(lodMaxClamp)); } _MTL_INLINE void MTL::SamplerDescriptor::setLodMaxClamp(float lodMaxClamp) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLodMaxClamp_), lodMaxClamp); } _MTL_INLINE bool MTL::SamplerDescriptor::lodAverage() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(lodAverage)); } _MTL_INLINE void MTL::SamplerDescriptor::setLodAverage(bool lodAverage) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLodAverage_), lodAverage); } _MTL_INLINE MTL::CompareFunction MTL::SamplerDescriptor::compareFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(compareFunction)); } _MTL_INLINE void MTL::SamplerDescriptor::setCompareFunction(MTL::CompareFunction compareFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setCompareFunction_), compareFunction); } _MTL_INLINE bool MTL::SamplerDescriptor::supportArgumentBuffers() const { return Object::sendMessageSafe(this, _MTL_PRIVATE_SEL(supportArgumentBuffers)); } _MTL_INLINE void MTL::SamplerDescriptor::setSupportArgumentBuffers(bool supportArgumentBuffers) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setSupportArgumentBuffers_), supportArgumentBuffers); } _MTL_INLINE NS::String* MTL::SamplerDescriptor::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE void MTL::SamplerDescriptor::setLabel(const NS::String* label) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setLabel_), label); } _MTL_INLINE
// ---------------------------------------------------------------------------------------------
// MTL::SamplerState accessor implementations (label/device/gpuResourceID), followed by the
// start of metal-cpp MTLVertexDescriptor: the BufferLayoutStrideDynamic sentinel
// (NS::UIntegerMax) and the MTL::VertexFormat enumeration. Note the enumerator values jump
// from 42 to 45 — the gap is present in the enum as written here.
// ---------------------------------------------------------------------------------------------
NS::String* MTL::SamplerState::label() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(label)); } _MTL_INLINE MTL::Device* MTL::SamplerState::device() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(device)); } _MTL_INLINE MTL::ResourceID MTL::SamplerState::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } #pragma once namespace MTL { static const NS::UInteger BufferLayoutStrideDynamic = NS::UIntegerMax; _MTL_ENUM(NS::UInteger, VertexFormat) { VertexFormatInvalid = 0, VertexFormatUChar2 = 1, VertexFormatUChar3 = 2, VertexFormatUChar4 = 3, VertexFormatChar2 = 4, VertexFormatChar3 = 5, VertexFormatChar4 = 6, VertexFormatUChar2Normalized = 7, VertexFormatUChar3Normalized = 8, VertexFormatUChar4Normalized = 9, VertexFormatChar2Normalized = 10, VertexFormatChar3Normalized = 11, VertexFormatChar4Normalized = 12, VertexFormatUShort2 = 13, VertexFormatUShort3 = 14, VertexFormatUShort4 = 15, VertexFormatShort2 = 16, VertexFormatShort3 = 17, VertexFormatShort4 = 18, VertexFormatUShort2Normalized = 19, VertexFormatUShort3Normalized = 20, VertexFormatUShort4Normalized = 21, VertexFormatShort2Normalized = 22, VertexFormatShort3Normalized = 23, VertexFormatShort4Normalized = 24, VertexFormatHalf2 = 25, VertexFormatHalf3 = 26, VertexFormatHalf4 = 27, VertexFormatFloat = 28, VertexFormatFloat2 = 29, VertexFormatFloat3 = 30, VertexFormatFloat4 = 31, VertexFormatInt = 32, VertexFormatInt2 = 33, VertexFormatInt3 = 34, VertexFormatInt4 = 35, VertexFormatUInt = 36, VertexFormatUInt2 = 37, VertexFormatUInt3 = 38, VertexFormatUInt4 = 39, VertexFormatInt1010102Normalized = 40, VertexFormatUInt1010102Normalized = 41, VertexFormatUChar4Normalized_BGRA = 42, VertexFormatUChar = 45, VertexFormatChar = 46, VertexFormatUCharNormalized = 47, VertexFormatCharNormalized = 48, VertexFormatUShort = 49, VertexFormatShort = 50, VertexFormatUShortNormalized = 51, VertexFormatShortNormalized = 52, VertexFormatHalf = 53,
// VertexFormat continues below, then VertexStepFunction (per-vertex / per-instance / patch
// stepping) and the vertex-descriptor class declarations: buffer layouts, attributes, their
// Array wrappers, and the top-level MTL::VertexDescriptor.
VertexFormatFloatRG11B10 = 54, VertexFormatFloatRGB9E5 = 55, }; _MTL_ENUM(NS::UInteger, VertexStepFunction) { VertexStepFunctionConstant = 0, VertexStepFunctionPerVertex = 1, VertexStepFunctionPerInstance = 2, VertexStepFunctionPerPatch = 3, VertexStepFunctionPerPatchControlPoint = 4, }; class VertexBufferLayoutDescriptor : public NS::Copying { public: static class VertexBufferLayoutDescriptor* alloc(); class VertexBufferLayoutDescriptor* init(); NS::UInteger stride() const; void setStride(NS::UInteger stride); MTL::VertexStepFunction stepFunction() const; void setStepFunction(MTL::VertexStepFunction stepFunction); NS::UInteger stepRate() const; void setStepRate(NS::UInteger stepRate); }; class VertexBufferLayoutDescriptorArray : public NS::Referencing { public: static class VertexBufferLayoutDescriptorArray* alloc(); class VertexBufferLayoutDescriptorArray* init(); class VertexBufferLayoutDescriptor* object(NS::UInteger index); void setObject(const class VertexBufferLayoutDescriptor* bufferDesc, NS::UInteger index); }; class VertexAttributeDescriptor : public NS::Copying { public: static class VertexAttributeDescriptor* alloc(); class VertexAttributeDescriptor* init(); MTL::VertexFormat format() const; void setFormat(MTL::VertexFormat format); NS::UInteger offset() const; void setOffset(NS::UInteger offset); NS::UInteger bufferIndex() const; void setBufferIndex(NS::UInteger bufferIndex); }; class VertexAttributeDescriptorArray : public NS::Referencing { public: static class VertexAttributeDescriptorArray* alloc(); class VertexAttributeDescriptorArray* init(); class VertexAttributeDescriptor* object(NS::UInteger index); void setObject(const class VertexAttributeDescriptor* attributeDesc, NS::UInteger index); }; class VertexDescriptor : public NS::Copying { public: static class VertexDescriptor* alloc(); class VertexDescriptor* init(); static class VertexDescriptor* vertexDescriptor(); class VertexBufferLayoutDescriptorArray* layouts() const; class
// ---------------------------------------------------------------------------------------------
// End of the MTL::VertexDescriptor class declaration plus the _MTL_INLINE implementations for
// VertexBufferLayoutDescriptor(Array), VertexAttributeDescriptor(Array) and VertexDescriptor.
// Array subscripting maps to the objectAtIndexedSubscript_/setObject_atIndexedSubscript_
// selectors; everything else is a plain property-selector forward.
// ---------------------------------------------------------------------------------------------
VertexAttributeDescriptorArray* attributes() const; void reset(); }; } _MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexBufferLayoutDescriptor)); } _MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptor::init() { return NS::Object::init(); } _MTL_INLINE NS::UInteger MTL::VertexBufferLayoutDescriptor::stride() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stride)); } _MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStride(NS::UInteger stride) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStride_), stride); } _MTL_INLINE MTL::VertexStepFunction MTL::VertexBufferLayoutDescriptor::stepFunction() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stepFunction)); } _MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStepFunction(MTL::VertexStepFunction stepFunction) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStepFunction_), stepFunction); } _MTL_INLINE NS::UInteger MTL::VertexBufferLayoutDescriptor::stepRate() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(stepRate)); } _MTL_INLINE void MTL::VertexBufferLayoutDescriptor::setStepRate(NS::UInteger stepRate) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setStepRate_), stepRate); } _MTL_INLINE MTL::VertexBufferLayoutDescriptorArray* MTL::VertexBufferLayoutDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexBufferLayoutDescriptorArray)); } _MTL_INLINE MTL::VertexBufferLayoutDescriptorArray* MTL::VertexBufferLayoutDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::VertexBufferLayoutDescriptor* MTL::VertexBufferLayoutDescriptorArray::object(NS::UInteger index) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index); } _MTL_INLINE void MTL::VertexBufferLayoutDescriptorArray::setObject(const MTL::VertexBufferLayoutDescriptor* bufferDesc, NS::UInteger index) { Object::sendMessage(this,
// The setObject call above continues below; VertexAttributeDescriptor(Array) accessors follow
// the same pattern as the buffer-layout ones above.
_MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), bufferDesc, index); } _MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexAttributeDescriptor)); } _MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::VertexFormat MTL::VertexAttributeDescriptor::format() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(format)); } _MTL_INLINE void MTL::VertexAttributeDescriptor::setFormat(MTL::VertexFormat format) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFormat_), format); } _MTL_INLINE NS::UInteger MTL::VertexAttributeDescriptor::offset() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(offset)); } _MTL_INLINE void MTL::VertexAttributeDescriptor::setOffset(NS::UInteger offset) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setOffset_), offset); } _MTL_INLINE NS::UInteger MTL::VertexAttributeDescriptor::bufferIndex() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(bufferIndex)); } _MTL_INLINE void MTL::VertexAttributeDescriptor::setBufferIndex(NS::UInteger bufferIndex) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setBufferIndex_), bufferIndex); } _MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexAttributeDescriptorArray::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexAttributeDescriptorArray)); } _MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexAttributeDescriptorArray::init() { return NS::Object::init(); } _MTL_INLINE MTL::VertexAttributeDescriptor* MTL::VertexAttributeDescriptorArray::object(NS::UInteger index) { return Object::sendMessage(this, _MTL_PRIVATE_SEL(objectAtIndexedSubscript_), index); } _MTL_INLINE void MTL::VertexAttributeDescriptorArray::setObject(const MTL::VertexAttributeDescriptor* attributeDesc, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setObject_atIndexedSubscript_), attributeDesc, index); } _MTL_INLINE
// MTL::VertexDescriptor implementations (vertexDescriptor() is a class-level factory sent to
// the MTLVertexDescriptor class object), followed by the start of MTLVisibleFunctionTable:
// descriptor + table wrapper declarations.
MTL::VertexDescriptor* MTL::VertexDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVertexDescriptor)); } _MTL_INLINE MTL::VertexDescriptor* MTL::VertexDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::VertexDescriptor* MTL::VertexDescriptor::vertexDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLVertexDescriptor), _MTL_PRIVATE_SEL(vertexDescriptor)); } _MTL_INLINE MTL::VertexBufferLayoutDescriptorArray* MTL::VertexDescriptor::layouts() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(layouts)); } _MTL_INLINE MTL::VertexAttributeDescriptorArray* MTL::VertexDescriptor::attributes() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(attributes)); } _MTL_INLINE void MTL::VertexDescriptor::reset() { Object::sendMessage(this, _MTL_PRIVATE_SEL(reset)); } #pragma once namespace MTL { class VisibleFunctionTableDescriptor : public NS::Copying { public: static class VisibleFunctionTableDescriptor* alloc(); class VisibleFunctionTableDescriptor* init(); static class VisibleFunctionTableDescriptor* visibleFunctionTableDescriptor(); NS::UInteger functionCount() const; void setFunctionCount(NS::UInteger functionCount); }; class VisibleFunctionTable : public NS::Referencing { public: MTL::ResourceID gpuResourceID() const; void setFunction(const class FunctionHandle* function, NS::UInteger index); void setFunctions(const class FunctionHandle* const functions[], NS::Range range); }; } _MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::alloc() { return NS::Object::alloc(_MTL_PRIVATE_CLS(MTLVisibleFunctionTableDescriptor)); } _MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::init() { return NS::Object::init(); } _MTL_INLINE MTL::VisibleFunctionTableDescriptor* MTL::VisibleFunctionTableDescriptor::visibleFunctionTableDescriptor() { return Object::sendMessage(_MTL_PRIVATE_CLS(MTLVisibleFunctionTableDescriptor),
// ---------------------------------------------------------------------------------------------
// Tail of the MTL::VisibleFunctionTable(Descriptor) inline implementations, followed by the
// metal-cpp version macros (major 367, minor 4, patch 2).
// NOTE(review): METALCPP_SUPPORTS_VERSION expands its arguments without surrounding
// parentheses, so it is only safe when called with plain integer literals; an expression
// argument such as (1 + 2) would evaluate incorrectly. Confirm against upstream metal-cpp
// before hardening, since this is vendored third-party code.
// ---------------------------------------------------------------------------------------------
_MTL_PRIVATE_SEL(visibleFunctionTableDescriptor)); } _MTL_INLINE NS::UInteger MTL::VisibleFunctionTableDescriptor::functionCount() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(functionCount)); } _MTL_INLINE void MTL::VisibleFunctionTableDescriptor::setFunctionCount(NS::UInteger functionCount) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctionCount_), functionCount); } _MTL_INLINE MTL::ResourceID MTL::VisibleFunctionTable::gpuResourceID() const { return Object::sendMessage(this, _MTL_PRIVATE_SEL(gpuResourceID)); } _MTL_INLINE void MTL::VisibleFunctionTable::setFunction(const MTL::FunctionHandle* function, NS::UInteger index) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunction_atIndex_), function, index); } _MTL_INLINE void MTL::VisibleFunctionTable::setFunctions(const MTL::FunctionHandle* const functions[], NS::Range range) { Object::sendMessage(this, _MTL_PRIVATE_SEL(setFunctions_withRange_), functions, range); } #define METALCPP_VERSION_MAJOR 367 #define METALCPP_VERSION_MINOR 4 #define METALCPP_VERSION_PATCH 2 #define METALCPP_SUPPORTS_VERSION(major, minor, patch) \ ((major < METALCPP_VERSION_MAJOR) || \ (major == METALCPP_VERSION_MAJOR && minor < METALCPP_VERSION_MINOR) || \ (major == METALCPP_VERSION_MAJOR && minor == METALCPP_VERSION_MINOR && patch <= METALCPP_VERSION_PATCH)) ================================================ FILE: deps/renderdoc/renderdoc_app.h ================================================ /****************************************************************************** * The MIT License (MIT) * * Copyright (c) 2019-2023 Baldur Karlsson * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is *
furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 ******************************************************************************/

#pragma once

//////////////////////////////////////////////////////////////////////////////////////////////////
//
// Documentation for the API is available at https://renderdoc.org/docs/in_application_api.html
//

// The API below uses the fixed-width integer types (uint32_t, uint64_t, ...).
// NOTE(review): the include target was stripped during extraction; restored to <stdint.h> to
// match upstream renderdoc_app.h. Define RENDERDOC_NO_STDINT to supply these types yourself.
#if !defined(RENDERDOC_NO_STDINT)
#include <stdint.h>
#endif

// Calling convention used by every API entry point: __cdecl on Windows, the platform
// default on Linux and Apple platforms.
#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
#define RENDERDOC_CC __cdecl
#elif defined(__linux__)
#define RENDERDOC_CC
#elif defined(__APPLE__)
#define RENDERDOC_CC
#else
#error "Unknown platform"
#endif

#ifdef __cplusplus
extern "C" {
#endif

//////////////////////////////////////////////////////////////////////////////////////////////////
// Constants not used directly in below API

// This is a GUID/magic value used for when applications pass a path where shader debug
// information can be found to match up with a stripped shader.
// ---------------------------------------------------------------------------------------------
// RENDERDOC_CaptureOption: per-capture behaviour switches, set/queried through the
// pRENDERDOC_SetCaptureOption(U32|F32) / pRENDERDOC_GetCaptureOption(U32|F32) function
// pointers declared after the enum. Deprecated aliases (DebugDeviceMode,
// CaptureCallstacksOnlyDraws, VerifyMapWrites) deliberately share values with their
// replacements for compatibility with older headers.
// ---------------------------------------------------------------------------------------------
// the define can be used like so: const GUID RENDERDOC_ShaderDebugMagicValue = // RENDERDOC_ShaderDebugMagicValue_value #define RENDERDOC_ShaderDebugMagicValue_struct \ { \ 0xeab25520, 0x6670, 0x4865, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \ } // as an alternative when you want a byte array (assuming x86 endianness): #define RENDERDOC_ShaderDebugMagicValue_bytearray \ { \ 0x20, 0x55, 0xb2, 0xea, 0x70, 0x66, 0x65, 0x48, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \ } // truncated version when only a uint64_t is available (e.g. Vulkan tags): #define RENDERDOC_ShaderDebugMagicValue_truncated 0x48656670eab25520ULL ////////////////////////////////////////////////////////////////////////////////////////////////// // RenderDoc capture options // typedef enum RENDERDOC_CaptureOption { // Allow the application to enable vsync // // Default - enabled // // 1 - The application can enable or disable vsync at will // 0 - vsync is force disabled eRENDERDOC_Option_AllowVSync = 0, // Allow the application to enable fullscreen // // Default - enabled // // 1 - The application can enable or disable fullscreen at will // 0 - fullscreen is force disabled eRENDERDOC_Option_AllowFullscreen = 1, // Record API debugging events and messages // // Default - disabled // // 1 - Enable built-in API debugging features and records the results into // the capture, which is matched up with events on replay // 0 - no API debugging is forcibly enabled eRENDERDOC_Option_APIValidation = 2, eRENDERDOC_Option_DebugDeviceMode = 2, // deprecated name of this enum // Capture CPU callstacks for API events // // Default - disabled // // 1 - Enables capturing of callstacks // 0 - no callstacks are captured eRENDERDOC_Option_CaptureCallstacks = 3, // When capturing CPU callstacks, only capture them from actions. // This option does nothing without the above option being enabled // // Default - disabled // // 1 - Only captures callstacks for actions.
// Ignored if CaptureCallstacks is disabled // 0 - Callstacks, if enabled, are captured for every event. eRENDERDOC_Option_CaptureCallstacksOnlyDraws = 4, eRENDERDOC_Option_CaptureCallstacksOnlyActions = 4, // Specify a delay in seconds to wait for a debugger to attach, after // creating or injecting into a process, before continuing to allow it to run. // // 0 indicates no delay, and the process will run immediately after injection // // Default - 0 seconds // eRENDERDOC_Option_DelayForDebugger = 5, // Verify buffer access. This includes checking the memory returned by a Map() call to // detect any out-of-bounds modification, as well as initialising buffers with undefined contents // to a marker value to catch use of uninitialised memory. // // NOTE: This option is only valid for OpenGL and D3D11. Explicit APIs such as D3D12 and Vulkan do // not do the same kind of interception & checking and undefined contents are really undefined. // // Default - disabled // // 1 - Verify buffer access // 0 - No verification is performed, and overwriting bounds may cause crashes or corruption in // RenderDoc. eRENDERDOC_Option_VerifyBufferAccess = 6, // The old name for eRENDERDOC_Option_VerifyBufferAccess was eRENDERDOC_Option_VerifyMapWrites. // This option now controls the filling of uninitialised buffers with 0xdddddddd which was // previously always enabled eRENDERDOC_Option_VerifyMapWrites = eRENDERDOC_Option_VerifyBufferAccess, // Hooks any system API calls that create child processes, and injects // RenderDoc into them recursively with the same options. // // Default - disabled // // 1 - Hooks into spawned child processes // 0 - Child processes are not hooked by RenderDoc eRENDERDOC_Option_HookIntoChildren = 7, // By default RenderDoc only includes resources in the final capture necessary // for that frame, this allows you to override that behaviour.
// // Default - disabled // // 1 - all live resources at the time of capture are included in the capture // and available for inspection // 0 - only the resources referenced by the captured frame are included eRENDERDOC_Option_RefAllResources = 8, // **NOTE**: As of RenderDoc v1.1 this option has been deprecated. Setting or // getting it will be ignored, to allow compatibility with older versions. // In v1.1 the option acts as if it's always enabled. // // By default RenderDoc skips saving initial states for resources where the // previous contents don't appear to be used, assuming that writes before // reads indicate previous contents aren't used. // // Default - disabled // // 1 - initial contents at the start of each captured frame are saved, even if // they are later overwritten or cleared before being used. // 0 - unless a read is detected, initial contents will not be saved and will // appear as black or empty data. eRENDERDOC_Option_SaveAllInitials = 9, // In APIs that allow for the recording of command lists to be replayed later, // RenderDoc may choose to not capture command lists before a frame capture is // triggered, to reduce overheads. This means any command lists recorded once // and replayed many times will not be available and may cause a failure to // capture. // // NOTE: This is only true for APIs where multithreading is difficult or // discouraged. Newer APIs like Vulkan and D3D12 will ignore this option // and always capture all command lists since the API is heavily oriented // around it and the overheads have been reduced by API design. // // 1 - All command lists are captured from the start of the application // 0 - Command lists are only captured if their recording begins during // the period when a frame capture is in progress.
// Remaining options, the option setter/getter function-pointer typedefs, and the start of the
// RENDERDOC_InputButton key-code enum ('0'-'9' and 'A'-'Z' match their ASCII values).
eRENDERDOC_Option_CaptureAllCmdLists = 10, // Mute API debugging output when the API validation mode option is enabled // // Default - enabled // // 1 - Mute any API debug messages from being displayed or passed through // 0 - API debugging is displayed as normal eRENDERDOC_Option_DebugOutputMute = 11, // Option to allow vendor extensions to be used even when they may be // incompatible with RenderDoc and cause corrupted replays or crashes. // // Default - inactive // // No values are documented, this option should only be used when absolutely // necessary as directed by a RenderDoc developer. eRENDERDOC_Option_AllowUnsupportedVendorExtensions = 12, } RENDERDOC_CaptureOption; // Sets an option that controls how RenderDoc behaves on capture. // // Returns 1 if the option and value are valid // Returns 0 if either is invalid and the option is unchanged typedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionU32)(RENDERDOC_CaptureOption opt, uint32_t val); typedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionF32)(RENDERDOC_CaptureOption opt, float val); // Gets the current value of an option as a uint32_t // // If the option is invalid, 0xffffffff is returned typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionU32)(RENDERDOC_CaptureOption opt); // Gets the current value of an option as a float // // If the option is invalid, -FLT_MAX is returned typedef float(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionF32)(RENDERDOC_CaptureOption opt); typedef enum RENDERDOC_InputButton { // '0' - '9' matches ASCII values eRENDERDOC_Key_0 = 0x30, eRENDERDOC_Key_1 = 0x31, eRENDERDOC_Key_2 = 0x32, eRENDERDOC_Key_3 = 0x33, eRENDERDOC_Key_4 = 0x34, eRENDERDOC_Key_5 = 0x35, eRENDERDOC_Key_6 = 0x36, eRENDERDOC_Key_7 = 0x37, eRENDERDOC_Key_8 = 0x38, eRENDERDOC_Key_9 = 0x39, // 'A' - 'Z' matches ASCII values eRENDERDOC_Key_A = 0x41, eRENDERDOC_Key_B = 0x42, eRENDERDOC_Key_C = 0x43, eRENDERDOC_Key_D = 0x44, eRENDERDOC_Key_E = 0x45, eRENDERDOC_Key_F = 0x46, eRENDERDOC_Key_G = 0x47,
// ---------------------------------------------------------------------------------------------
// Rest of RENDERDOC_InputButton (non-printable keys start at 0x100 to leave the ASCII range
// free), key-binding setter typedefs, RENDERDOC_OverlayBits controlling the in-application
// overlay, hook-removal / crash-handler typedefs, and the capture-file path-template API.
// ---------------------------------------------------------------------------------------------
eRENDERDOC_Key_H = 0x48, eRENDERDOC_Key_I = 0x49, eRENDERDOC_Key_J = 0x4A, eRENDERDOC_Key_K = 0x4B, eRENDERDOC_Key_L = 0x4C, eRENDERDOC_Key_M = 0x4D, eRENDERDOC_Key_N = 0x4E, eRENDERDOC_Key_O = 0x4F, eRENDERDOC_Key_P = 0x50, eRENDERDOC_Key_Q = 0x51, eRENDERDOC_Key_R = 0x52, eRENDERDOC_Key_S = 0x53, eRENDERDOC_Key_T = 0x54, eRENDERDOC_Key_U = 0x55, eRENDERDOC_Key_V = 0x56, eRENDERDOC_Key_W = 0x57, eRENDERDOC_Key_X = 0x58, eRENDERDOC_Key_Y = 0x59, eRENDERDOC_Key_Z = 0x5A, // leave the rest of the ASCII range free // in case we want to use it later eRENDERDOC_Key_NonPrintable = 0x100, eRENDERDOC_Key_Divide, eRENDERDOC_Key_Multiply, eRENDERDOC_Key_Subtract, eRENDERDOC_Key_Plus, eRENDERDOC_Key_F1, eRENDERDOC_Key_F2, eRENDERDOC_Key_F3, eRENDERDOC_Key_F4, eRENDERDOC_Key_F5, eRENDERDOC_Key_F6, eRENDERDOC_Key_F7, eRENDERDOC_Key_F8, eRENDERDOC_Key_F9, eRENDERDOC_Key_F10, eRENDERDOC_Key_F11, eRENDERDOC_Key_F12, eRENDERDOC_Key_Home, eRENDERDOC_Key_End, eRENDERDOC_Key_Insert, eRENDERDOC_Key_Delete, eRENDERDOC_Key_PageUp, eRENDERDOC_Key_PageDn, eRENDERDOC_Key_Backspace, eRENDERDOC_Key_Tab, eRENDERDOC_Key_PrtScrn, eRENDERDOC_Key_Pause, eRENDERDOC_Key_Max, } RENDERDOC_InputButton; // Sets which key or keys can be used to toggle focus between multiple windows // // If keys is NULL or num is 0, toggle keys will be disabled typedef void(RENDERDOC_CC *pRENDERDOC_SetFocusToggleKeys)(RENDERDOC_InputButton *keys, int num); // Sets which key or keys can be used to capture the next frame // // If keys is NULL or num is 0, captures keys will be disabled typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureKeys)(RENDERDOC_InputButton *keys, int num); typedef enum RENDERDOC_OverlayBits { // This single bit controls whether the overlay is enabled or disabled globally eRENDERDOC_Overlay_Enabled = 0x1, // Show the average framerate over several seconds as well as min/max eRENDERDOC_Overlay_FrameRate = 0x2, // Show the current frame number eRENDERDOC_Overlay_FrameNumber = 0x4, // Show a list of recent
captures, and how many captures have been made eRENDERDOC_Overlay_CaptureList = 0x8, // Default values for the overlay mask eRENDERDOC_Overlay_Default = (eRENDERDOC_Overlay_Enabled | eRENDERDOC_Overlay_FrameRate | eRENDERDOC_Overlay_FrameNumber | eRENDERDOC_Overlay_CaptureList), // Enable all bits eRENDERDOC_Overlay_All = ~0U, // Disable all bits eRENDERDOC_Overlay_None = 0, } RENDERDOC_OverlayBits; // returns the overlay bits that have been set typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetOverlayBits)(); // sets the overlay bits with an and & or mask typedef void(RENDERDOC_CC *pRENDERDOC_MaskOverlayBits)(uint32_t And, uint32_t Or); // this function will attempt to remove RenderDoc's hooks in the application. // // Note: that this can only work correctly if done immediately after // the module is loaded, before any API work happens. RenderDoc will remove its // injected hooks and shut down. Behaviour is undefined if this is called // after any API functions have been called, and there is still no guarantee of // success. typedef void(RENDERDOC_CC *pRENDERDOC_RemoveHooks)(); // DEPRECATED: compatibility for code compiled against pre-1.4.1 headers. typedef pRENDERDOC_RemoveHooks pRENDERDOC_Shutdown; // This function will unload RenderDoc's crash handler. // // If you use your own crash handler and don't want RenderDoc's handler to // intercede, you can call this function to unload it and any unhandled // exceptions will pass to the next handler. typedef void(RENDERDOC_CC *pRENDERDOC_UnloadCrashHandler)(); // Sets the capture file path template // // pathtemplate is a UTF-8 string that gives a template for how captures will be named // and where they will be saved. // // Any extension is stripped off the path, and captures are saved in the directory // specified, and named with the filename and the frame number appended. If the // directory does not exist it will be created, including any parent directories.
// Capture-path-template getter/setter typedefs, capture enumeration (GetNumCaptures /
// GetCapture), and capture-file commenting follow.
// // If pathtemplate is NULL, the template will remain unchanged // // Example: // // SetCaptureFilePathTemplate("my_captures/example"); // // Capture #1 -> my_captures/example_frame123.rdc // Capture #2 -> my_captures/example_frame456.rdc typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFilePathTemplate)(const char *pathtemplate); // returns the current capture path template, see SetCaptureFileTemplate above, as a UTF-8 string typedef const char *(RENDERDOC_CC *pRENDERDOC_GetCaptureFilePathTemplate)(); // DEPRECATED: compatibility for code compiled against pre-1.1.2 headers. typedef pRENDERDOC_SetCaptureFilePathTemplate pRENDERDOC_SetLogFilePathTemplate; typedef pRENDERDOC_GetCaptureFilePathTemplate pRENDERDOC_GetLogFilePathTemplate; // returns the number of captures that have been made typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetNumCaptures)(); // This function returns the details of a capture, by index. New captures are added // to the end of the list. // // filename will be filled with the absolute path to the capture file, as a UTF-8 string // pathlength will be written with the length in bytes of the filename string // timestamp will be written with the time of the capture, in seconds since the Unix epoch // // Any of the parameters can be NULL and they'll be skipped. // // The function will return 1 if the capture index is valid, or 0 if the index is invalid // If the index is invalid, the values will be unchanged // // Note: when captures are deleted in the UI they will remain in this list, so the // capture path may not exist anymore. typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCapture)(uint32_t idx, char *filename, uint32_t *pathlength, uint64_t *timestamp); // Sets the comments associated with a capture file. These comments are displayed in the // UI program when opening. // // filePath should be a path to the capture file to add comments to. If set to NULL or "" // the most recent capture file created made will be used instead.
// comments should be a NULL-terminated UTF-8 string to add as comments. // // Any existing comments will be overwritten. typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFileComments)(const char *filePath, const char *comments); // returns 1 if the RenderDoc UI is connected to this application, 0 otherwise typedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsTargetControlConnected)(); // DEPRECATED: compatibility for code compiled against pre-1.1.1 headers. // This was renamed to IsTargetControlConnected in API 1.1.1, the old typedef is kept here for // backwards compatibility with old code, it is castable either way since it's ABI compatible // as the same function pointer type. typedef pRENDERDOC_IsTargetControlConnected pRENDERDOC_IsRemoteAccessConnected; // This function will launch the Replay UI associated with the RenderDoc library injected // into the running application. // // if connectTargetControl is 1, the Replay UI will be launched with a command line parameter // to connect to this application // cmdline is the rest of the command line, as a UTF-8 string. E.g. a captures to open // if cmdline is NULL, the command line will be empty. // // returns the PID of the replay UI if successful, 0 if not successful. typedef uint32_t(RENDERDOC_CC *pRENDERDOC_LaunchReplayUI)(uint32_t connectTargetControl, const char *cmdline); // RenderDoc can return a higher version than requested if it's backwards compatible, // this function returns the actual version returned. If a parameter is NULL, it will be // ignored and the others will be filled out. typedef void(RENDERDOC_CC *pRENDERDOC_GetAPIVersion)(int *major, int *minor, int *patch); // Requests that the replay UI show itself (if hidden or not the current top window). This can be // used in conjunction with IsTargetControlConnected and LaunchReplayUI to intelligently handle // showing the UI after making a capture. 
// // This will return 1 if the request was successfully passed on, though it's not guaranteed that // the UI will be on top in all cases depending on OS rules. It will return 0 if there is no current // target control connection to make such a request, or if there was another error typedef uint32_t(RENDERDOC_CC *pRENDERDOC_ShowReplayUI)(); ////////////////////////////////////////////////////////////////////////// // Capturing functions // // A device pointer is a pointer to the API's root handle. // // This would be an ID3D11Device, HGLRC/GLXContext, ID3D12Device, etc typedef void *RENDERDOC_DevicePointer; // A window handle is the OS's native window handle // // This would be an HWND, GLXDrawable, etc typedef void *RENDERDOC_WindowHandle; // A helper macro for Vulkan, where the device handle cannot be used directly. // // Passing the VkInstance to this macro will return the RENDERDOC_DevicePointer to use. // // Specifically, the value needed is the dispatch table pointer, which sits as the first // pointer-sized object in the memory pointed to by the VkInstance. Thus we cast to a void** and // indirect once. #define RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(inst) (*((void **)(inst))) // This sets the RenderDoc in-app overlay in the API/window pair as 'active' and it will // respond to keypresses. Neither parameter can be NULL typedef void(RENDERDOC_CC *pRENDERDOC_SetActiveWindow)(RENDERDOC_DevicePointer device, RENDERDOC_WindowHandle wndHandle); // capture the next frame on whichever window and API is currently considered active typedef void(RENDERDOC_CC *pRENDERDOC_TriggerCapture)(); // capture the next N frames on whichever window and API is currently considered active typedef void(RENDERDOC_CC *pRENDERDOC_TriggerMultiFrameCapture)(uint32_t numFrames); // When choosing either a device pointer or a window handle to capture, you can pass NULL. // Passing NULL specifies a 'wildcard' match against anything. 
This allows you to specify // any API rendering to a specific window, or a specific API instance rendering to any window, // or in the simplest case of one window and one API, you can just pass NULL for both. // // In either case, if there are two or more possible matching (device,window) pairs it // is undefined which one will be captured. // // Note: for headless rendering you can pass NULL for the window handle and either specify // a device pointer or leave it NULL as above. // Immediately starts capturing API calls on the specified device pointer and window handle. // // If there is no matching thing to capture (e.g. no supported API has been initialised), // this will do nothing. // // The results are undefined (including crashes) if two captures are started overlapping, // even on separate devices and/or windows. typedef void(RENDERDOC_CC *pRENDERDOC_StartFrameCapture)(RENDERDOC_DevicePointer device, RENDERDOC_WindowHandle wndHandle); // Returns whether or not a frame capture is currently ongoing anywhere. // // This will return 1 if a capture is ongoing, and 0 if there is no capture running typedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsFrameCapturing)(); // Ends capturing immediately. // // This will return 1 if the capture succeeded, and 0 if there was an error capturing. typedef uint32_t(RENDERDOC_CC *pRENDERDOC_EndFrameCapture)(RENDERDOC_DevicePointer device, RENDERDOC_WindowHandle wndHandle); // Ends capturing immediately and discard any data stored without saving to disk. // // This will return 1 if the capture was discarded, and 0 if there was an error or no capture // was in progress typedef uint32_t(RENDERDOC_CC *pRENDERDOC_DiscardFrameCapture)(RENDERDOC_DevicePointer device, RENDERDOC_WindowHandle wndHandle); // Only valid to be called between a call to StartFrameCapture and EndFrameCapture. Gives a custom // title to the capture produced which will be displayed in the UI.
// // If multiple captures are ongoing, this title will be applied to the first capture to end after // this call. The second capture to end will have no title, unless this function is called again. // // Calling this function has no effect if no capture is currently running, and if it is called // multiple times only the last title will be used. typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureTitle)(const char *title); ////////////////////////////////////////////////////////////////////////////////////////////////// // RenderDoc API versions // // RenderDoc uses semantic versioning (http://semver.org/). // // MAJOR version is incremented when incompatible API changes happen. // MINOR version is incremented when functionality is added in a backwards-compatible manner. // PATCH version is incremented when backwards-compatible bug fixes happen. // // Note that this means the API returned can be higher than the one you might have requested. // e.g. if you are running against a newer RenderDoc that supports 1.0.1, it will be returned // instead of 1.0.0. 
You can check this with the GetAPIVersion entry point typedef enum RENDERDOC_Version { eRENDERDOC_API_Version_1_0_0 = 10000, // RENDERDOC_API_1_0_0 = 1 00 00 eRENDERDOC_API_Version_1_0_1 = 10001, // RENDERDOC_API_1_0_1 = 1 00 01 eRENDERDOC_API_Version_1_0_2 = 10002, // RENDERDOC_API_1_0_2 = 1 00 02 eRENDERDOC_API_Version_1_1_0 = 10100, // RENDERDOC_API_1_1_0 = 1 01 00 eRENDERDOC_API_Version_1_1_1 = 10101, // RENDERDOC_API_1_1_1 = 1 01 01 eRENDERDOC_API_Version_1_1_2 = 10102, // RENDERDOC_API_1_1_2 = 1 01 02 eRENDERDOC_API_Version_1_2_0 = 10200, // RENDERDOC_API_1_2_0 = 1 02 00 eRENDERDOC_API_Version_1_3_0 = 10300, // RENDERDOC_API_1_3_0 = 1 03 00 eRENDERDOC_API_Version_1_4_0 = 10400, // RENDERDOC_API_1_4_0 = 1 04 00 eRENDERDOC_API_Version_1_4_1 = 10401, // RENDERDOC_API_1_4_1 = 1 04 01 eRENDERDOC_API_Version_1_4_2 = 10402, // RENDERDOC_API_1_4_2 = 1 04 02 eRENDERDOC_API_Version_1_5_0 = 10500, // RENDERDOC_API_1_5_0 = 1 05 00 eRENDERDOC_API_Version_1_6_0 = 10600, // RENDERDOC_API_1_6_0 = 1 06 00 } RENDERDOC_Version; // API version changelog: // // 1.0.0 - initial release // 1.0.1 - Bugfix: IsFrameCapturing() was returning false for captures that were triggered // by keypress or TriggerCapture, instead of Start/EndFrameCapture. // 1.0.2 - Refactor: Renamed eRENDERDOC_Option_DebugDeviceMode to eRENDERDOC_Option_APIValidation // 1.1.0 - Add feature: TriggerMultiFrameCapture(). Backwards compatible with 1.0.x since the new // function pointer is added to the end of the struct, the original layout is identical // 1.1.1 - Refactor: Renamed remote access to target control (to better disambiguate from remote // replay/remote server concept in replay UI) // 1.1.2 - Refactor: Renamed "log file" in function names to just capture, to clarify that these // are captures and not debug logging files. This is the first API version in the v1.0 // branch. 
// 1.2.0 - Added feature: SetCaptureFileComments() to add comments to a capture file that will be // displayed in the UI program on load. // 1.3.0 - Added feature: New capture option eRENDERDOC_Option_AllowUnsupportedVendorExtensions // which allows users to opt-in to allowing unsupported vendor extensions to function. // Should be used at the user's own risk. // Refactor: Renamed eRENDERDOC_Option_VerifyMapWrites to // eRENDERDOC_Option_VerifyBufferAccess, which now also controls initialisation to // 0xdddddddd of uninitialised buffer contents. // 1.4.0 - Added feature: DiscardFrameCapture() to discard a frame capture in progress and stop // capturing without saving anything to disk. // 1.4.1 - Refactor: Renamed Shutdown to RemoveHooks to better clarify what is happening // 1.4.2 - Refactor: Renamed 'draws' to 'actions' in callstack capture option. // 1.5.0 - Added feature: ShowReplayUI() to request that the replay UI show itself if connected // 1.6.0 - Added feature: SetCaptureTitle() which can be used to set a title for a // capture made with StartFrameCapture() or EndFrameCapture() typedef struct RENDERDOC_API_1_6_0 { pRENDERDOC_GetAPIVersion GetAPIVersion; pRENDERDOC_SetCaptureOptionU32 SetCaptureOptionU32; pRENDERDOC_SetCaptureOptionF32 SetCaptureOptionF32; pRENDERDOC_GetCaptureOptionU32 GetCaptureOptionU32; pRENDERDOC_GetCaptureOptionF32 GetCaptureOptionF32; pRENDERDOC_SetFocusToggleKeys SetFocusToggleKeys; pRENDERDOC_SetCaptureKeys SetCaptureKeys; pRENDERDOC_GetOverlayBits GetOverlayBits; pRENDERDOC_MaskOverlayBits MaskOverlayBits; // Shutdown was renamed to RemoveHooks in 1.4.1. // These unions allow old code to continue compiling without changes union { pRENDERDOC_Shutdown Shutdown; pRENDERDOC_RemoveHooks RemoveHooks; }; pRENDERDOC_UnloadCrashHandler UnloadCrashHandler; // Get/SetLogFilePathTemplate was renamed to Get/SetCaptureFilePathTemplate in 1.1.2. 
// These unions allow old code to continue compiling without changes union { // deprecated name pRENDERDOC_SetLogFilePathTemplate SetLogFilePathTemplate; // current name pRENDERDOC_SetCaptureFilePathTemplate SetCaptureFilePathTemplate; }; union { // deprecated name pRENDERDOC_GetLogFilePathTemplate GetLogFilePathTemplate; // current name pRENDERDOC_GetCaptureFilePathTemplate GetCaptureFilePathTemplate; }; pRENDERDOC_GetNumCaptures GetNumCaptures; pRENDERDOC_GetCapture GetCapture; pRENDERDOC_TriggerCapture TriggerCapture; // IsRemoteAccessConnected was renamed to IsTargetControlConnected in 1.1.1. // This union allows old code to continue compiling without changes union { // deprecated name pRENDERDOC_IsRemoteAccessConnected IsRemoteAccessConnected; // current name pRENDERDOC_IsTargetControlConnected IsTargetControlConnected; }; pRENDERDOC_LaunchReplayUI LaunchReplayUI; pRENDERDOC_SetActiveWindow SetActiveWindow; pRENDERDOC_StartFrameCapture StartFrameCapture; pRENDERDOC_IsFrameCapturing IsFrameCapturing; pRENDERDOC_EndFrameCapture EndFrameCapture; // new function in 1.1.0 pRENDERDOC_TriggerMultiFrameCapture TriggerMultiFrameCapture; // new function in 1.2.0 pRENDERDOC_SetCaptureFileComments SetCaptureFileComments; // new function in 1.4.0 pRENDERDOC_DiscardFrameCapture DiscardFrameCapture; // new function in 1.5.0 pRENDERDOC_ShowReplayUI ShowReplayUI; // new function in 1.6.0 pRENDERDOC_SetCaptureTitle SetCaptureTitle; } RENDERDOC_API_1_6_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_1; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_2; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_1; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_2; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_2_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_3_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_0; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_1; typedef RENDERDOC_API_1_6_0 
RENDERDOC_API_1_4_2; typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_5_0; ////////////////////////////////////////////////////////////////////////////////////////////////// // RenderDoc API entry point // // This entry point can be obtained via GetProcAddress/dlsym if RenderDoc is available. // // The name is the same as the typedef - "RENDERDOC_GetAPI" // // This function is not thread safe, and should not be called on multiple threads at once. // Ideally, call this once as early as possible in your application's startup, before doing // any API work, since some configuration functionality etc has to be done also before // initialising any APIs. // // Parameters: // version is a single value from the RENDERDOC_Version above. // // outAPIPointers will be filled out with a pointer to the corresponding struct of function // pointers. // // Returns: // 1 - if the outAPIPointers has been filled with a pointer to the API struct requested // 0 - if the requested version is not supported or the arguments are invalid. // typedef int(RENDERDOC_CC *pRENDERDOC_GetAPI)(RENDERDOC_Version version, void **outAPIPointers); #ifdef __cplusplus } // extern "C" #endif ================================================ FILE: deps/utest/utest.h ================================================ /* The latest version of this library is available on GitHub; https://github.com/sheredom/utest.h */ /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. 
We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to */ #ifndef SHEREDOM_UTEST_H_INCLUDED #define SHEREDOM_UTEST_H_INCLUDED #ifdef _MSC_VER /* Disable warning about not inlining 'inline' functions. */ #pragma warning(disable : 4710) /* Disable warning about inlining functions that are not marked 'inline'. */ #pragma warning(disable : 4711) /* Disable warning for alignment padding added */ #pragma warning(disable : 4820) #if _MSC_VER > 1900 /* Disable warning about preprocessor macros not being defined in MSVC headers. */ #pragma warning(disable : 4668) /* Disable warning about no function prototype given in MSVC headers. */ #pragma warning(disable : 4255) /* Disable warning about pointer or reference to potentially throwing function. */ #pragma warning(disable : 5039) /* Disable warning about macro expansion producing 'defined' has undefined behavior. */ #pragma warning(disable : 5105) #endif #if _MSC_VER > 1930 /* Disable warning about 'const' variable is not used. 
*/ #pragma warning(disable : 5264) #endif #pragma warning(push, 1) #endif #if defined(_MSC_VER) && (_MSC_VER < 1920) typedef __int64 utest_int64_t; typedef unsigned __int64 utest_uint64_t; typedef unsigned __int32 utest_uint32_t; #else #include typedef int64_t utest_int64_t; typedef uint64_t utest_uint64_t; typedef uint32_t utest_uint32_t; #endif #include #include #include #include #include #if defined(__cplusplus) #if defined(_MSC_VER) && !defined(_CPPUNWIND) /* We're on MSVC and the compiler is compiling without exception support! */ #elif !defined(_MSC_VER) && !defined(__EXCEPTIONS) /* We're on a GCC/Clang compiler that doesn't have exception support! */ #else #define UTEST_HAS_EXCEPTIONS 1 #endif #endif #if defined(UTEST_HAS_EXCEPTIONS) #include #endif #if defined(_MSC_VER) #pragma warning(pop) #endif #if defined(__cplusplus) #define UTEST_C_FUNC extern "C" #else #define UTEST_C_FUNC #endif #define UTEST_TEST_PASSED (0) #define UTEST_TEST_FAILURE (1) #define UTEST_TEST_SKIPPED (2) #if defined(__TINYC__) #define UTEST_ATTRIBUTE(a) __attribute((a)) #else #define UTEST_ATTRIBUTE(a) __attribute__((a)) #endif #if defined(_MSC_VER) || defined(__MINGW64__) || defined(__MINGW32__) #if defined(__MINGW64__) || defined(__MINGW32__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpragmas" #pragma GCC diagnostic ignored "-Wunknown-pragmas" #endif #if defined(_WINDOWS_) || defined(_WINDOWS_H) typedef LARGE_INTEGER utest_large_integer; #else // use old QueryPerformanceCounter definitions (not sure is this needed in some // edge cases or not) on Win7 with VS2015 these extern declaration cause "second // C linkage of overloaded function not allowed" error typedef union { struct { unsigned long LowPart; long HighPart; } DUMMYSTRUCTNAME; struct { unsigned long LowPart; long HighPart; } u; utest_int64_t QuadPart; } utest_large_integer; UTEST_C_FUNC __declspec(dllimport) int __stdcall QueryPerformanceCounter( utest_large_integer *); UTEST_C_FUNC __declspec(dllimport) 
int __stdcall QueryPerformanceFrequency( utest_large_integer *); #if defined(__MINGW64__) || defined(__MINGW32__) #pragma GCC diagnostic pop #endif #endif #elif defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \ defined(__NetBSD__) || defined(__DragonFly__) || defined(__sun__) || \ defined(__HAIKU__) /* slightly obscure include here - we need to include glibc's features.h, but we don't want to just include a header that might not be defined for other c libraries like musl. Instead we include limits.h, which we know on all glibc distributions includes features.h */ #include #if defined(__GLIBC__) && defined(__GLIBC_MINOR__) #include #if ((2 < __GLIBC__) || ((2 == __GLIBC__) && (17 <= __GLIBC_MINOR__))) /* glibc is version 2.17 or above, so we can just use clock_gettime */ #define UTEST_USE_CLOCKGETTIME #else #include #include #endif #else // Other libc implementations #include #define UTEST_USE_CLOCKGETTIME #endif #elif defined(__APPLE__) #include #endif #if defined(_MSC_VER) && (_MSC_VER < 1920) #define UTEST_PRId64 "I64d" #define UTEST_PRIu64 "I64u" #else #include #define UTEST_PRId64 PRId64 #define UTEST_PRIu64 PRIu64 #endif #if defined(__cplusplus) #define UTEST_INLINE inline #if defined(__clang__) #define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") #define UTEST_INITIALIZER_END_DISABLE_WARNINGS _Pragma("clang diagnostic pop") #else #define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS #define UTEST_INITIALIZER_END_DISABLE_WARNINGS #endif #define UTEST_INITIALIZER(f) \ struct f##_cpp_struct { \ f##_cpp_struct(); \ }; \ UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS static f##_cpp_struct \ f##_cpp_global UTEST_INITIALIZER_END_DISABLE_WARNINGS; \ f##_cpp_struct::f##_cpp_struct() #elif defined(_MSC_VER) #define UTEST_INLINE __forceinline #if defined(_WIN64) #define UTEST_SYMBOL_PREFIX #else #define UTEST_SYMBOL_PREFIX "_" #endif #if defined(__clang__) #define 
UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wmissing-variable-declarations\"") #define UTEST_INITIALIZER_END_DISABLE_WARNINGS _Pragma("clang diagnostic pop") #else #define UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS #define UTEST_INITIALIZER_END_DISABLE_WARNINGS #endif #pragma section(".CRT$XCU", read) #define UTEST_INITIALIZER(f) \ static void __cdecl f(void); \ UTEST_INITIALIZER_BEGIN_DISABLE_WARNINGS \ __pragma(comment(linker, "/include:" UTEST_SYMBOL_PREFIX #f "_")) \ UTEST_C_FUNC \ __declspec(allocate(".CRT$XCU")) void(__cdecl * f##_)(void) = f; \ UTEST_INITIALIZER_END_DISABLE_WARNINGS \ static void __cdecl f(void) #else #if defined(__linux__) #if defined(__clang__) #if __has_warning("-Wreserved-id-macro") #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wreserved-id-macro" #endif #endif #define __STDC_FORMAT_MACROS 1 #if defined(__clang__) #if __has_warning("-Wreserved-id-macro") #pragma clang diagnostic pop #endif #endif #endif #define UTEST_INLINE inline #define UTEST_INITIALIZER(f) \ static void f(void) UTEST_ATTRIBUTE(constructor); \ static void f(void) #endif #if defined(__cplusplus) #define UTEST_CAST(type, x) static_cast(x) #define UTEST_PTR_CAST(type, x) reinterpret_cast(x) #define UTEST_EXTERN extern "C" #define UTEST_NULL NULL #else #define UTEST_CAST(type, x) ((type)(x)) #define UTEST_PTR_CAST(type, x) ((type)(x)) #define UTEST_EXTERN extern #define UTEST_NULL 0 #endif #ifdef _MSC_VER /* io.h contains definitions for some structures with natural padding. This is uninteresting, but for some reason MSVC's behaviour is to warn about including this system header. 
That *is* interesting */ #pragma warning(disable : 4820) #pragma warning(push, 1) #include #pragma warning(pop) #define UTEST_COLOUR_OUTPUT() (_isatty(_fileno(stdout))) #else #if defined(__EMSCRIPTEN__) #include #define UTEST_COLOUR_OUTPUT() false #else #include #define UTEST_COLOUR_OUTPUT() (isatty(STDOUT_FILENO)) #endif #endif static UTEST_INLINE void *utest_realloc(void *const pointer, size_t new_size) { void *const new_pointer = realloc(pointer, new_size); if (UTEST_NULL == new_pointer) { free(pointer); } return new_pointer; } // Prevent 64-bit integer overflow when computing a timestamp by using a trick // from Sokol: // https://github.com/floooh/sokol/blob/189843bf4f86969ca4cc4b6d94e793a37c5128a7/sokol_time.h#L204 static UTEST_INLINE utest_int64_t utest_mul_div(const utest_int64_t value, const utest_int64_t numer, const utest_int64_t denom) { const utest_int64_t q = value / denom; const utest_int64_t r = value % denom; return q * numer + r * numer / denom; } static UTEST_INLINE utest_int64_t utest_ns(void) { #if defined(_MSC_VER) || defined(__MINGW64__) || defined(__MINGW32__) utest_large_integer counter; utest_large_integer frequency; QueryPerformanceCounter(&counter); QueryPerformanceFrequency(&frequency); return utest_mul_div(counter.QuadPart, 1000000000, frequency.QuadPart); #elif defined(__linux__) && defined(__STRICT_ANSI__) return utest_mul_div(clock(), 1000000000, CLOCKS_PER_SEC); #elif defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \ defined(__NetBSD__) || defined(__DragonFly__) || defined(__sun__) || \ defined(__HAIKU__) struct timespec ts; #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ !defined(__HAIKU__) timespec_get(&ts, TIME_UTC); #else const clockid_t cid = CLOCK_REALTIME; #if defined(UTEST_USE_CLOCKGETTIME) clock_gettime(cid, &ts); #else syscall(SYS_clock_gettime, cid, &ts); #endif #endif return UTEST_CAST(utest_int64_t, ts.tv_sec) * 1000 * 1000 * 1000 + ts.tv_nsec; #elif __APPLE__ return 
UTEST_CAST(utest_int64_t, clock_gettime_nsec_np(CLOCK_UPTIME_RAW)); #elif __EMSCRIPTEN__ return emscripten_performance_now() * 1000000.0; #else #error Unsupported platform! #endif } typedef void (*utest_testcase_t)(int *, size_t); struct utest_test_state_s { utest_testcase_t func; size_t index; char *name; }; struct utest_state_s { struct utest_test_state_s *tests; size_t tests_length; FILE *output; }; /* extern to the global state utest needs to execute */ UTEST_EXTERN struct utest_state_s utest_state; #if defined(_MSC_VER) #define UTEST_WEAK __forceinline #elif defined(__MINGW32__) || defined(__MINGW64__) #define UTEST_WEAK static UTEST_ATTRIBUTE(used) #elif defined(__clang__) || defined(__GNUC__) || defined(__TINYC__) #define UTEST_WEAK UTEST_ATTRIBUTE(weak) #else #error Non clang, non gcc, non MSVC, non tcc compiler found! #endif #if defined(_MSC_VER) #define UTEST_UNUSED #else #define UTEST_UNUSED UTEST_ATTRIBUTE(unused) #endif #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wvariadic-macros" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif #define UTEST_PRINTF(...) \ if (utest_state.output) { \ fprintf(utest_state.output, __VA_ARGS__); \ } \ printf(__VA_ARGS__) #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wvariadic-macros" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif #ifdef _MSC_VER #define UTEST_SNPRINTF(BUFFER, N, ...) _snprintf_s(BUFFER, N, N, __VA_ARGS__) #else #define UTEST_SNPRINTF(...) 
snprintf(__VA_ARGS__) #endif #ifdef __clang__ #pragma clang diagnostic pop #endif #if defined(__cplusplus) /* if we are using c++ we can use overloaded methods (its in the language) */ #define UTEST_OVERLOADABLE #elif defined(__clang__) /* otherwise, if we are using clang with c - use the overloadable attribute */ #define UTEST_OVERLOADABLE UTEST_ATTRIBUTE(overloadable) #endif #if defined(__cplusplus) && (__cplusplus >= 201103L) #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif #include template ::value> struct utest_type_deducer final { static void _(const T t); }; template <> struct utest_type_deducer { static void _(const char c) { if (std::is_signed::value) { UTEST_PRINTF("%d", static_cast(c)); } else { UTEST_PRINTF("%u", static_cast(c)); } } }; template <> struct utest_type_deducer { static void _(const signed char c) { UTEST_PRINTF("%d", static_cast(c)); } }; template <> struct utest_type_deducer { static void _(const unsigned char c) { UTEST_PRINTF("%u", static_cast(c)); } }; template <> struct utest_type_deducer { static void _(const short s) { UTEST_PRINTF("%d", static_cast(s)); } }; template <> struct utest_type_deducer { static void _(const unsigned short s) { UTEST_PRINTF("%u", static_cast(s)); } }; template <> struct utest_type_deducer { static void _(const float f) { UTEST_PRINTF("%f", static_cast(f)); } }; template <> struct utest_type_deducer { static void _(const double d) { UTEST_PRINTF("%f", d); } }; template <> struct utest_type_deducer { static void _(const long double d) { #if defined(__MINGW32__) || defined(__MINGW64__) /* MINGW is weird - doesn't like LF at all?! 
*/ UTEST_PRINTF("%f", (double)d); #else UTEST_PRINTF("%Lf", d); #endif } }; template <> struct utest_type_deducer { static void _(const int i) { UTEST_PRINTF("%d", i); } }; template <> struct utest_type_deducer { static void _(const unsigned int i) { UTEST_PRINTF("%u", i); } }; template <> struct utest_type_deducer { static void _(const long i) { UTEST_PRINTF("%ld", i); } }; template <> struct utest_type_deducer { static void _(const unsigned long i) { UTEST_PRINTF("%lu", i); } }; template <> struct utest_type_deducer { static void _(const long long i) { UTEST_PRINTF("%lld", i); } }; template <> struct utest_type_deducer { static void _(const unsigned long long i) { UTEST_PRINTF("%llu", i); } }; template <> struct utest_type_deducer { static void _(const bool i) { UTEST_PRINTF(i ? "true" : "false"); } }; template struct utest_type_deducer { static void _(const T *t) { UTEST_PRINTF("%p", static_cast(t)); } }; template struct utest_type_deducer { static void _(T *t) { UTEST_PRINTF("%p", static_cast(t)); } }; template struct utest_type_deducer { static void _(const T t) { UTEST_PRINTF("%llu", static_cast(t)); } }; // default printer for all other objects (specialize for custom printing) template struct utest_type_deducer { static void _(const T& t) { UTEST_PRINTF("(object %p)", static_cast(&t)); } }; template <> struct utest_type_deducer { static void _(std::nullptr_t t) { UTEST_PRINTF("%p", static_cast(t)); } }; template UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const T& t) { utest_type_deducer::_(t); } #ifdef __clang__ #pragma clang diagnostic pop #endif #elif defined(UTEST_OVERLOADABLE) UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(signed char c); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(signed char c) { UTEST_PRINTF("%d", UTEST_CAST(int, c)); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned char c); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned char c) { UTEST_PRINTF("%u", UTEST_CAST(unsigned int, 
c)); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(float f); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(float f) { UTEST_PRINTF("%f", UTEST_CAST(double, f)); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(double d); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(double d) { UTEST_PRINTF("%f", d); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long double d); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long double d) { #if defined(__MINGW32__) || defined(__MINGW64__) /* MINGW is weird - doesn't like LF at all?! */ UTEST_PRINTF("%f", (double)d); #else UTEST_PRINTF("%Lf", d); #endif } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(int i) { UTEST_PRINTF("%d", i); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(unsigned int i) { UTEST_PRINTF("%u", i); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long int i) { UTEST_PRINTF("%ld", i); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long unsigned int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long unsigned int i) { UTEST_PRINTF("%lu", i); } UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const void *p); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(const void *p) { UTEST_PRINTF("%p", p); } /* long long is a c++11 extension */ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || \ defined(__cplusplus) && (__cplusplus >= 201103L) || \ (defined(__MINGW32__) || defined(__MINGW64__)) #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long int i) { UTEST_PRINTF("%lld", i); } UTEST_WEAK UTEST_OVERLOADABLE void 
utest_type_printer(long long unsigned int i); UTEST_WEAK UTEST_OVERLOADABLE void utest_type_printer(long long unsigned int i) { UTEST_PRINTF("%llu", i); } #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ !(defined(__MINGW32__) || defined(__MINGW64__)) || \ defined(__TINYC__) #define utest_type_printer(val) \ UTEST_PRINTF( \ _Generic((val), \ signed char: "%d", \ unsigned char: "%u", \ short: "%d", \ unsigned short: "%u", \ int: "%d", \ long: "%ld", \ long long: "%lld", \ unsigned: "%u", \ unsigned long: "%lu", \ unsigned long long: "%llu", \ float: "%f", \ double: "%f", \ long double: "%Lf", \ default: _Generic((val - val), ptrdiff_t: "%p", default: "undef")), \ (val)) #else /* we don't have the ability to print the values we got, so we create a macro to tell our users we can't do anything fancy */ #define utest_type_printer(...) UTEST_PRINTF("undef") #endif #if defined(_MSC_VER) #define UTEST_SURPRESS_WARNING_BEGIN \ __pragma(warning(push)) __pragma(warning(disable : 4127)) \ __pragma(warning(disable : 4571)) __pragma(warning(disable : 4130)) #define UTEST_SURPRESS_WARNING_END __pragma(warning(pop)) #else #define UTEST_SURPRESS_WARNING_BEGIN #define UTEST_SURPRESS_WARNING_END #endif #if defined(__cplusplus) && (__cplusplus >= 201103L) #define UTEST_AUTO(x) const auto& #elif !defined(__cplusplus) #if defined(__clang__) /* clang-format off */ /* had to disable clang-format here because it malforms the pragmas */ #define UTEST_AUTO(x) \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wgnu-auto-type\"") __auto_type \ _Pragma("clang diagnostic pop") /* clang-format on */ #else #define UTEST_AUTO(x) __typeof__(x + 0) #endif #else #define UTEST_AUTO(x) typeof(x + 0) #endif #if defined(__clang__) #define UTEST_STRNCMP(x, y, size) \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"") \ strncmp(x, y, size) _Pragma("clang 
diagnostic pop") #else #define UTEST_STRNCMP(x, y, size) strncmp(x, y, size) #endif #if defined(_MSC_VER) #define UTEST_STRNCPY(x, y, size) strcpy_s(x, size, y) #elif !defined(__clang__) && defined(__GNUC__) static UTEST_INLINE char * utest_strncpy_gcc(char *const dst, const char *const src, const size_t size) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstringop-overflow" return strncpy(dst, src, size); #pragma GCC diagnostic pop } #define UTEST_STRNCPY(x, y, size) utest_strncpy_gcc(x, y, size) #else #define UTEST_STRNCPY(x, y, size) strncpy(x, y, size) #endif #define UTEST_SKIP(msg) \ do { \ UTEST_PRINTF(" Skipped : '%s'\n", (msg)); \ *utest_result = UTEST_TEST_SKIPPED; \ return; \ } while (0) #if defined(__clang__) #define UTEST_COND(x, y, cond, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wlanguage-extension-token\"") \ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \ _Pragma("clang diagnostic ignored \"-Wfloat-equal\"") \ UTEST_AUTO(x) xEval = (x); \ UTEST_AUTO(y) yEval = (y); \ if (!((xEval)cond(yEval))) { \ const char *const xAsString = #x; \ const char *const yAsString = #y; \ _Pragma("clang diagnostic pop") \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : ("); \ UTEST_PRINTF("%s) " #cond " (%s", xAsString, yAsString); \ UTEST_PRINTF(")\n"); \ UTEST_PRINTF(" Actual : "); \ utest_type_printer(xEval); \ UTEST_PRINTF(" vs "); \ utest_type_printer(yEval); \ UTEST_PRINTF("\n"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #elif defined(__GNUC__) || defined(__TINYC__) #define UTEST_COND(x, y, cond, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ UTEST_AUTO(x) xEval = (x); \ UTEST_AUTO(y) yEval = (y); \ if (!((xEval)cond(yEval))) { \ const char *const xAsString = #x; 
\ const char *const yAsString = #y; \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : ("); \ UTEST_PRINTF("%s) " #cond " (%s", xAsString, yAsString); \ UTEST_PRINTF(")\n"); \ UTEST_PRINTF(" Actual : "); \ utest_type_printer(xEval); \ UTEST_PRINTF(" vs "); \ utest_type_printer(yEval); \ UTEST_PRINTF("\n"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #else #define UTEST_COND(x, y, cond, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ if (!((x)cond(y))) { \ UTEST_PRINTF("%s:%i: Failure (Expected " #cond " Actual)", __FILE__, \ __LINE__); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s", msg); \ } \ UTEST_PRINTF("\n"); \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #endif #define EXPECT_EQ(x, y) UTEST_COND(x, y, ==, "", 0) #define EXPECT_EQ_MSG(x, y, msg) UTEST_COND(x, y, ==, msg, 0) #define ASSERT_EQ(x, y) UTEST_COND(x, y, ==, "", 1) #define ASSERT_EQ_MSG(x, y, msg) UTEST_COND(x, y, ==, msg, 1) #define EXPECT_NE(x, y) UTEST_COND(x, y, !=, "", 0) #define EXPECT_NE_MSG(x, y, msg) UTEST_COND(x, y, !=, msg, 0) #define ASSERT_NE(x, y) UTEST_COND(x, y, !=, "", 1) #define ASSERT_NE_MSG(x, y, msg) UTEST_COND(x, y, !=, msg, 1) #define EXPECT_LT(x, y) UTEST_COND(x, y, <, "", 0) #define EXPECT_LT_MSG(x, y, msg) UTEST_COND(x, y, <, msg, 0) #define ASSERT_LT(x, y) UTEST_COND(x, y, <, "", 1) #define ASSERT_LT_MSG(x, y, msg) UTEST_COND(x, y, <, msg, 1) #define EXPECT_LE(x, y) UTEST_COND(x, y, <=, "", 0) #define EXPECT_LE_MSG(x, y, msg) UTEST_COND(x, y, <=, msg, 0) #define ASSERT_LE(x, y) UTEST_COND(x, y, <=, "", 1) #define ASSERT_LE_MSG(x, y, msg) UTEST_COND(x, y, <=, msg, 1) #define EXPECT_GT(x, y) UTEST_COND(x, y, >, "", 0) #define EXPECT_GT_MSG(x, y, msg) UTEST_COND(x, y, >, msg, 0) #define ASSERT_GT(x, y) UTEST_COND(x, 
y, >, "", 1) #define ASSERT_GT_MSG(x, y, msg) UTEST_COND(x, y, >, msg, 1) #define EXPECT_GE(x, y) UTEST_COND(x, y, >=, "", 0) #define EXPECT_GE_MSG(x, y, msg) UTEST_COND(x, y, >=, msg, 0) #define ASSERT_GE(x, y) UTEST_COND(x, y, >=, "", 1) #define ASSERT_GE_MSG(x, y, msg) UTEST_COND(x, y, >=, msg, 1) #define UTEST_TRUE(x, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const int xEval = !!(x); \ if (!(xEval)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : true\n"); \ UTEST_PRINTF(" Actual : %s\n", (xEval) ? "true" : "false"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_TRUE(x) UTEST_TRUE(x, "", 0) #define EXPECT_TRUE_MSG(x, msg) UTEST_TRUE(x, msg, 0) #define ASSERT_TRUE(x) UTEST_TRUE(x, "", 1) #define ASSERT_TRUE_MSG(x, msg) UTEST_TRUE(x, msg, 1) #define UTEST_FALSE(x, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const int xEval = !!(x); \ if (xEval) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : false\n"); \ UTEST_PRINTF(" Actual : %s\n", (xEval) ? 
"true" : "false"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_FALSE(x) UTEST_FALSE(x, "", 0) #define EXPECT_FALSE_MSG(x, msg) UTEST_FALSE(x, msg, 0) #define ASSERT_FALSE(x) UTEST_FALSE(x, "", 1) #define ASSERT_FALSE_MSG(x, msg) UTEST_FALSE(x, msg, 1) #define UTEST_STREQ(x, y, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const char *xEval = (x); \ const char *yEval = (y); \ if (UTEST_NULL == xEval || UTEST_NULL == yEval || \ 0 != strcmp(xEval, yEval)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : \"%s\"\n", xEval); \ UTEST_PRINTF(" Actual : \"%s\"\n", yEval); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_STREQ(x, y) UTEST_STREQ(x, y, "", 0) #define EXPECT_STREQ_MSG(x, y, msg) UTEST_STREQ(x, y, msg, 0) #define ASSERT_STREQ(x, y) UTEST_STREQ(x, y, "", 1) #define ASSERT_STREQ_MSG(x, y, msg) UTEST_STREQ(x, y, msg, 1) #define UTEST_STRNE(x, y, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const char *xEval = (x); \ const char *yEval = (y); \ if (UTEST_NULL == xEval || UTEST_NULL == yEval || \ 0 == strcmp(xEval, yEval)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : \"%s\"\n", xEval); \ UTEST_PRINTF(" Actual : \"%s\"\n", yEval); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_STRNE(x, y) UTEST_STRNE(x, y, "", 0) #define EXPECT_STRNE_MSG(x, y, msg) UTEST_STRNE(x, y, msg, 0) #define ASSERT_STRNE(x, y) UTEST_STRNE(x, y, "", 1) #define ASSERT_STRNE_MSG(x, y, msg) UTEST_STRNE(x, y, msg, 1) #define UTEST_STRNEQ(x, y, 
n, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const char *xEval = (x); \ const char *yEval = (y); \ const size_t nEval = UTEST_CAST(size_t, n); \ if (UTEST_NULL == xEval || UTEST_NULL == yEval || \ 0 != UTEST_STRNCMP(xEval, yEval, nEval)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : \"%.*s\"\n", UTEST_CAST(int, nEval), xEval); \ UTEST_PRINTF(" Actual : \"%.*s\"\n", UTEST_CAST(int, nEval), yEval); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_STRNEQ(x, y, n) UTEST_STRNEQ(x, y, n, "", 0) #define EXPECT_STRNEQ_MSG(x, y, n, msg) UTEST_STRNEQ(x, y, n, msg, 0) #define ASSERT_STRNEQ(x, y, n) UTEST_STRNEQ(x, y, n, "", 1) #define ASSERT_STRNEQ_MSG(x, y, n, msg) UTEST_STRNEQ(x, y, n, msg, 1) #define UTEST_STRNNE(x, y, n, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const char *xEval = (x); \ const char *yEval = (y); \ const size_t nEval = UTEST_CAST(size_t, n); \ if (UTEST_NULL == xEval || UTEST_NULL == yEval || \ 0 == UTEST_STRNCMP(xEval, yEval, nEval)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : \"%.*s\"\n", UTEST_CAST(int, nEval), xEval); \ UTEST_PRINTF(" Actual : \"%.*s\"\n", UTEST_CAST(int, nEval), yEval); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_STRNNE(x, y, n) UTEST_STRNNE(x, y, n, "", 0) #define EXPECT_STRNNE_MSG(x, y, n, msg) UTEST_STRNNE(x, y, n, msg, 0) #define ASSERT_STRNNE(x, y, n) UTEST_STRNNE(x, y, n, "", 1) #define ASSERT_STRNNE_MSG(x, y, n, msg) UTEST_STRNNE(x, y, n, msg, 1) #define UTEST_NEAR(x, y, epsilon, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ const double diff = \ utest_fabs(UTEST_CAST(double, x) - UTEST_CAST(double, y)); \ 
if (diff > UTEST_CAST(double, epsilon) || utest_isnan(diff)) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : %f\n", UTEST_CAST(double, x)); \ UTEST_PRINTF(" Actual : %f\n", UTEST_CAST(double, y)); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_NEAR(x, y, epsilon) UTEST_NEAR(x, y, epsilon, "", 0) #define EXPECT_NEAR_MSG(x, y, epsilon, msg) UTEST_NEAR(x, y, epsilon, msg, 0) #define ASSERT_NEAR(x, y, epsilon) UTEST_NEAR(x, y, epsilon, "", 1) #define ASSERT_NEAR_MSG(x, y, epsilon, msg) UTEST_NEAR(x, y, epsilon, msg, 1) #if defined(UTEST_HAS_EXCEPTIONS) #define UTEST_EXCEPTION(x, exception_type, msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ int exception_caught = 0; \ try { \ x; \ } catch (const exception_type &) { \ exception_caught = 1; \ } catch (...) { \ exception_caught = 2; \ } \ if (1 != exception_caught) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : %s exception\n", #exception_type); \ UTEST_PRINTF(" Actual : %s\n", (2 == exception_caught) \ ? 
"Unexpected exception" \ : "No exception"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_EXCEPTION(x, exception_type) \ UTEST_EXCEPTION(x, exception_type, "", 0) #define EXPECT_EXCEPTION_MSG(x, exception_type, msg) \ UTEST_EXCEPTION(x, exception_type, msg, 0) #define ASSERT_EXCEPTION(x, exception_type) \ UTEST_EXCEPTION(x, exception_type, "", 1) #define ASSERT_EXCEPTION_MSG(x, exception_type, msg) \ UTEST_EXCEPTION(x, exception_type, msg, 1) #define UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, \ msg, is_assert) \ UTEST_SURPRESS_WARNING_BEGIN do { \ int exception_caught = 0; \ char *message_caught = UTEST_NULL; \ try { \ x; \ } catch (const exception_type &e) { \ const char *const what = e.what(); \ exception_caught = 1; \ if (0 != \ UTEST_STRNCMP(what, exception_message, strlen(exception_message))) { \ const size_t message_size = strlen(what) + 1; \ message_caught = UTEST_PTR_CAST(char *, malloc(message_size)); \ UTEST_STRNCPY(message_caught, what, message_size); \ } \ } catch (...) { \ exception_caught = 2; \ } \ if (1 != exception_caught) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : %s exception\n", #exception_type); \ UTEST_PRINTF(" Actual : %s\n", (2 == exception_caught) \ ? 
"Unexpected exception" \ : "No exception"); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ if (is_assert) { \ return; \ } \ } else if (UTEST_NULL != message_caught) { \ UTEST_PRINTF("%s:%i: Failure\n", __FILE__, __LINE__); \ UTEST_PRINTF(" Expected : %s exception with message %s\n", \ #exception_type, exception_message); \ UTEST_PRINTF(" Actual message : %s\n", message_caught); \ if (strlen(msg) > 0) { \ UTEST_PRINTF(" Message : %s\n", msg); \ } \ *utest_result = UTEST_TEST_FAILURE; \ free(message_caught); \ if (is_assert) { \ return; \ } \ } \ } \ while (0) \ UTEST_SURPRESS_WARNING_END #define EXPECT_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message) \ UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, "", 0) #define EXPECT_EXCEPTION_WITH_MESSAGE_MSG(x, exception_type, \ exception_message, msg) \ UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, msg, 0) #define ASSERT_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message) \ UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, "", 1) #define ASSERT_EXCEPTION_WITH_MESSAGE_MSG(x, exception_type, \ exception_message, msg) \ UTEST_EXCEPTION_WITH_MESSAGE(x, exception_type, exception_message, msg, 1) #endif #if defined(__clang__) #if __has_warning("-Wunsafe-buffer-usage") #define UTEST_SURPRESS_WARNINGS_BEGIN \ _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wunsafe-buffer-usage\"") #define UTEST_SURPRESS_WARNINGS_END _Pragma("clang diagnostic pop") #else #define UTEST_SURPRESS_WARNINGS_BEGIN #define UTEST_SURPRESS_WARNINGS_END #endif #elif defined(__GNUC__) && __GNUC__ >= 8 && defined(__cplusplus) #define UTEST_SURPRESS_WARNINGS_BEGIN \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"") #define UTEST_SURPRESS_WARNINGS_END _Pragma("GCC diagnostic pop") #else #define UTEST_SURPRESS_WARNINGS_BEGIN #define UTEST_SURPRESS_WARNINGS_END #endif 
#define UTEST(SET, NAME) \ UTEST_SURPRESS_WARNINGS_BEGIN \ UTEST_EXTERN struct utest_state_s utest_state; \ static void utest_run_##SET##_##NAME(int *utest_result); \ static void utest_##SET##_##NAME(int *utest_result, size_t utest_index) { \ (void)utest_index; \ utest_run_##SET##_##NAME(utest_result); \ } \ UTEST_INITIALIZER(utest_register_##SET##_##NAME) { \ const size_t index = utest_state.tests_length++; \ const char name_part[] = #SET "." #NAME; \ const size_t name_size = strlen(name_part) + 1; \ char *name = UTEST_PTR_CAST(char *, malloc(name_size)); \ utest_state.tests = UTEST_PTR_CAST( \ struct utest_test_state_s *, \ utest_realloc(UTEST_PTR_CAST(void *, utest_state.tests), \ sizeof(struct utest_test_state_s) * \ utest_state.tests_length)); \ if (utest_state.tests && name) { \ utest_state.tests[index].func = &utest_##SET##_##NAME; \ utest_state.tests[index].name = name; \ utest_state.tests[index].index = 0; \ UTEST_SNPRINTF(name, name_size, "%s", name_part); \ } else { \ if (utest_state.tests) { \ free(utest_state.tests); \ utest_state.tests = NULL; \ } \ if (name) { \ free(name); \ } \ } \ } \ UTEST_SURPRESS_WARNINGS_END \ void utest_run_##SET##_##NAME(int *utest_result) #define UTEST_F_SETUP(FIXTURE) \ static void utest_f_setup_##FIXTURE(int *utest_result, \ struct FIXTURE *utest_fixture) #define UTEST_F_TEARDOWN(FIXTURE) \ static void utest_f_teardown_##FIXTURE(int *utest_result, \ struct FIXTURE *utest_fixture) #define UTEST_F(FIXTURE, NAME) \ UTEST_SURPRESS_WARNINGS_BEGIN \ UTEST_EXTERN struct utest_state_s utest_state; \ static void utest_f_setup_##FIXTURE(int *, struct FIXTURE *); \ static void utest_f_teardown_##FIXTURE(int *, struct FIXTURE *); \ static void utest_run_##FIXTURE##_##NAME(int *, struct FIXTURE *); \ static void utest_f_##FIXTURE##_##NAME(int *utest_result, \ size_t utest_index) { \ struct FIXTURE fixture; \ (void)utest_index; \ memset(&fixture, 0, sizeof(fixture)); \ utest_f_setup_##FIXTURE(utest_result, &fixture); \ if 
(UTEST_TEST_PASSED != *utest_result) { \ return; \ } \ utest_run_##FIXTURE##_##NAME(utest_result, &fixture); \ utest_f_teardown_##FIXTURE(utest_result, &fixture); \ } \ UTEST_INITIALIZER(utest_register_##FIXTURE##_##NAME) { \ const size_t index = utest_state.tests_length++; \ const char name_part[] = #FIXTURE "." #NAME; \ const size_t name_size = strlen(name_part) + 1; \ char *name = UTEST_PTR_CAST(char *, malloc(name_size)); \ utest_state.tests = UTEST_PTR_CAST( \ struct utest_test_state_s *, \ utest_realloc(UTEST_PTR_CAST(void *, utest_state.tests), \ sizeof(struct utest_test_state_s) * \ utest_state.tests_length)); \ if (utest_state.tests && name) { \ utest_state.tests[index].func = &utest_f_##FIXTURE##_##NAME; \ utest_state.tests[index].name = name; \ utest_state.tests[index].index = 0; \ UTEST_SNPRINTF(name, name_size, "%s", name_part); \ } else { \ if (utest_state.tests) { \ free(utest_state.tests); \ utest_state.tests = NULL; \ } \ if (name) { \ free(name); \ } \ } \ } \ UTEST_SURPRESS_WARNINGS_END \ void utest_run_##FIXTURE##_##NAME(int *utest_result, \ struct FIXTURE *utest_fixture) #define UTEST_I_SETUP(FIXTURE) \ static void utest_i_setup_##FIXTURE( \ int *utest_result, struct FIXTURE *utest_fixture, size_t utest_index) #define UTEST_I_TEARDOWN(FIXTURE) \ static void utest_i_teardown_##FIXTURE( \ int *utest_result, struct FIXTURE *utest_fixture, size_t utest_index) #define UTEST_I(FIXTURE, NAME, INDEX) \ UTEST_SURPRESS_WARNINGS_BEGIN \ UTEST_EXTERN struct utest_state_s utest_state; \ static void utest_run_##FIXTURE##_##NAME##_##INDEX(int *, struct FIXTURE *); \ static void utest_i_##FIXTURE##_##NAME##_##INDEX(int *utest_result, \ size_t index) { \ struct FIXTURE fixture; \ memset(&fixture, 0, sizeof(fixture)); \ utest_i_setup_##FIXTURE(utest_result, &fixture, index); \ if (UTEST_TEST_PASSED != *utest_result) { \ return; \ } \ utest_run_##FIXTURE##_##NAME##_##INDEX(utest_result, &fixture); \ utest_i_teardown_##FIXTURE(utest_result, &fixture, index); \ } \ 
UTEST_INITIALIZER(utest_register_##FIXTURE##_##NAME##_##INDEX) { \ size_t i; \ utest_uint64_t iUp; \ for (i = 0; i < (INDEX); i++) { \ const size_t index = utest_state.tests_length++; \ const char name_part[] = #FIXTURE "." #NAME; \ const size_t name_size = strlen(name_part) + 32; \ char *name = UTEST_PTR_CAST(char *, malloc(name_size)); \ utest_state.tests = UTEST_PTR_CAST( \ struct utest_test_state_s *, \ utest_realloc(UTEST_PTR_CAST(void *, utest_state.tests), \ sizeof(struct utest_test_state_s) * \ utest_state.tests_length)); \ if (utest_state.tests && name) { \ utest_state.tests[index].func = &utest_i_##FIXTURE##_##NAME##_##INDEX; \ utest_state.tests[index].index = i; \ utest_state.tests[index].name = name; \ iUp = UTEST_CAST(utest_uint64_t, i); \ UTEST_SNPRINTF(name, name_size, "%s/%" UTEST_PRIu64, name_part, iUp); \ } else { \ if (utest_state.tests) { \ free(utest_state.tests); \ utest_state.tests = NULL; \ } \ if (name) { \ free(name); \ } \ } \ } \ } \ UTEST_SURPRESS_WARNINGS_END \ void utest_run_##FIXTURE##_##NAME##_##INDEX(int *utest_result, \ struct FIXTURE *utest_fixture) #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif UTEST_WEAK double utest_fabs(double d); UTEST_WEAK double utest_fabs(double d) { union { double d; utest_uint64_t u; } both; both.d = d; both.u &= 0x7fffffffffffffffu; return both.d; } UTEST_WEAK int utest_isnan(double d); UTEST_WEAK int utest_isnan(double d) { union { double d; utest_uint64_t u; } both; both.d = d; both.u &= 0x7fffffffffffffffu; return both.u > 0x7ff0000000000000u; } #ifdef __clang__ #pragma clang diagnostic pop #endif #if defined(__clang__) #if __has_warning("-Wunsafe-buffer-usage") #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunsafe-buffer-usage" #endif #endif UTEST_WEAK int utest_should_filter_test(const char *filter, const char *testcase); UTEST_WEAK int utest_should_filter_test(const char *filter, const char *testcase) { if 
(filter) { const char *filter_cur = filter; const char *testcase_cur = testcase; const char *filter_wildcard = UTEST_NULL; while (('\0' != *filter_cur) && ('\0' != *testcase_cur)) { if ('*' == *filter_cur) { /* store the position of the wildcard */ filter_wildcard = filter_cur; /* skip the wildcard character */ filter_cur++; while (('\0' != *filter_cur) && ('\0' != *testcase_cur)) { if ('*' == *filter_cur) { /* we found another wildcard (filter is something like *foo*) so we exit the current loop, and return to the parent loop to handle the wildcard case */ break; } else if (*filter_cur != *testcase_cur) { /* otherwise our filter didn't match, so reset it */ filter_cur = filter_wildcard; } /* move testcase along */ testcase_cur++; /* move filter along */ filter_cur++; } if (('\0' == *filter_cur) && ('\0' == *testcase_cur)) { return 0; } /* if the testcase has been exhausted, we don't have a match! */ if ('\0' == *testcase_cur) { return 1; } } else { if (*testcase_cur != *filter_cur) { /* test case doesn't match filter */ return 1; } else { /* move our filter and testcase forward */ testcase_cur++; filter_cur++; } } } if (('\0' != *filter_cur) || (('\0' != *testcase_cur) && ((filter == filter_cur) || ('*' != filter_cur[-1])))) { /* we have a mismatch! 
*/ return 1; } } return 0; } static UTEST_INLINE FILE *utest_fopen(const char *filename, const char *mode) { #ifdef _MSC_VER FILE *file; if (0 == fopen_s(&file, filename, mode)) { return file; } else { return UTEST_NULL; } #else return fopen(filename, mode); #endif } static UTEST_INLINE int utest_main(int argc, const char *const argv[]); int utest_main(int argc, const char *const argv[]) { utest_uint64_t failed = 0; utest_uint64_t skipped = 0; size_t index = 0; size_t *failed_testcases = UTEST_NULL; size_t failed_testcases_length = 0; size_t *skipped_testcases = UTEST_NULL; size_t skipped_testcases_length = 0; const char *filter = UTEST_NULL; utest_uint64_t ran_tests = 0; int enable_mixed_units = 0; int random_order = 0; utest_uint32_t seed = 0; enum colours { RESET, GREEN, RED, YELLOW }; const int use_colours = UTEST_COLOUR_OUTPUT(); const char *colours[] = {"\033[0m", "\033[32m", "\033[31m", "\033[33m"}; if (!use_colours) { for (index = 0; index < sizeof colours / sizeof colours[0]; index++) { colours[index] = ""; } } /* loop through all arguments looking for our options */ for (index = 1; index < UTEST_CAST(size_t, argc); index++) { /* Informational switches */ const char help_str[] = "--help"; const char list_str[] = "--list-tests"; /* Test config switches */ const char filter_str[] = "--filter="; const char output_str[] = "--output="; const char enable_mixed_units_str[] = "--enable-mixed-units"; const char random_order_str[] = "--random-order"; const char random_order_with_seed_str[] = "--random-order="; if (0 == UTEST_STRNCMP(argv[index], help_str, strlen(help_str))) { printf("utest.h - the single file unit testing solution for C/C++!\n" "Command line Options:\n" " --help Show this message and exit.\n" " --filter= Filter the test cases to run (EG. " "MyTest*.a would run MyTestCase.a but not MyTestCase.b).\n" " --list-tests List testnames, one per line. 
Output " "names can be passed to --filter.\n"); printf(" --output= Output an xunit XML file to the file " "specified in .\n" " --enable-mixed-units Enable the per-test output to contain " "mixed units (s/ms/us/ns).\n" " --random-order[=] Randomize the order that the tests are " "ran in. If the optional argument is not provided, then a " "random starting seed is used.\n"); goto cleanup; } else if (0 == UTEST_STRNCMP(argv[index], filter_str, strlen(filter_str))) { /* user wants to filter what test cases run! */ filter = argv[index] + strlen(filter_str); } else if (0 == UTEST_STRNCMP(argv[index], output_str, strlen(output_str))) { utest_state.output = utest_fopen(argv[index] + strlen(output_str), "w+"); } else if (0 == UTEST_STRNCMP(argv[index], list_str, strlen(list_str))) { for (index = 0; index < utest_state.tests_length; index++) { UTEST_PRINTF("%s\n", utest_state.tests[index].name); } /* when printing the test list, don't actually run the tests */ return 0; } else if (0 == UTEST_STRNCMP(argv[index], enable_mixed_units_str, strlen(enable_mixed_units_str))) { enable_mixed_units = 1; } else if (0 == UTEST_STRNCMP(argv[index], random_order_with_seed_str, strlen(random_order_with_seed_str))) { seed = UTEST_CAST(utest_uint32_t, strtoul(argv[index] + strlen(random_order_with_seed_str), UTEST_NULL, 10)); random_order = 1; } else if (0 == UTEST_STRNCMP(argv[index], random_order_str, strlen(random_order_str))) { const utest_int64_t ns = utest_ns(); // Some really poor pseudo-random using the current time. I do this // because I really want to avoid using C's rand() because that'd mean our // random would be affected by any srand() usage by the user (which I // don't want). seed = UTEST_CAST(utest_uint32_t, ns >> 32) * 31 + UTEST_CAST(utest_uint32_t, ns & 0xffffffff); random_order = 1; } } if (random_order) { // Use Fisher-Yates with the Durstenfield's version to randomly re-order the // tests. 
for (index = utest_state.tests_length; index > 1; index--) { // For the random order we'll use PCG. const utest_uint32_t state = seed; const utest_uint32_t word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u; const utest_uint32_t next = ((word >> 22u) ^ word) % UTEST_CAST(utest_uint32_t, index); // Swap the randomly chosen element into the last location. const struct utest_test_state_s copy = utest_state.tests[index - 1]; utest_state.tests[index - 1] = utest_state.tests[next]; utest_state.tests[next] = copy; // Move the seed onwards. seed = seed * 747796405u + 2891336453u; } } for (index = 0; index < utest_state.tests_length; index++) { if (utest_should_filter_test(filter, utest_state.tests[index].name)) { continue; } ran_tests++; } printf("%s[==========]%s Running %" UTEST_PRIu64 " test cases.\n", colours[GREEN], colours[RESET], UTEST_CAST(utest_uint64_t, ran_tests)); if (utest_state.output) { fprintf(utest_state.output, "\n"); fprintf(utest_state.output, "\n", UTEST_CAST(utest_uint64_t, ran_tests)); fprintf(utest_state.output, "\n", UTEST_CAST(utest_uint64_t, ran_tests)); } for (index = 0; index < utest_state.tests_length; index++) { int result = UTEST_TEST_PASSED; utest_int64_t ns = 0; if (utest_should_filter_test(filter, utest_state.tests[index].name)) { continue; } printf("%s[ RUN ]%s %s\n", colours[GREEN], colours[RESET], utest_state.tests[index].name); if (utest_state.output) { fprintf(utest_state.output, "", utest_state.tests[index].name); } ns = utest_ns(); errno = 0; #if defined(UTEST_HAS_EXCEPTIONS) UTEST_SURPRESS_WARNING_BEGIN try { utest_state.tests[index].func(&result, utest_state.tests[index].index); } catch (const std::exception &err) { printf(" Exception : %s\n", err.what()); result = UTEST_TEST_FAILURE; } catch (...) 
{ printf(" Exception : Unknown\n"); result = UTEST_TEST_FAILURE; } UTEST_SURPRESS_WARNING_END #else utest_state.tests[index].func(&result, utest_state.tests[index].index); #endif ns = utest_ns() - ns; if (utest_state.output) { fprintf(utest_state.output, "\n"); } // Record the failing test. if (UTEST_TEST_FAILURE == result) { const size_t failed_testcase_index = failed_testcases_length++; failed_testcases = UTEST_PTR_CAST( size_t *, utest_realloc(UTEST_PTR_CAST(void *, failed_testcases), sizeof(size_t) * failed_testcases_length)); if (UTEST_NULL != failed_testcases) { failed_testcases[failed_testcase_index] = index; } failed++; } else if (UTEST_TEST_SKIPPED == result) { const size_t skipped_testcase_index = skipped_testcases_length++; skipped_testcases = UTEST_PTR_CAST( size_t *, utest_realloc(UTEST_PTR_CAST(void *, skipped_testcases), sizeof(size_t) * skipped_testcases_length)); if (UTEST_NULL != skipped_testcases) { skipped_testcases[skipped_testcase_index] = index; } skipped++; } { const char *const units[] = {"ns", "us", "ms", "s", UTEST_NULL}; unsigned int unit_index = 0; utest_int64_t time = ns; if (enable_mixed_units) { for (unit_index = 0; UTEST_NULL != units[unit_index]; unit_index++) { if (10000 > time) { break; } time /= 1000; } } if (UTEST_TEST_FAILURE == result) { printf("%s[ FAILED ]%s %s (%" UTEST_PRId64 "%s)\n", colours[RED], colours[RESET], utest_state.tests[index].name, time, units[unit_index]); } else if (UTEST_TEST_SKIPPED == result) { printf("%s[ SKIPPED ]%s %s (%" UTEST_PRId64 "%s)\n", colours[YELLOW], colours[RESET], utest_state.tests[index].name, time, units[unit_index]); } else { printf("%s[ OK ]%s %s (%" UTEST_PRId64 "%s)\n", colours[GREEN], colours[RESET], utest_state.tests[index].name, time, units[unit_index]); } } } printf("%s[==========]%s %" UTEST_PRIu64 " test cases ran.\n", colours[GREEN], colours[RESET], ran_tests); printf("%s[ PASSED ]%s %" UTEST_PRIu64 " tests.\n", colours[GREEN], colours[RESET], ran_tests - failed - skipped); if 
(0 != skipped) { printf("%s[ SKIPPED ]%s %" UTEST_PRIu64 " tests, listed below:\n", colours[YELLOW], colours[RESET], skipped); for (index = 0; index < skipped_testcases_length; index++) { printf("%s[ SKIPPED ]%s %s\n", colours[YELLOW], colours[RESET], utest_state.tests[skipped_testcases[index]].name); } } if (0 != failed) { printf("%s[ FAILED ]%s %" UTEST_PRIu64 " tests, listed below:\n", colours[RED], colours[RESET], failed); for (index = 0; index < failed_testcases_length; index++) { printf("%s[ FAILED ]%s %s\n", colours[RED], colours[RESET], utest_state.tests[failed_testcases[index]].name); } } if (utest_state.output) { fprintf(utest_state.output, "\n\n"); } cleanup: for (index = 0; index < utest_state.tests_length; index++) { free(UTEST_PTR_CAST(void *, utest_state.tests[index].name)); } free(UTEST_PTR_CAST(void *, skipped_testcases)); free(UTEST_PTR_CAST(void *, failed_testcases)); free(UTEST_PTR_CAST(void *, utest_state.tests)); if (utest_state.output) { fclose(utest_state.output); } return UTEST_CAST(int, failed); } #if defined(__clang__) #if __has_warning("-Wunsafe-buffer-usage") #pragma clang diagnostic pop #endif #endif /* we need, in exactly one source file, define the global struct that will hold the data we need to run utest. This macro allows the user to declare the data without having to use the UTEST_MAIN macro, thus allowing them to write their own main() function. */ #define UTEST_STATE() struct utest_state_s utest_state = {0, 0, 0} /* define a main() function to call into utest.h and start executing tests! A user can optionally not use this macro, and instead define their own main() function and manually call utest_main. The user must, in exactly one source file, use the UTEST_STATE macro to declare a global struct variable that utest requires. 
*/ #define UTEST_MAIN() \ UTEST_STATE(); \ int main(int argc, const char *const argv[]) { \ return utest_main(argc, argv); \ } #endif /* SHEREDOM_UTEST_H_INCLUDED */ ================================================ FILE: deps/vma/CHANGELOG.md ================================================ # 3.2.1 (2025-02-05) Changes: - Fixed an assert in `vmaCreateAllocator` function incorrectly failing when Vulkan version 1.4 is used (#457). - Fix for importing function `vkGetPhysicalDeviceMemoryProperties2` / `vkGetPhysicalDeviceMemoryProperties2KHR` when `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro is enabled (#410). - Other minor fixes and improvements... # 3.2.0 (2024-12-30) Additions to the library API: - Added support for Vulkan 1.4. - Added support for VK_KHR_external_memory_win32 extension - `VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT` flag, `vmaGetMemoryWin32Handle` function, and a whole new documentation chapter about it (#442). Other changes: - Fixed thread safety issue (#451). - Many other bug fixes and improvements in the library code, documentation, sample app, Cmake script, mostly to improve compatibility with various compilers and GPUs. # 3.1.0 (2024-05-27) This release gathers fixes and improvements made during many months of continuous development on the main branch, mostly based on issues and pull requests on GitHub. Additions to the library API: - Added convenience functions `vmaCopyMemoryToAllocation`, `vmaCopyAllocationToMemory`. - Added functions `vmaCreateAliasingBuffer2`, `vmaCreateAliasingImage2` that offer creating a buffer/image in an existing allocation with additional `allocationLocalOffset`. - Added function `vmaGetAllocationInfo2`, structure `VmaAllocationInfo2` that return additional information about an allocation, useful for interop with other APIs (#383, #340). - Added callback `VmaDefragmentationInfo::pfnBreakCallback` that allows breaking long execution of `vmaBeginDefragmentation`. 
Also added `PFN_vmaCheckDefragmentationBreakFunction`, `VmaDefragmentationInfo::pBreakCallbackUserData`. - Added support for VK_KHR_maintenance4 extension - `VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT` flag (#397). - Added support for VK_KHR_maintenance5 extension - `VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT` flag (#411). Other changes: - Changes in debug and configuration macros: - Split macros into separate `VMA_DEBUG_LOG` and `VMA_DEBUG_LOG_FORMAT` (#297). - Added macros `VMA_ASSERT_LEAK`, `VMA_LEAK_LOG_FORMAT` separate from normal `VMA_ASSERT`, `VMA_DEBUG_LOG_FORMAT` (#379, #385). - Added macro `VMA_EXTENDS_VK_STRUCT` (#347). - Countless bug fixes and improvements in the code and documentation, mostly to improve compatibility with various compilers and GPUs, including: - Fixed missing `#include` that resulted in compilation error about `snprintf` not declared on some compilers (#312). - Fixed main memory type selection algorithm for GPUs that have no `HOST_CACHED` memory type, like Raspberry Pi (#362). - Major changes in Cmake script. - Fixes in GpuMemDumpVis.py script. # 3.0.1 (2022-05-26) - Fixes in defragmentation algorithm. - Fixes in GpuMemDumpVis.py regarding image height calculation. - Other bug fixes, optimizations, and improvements in the code and documentation. # 3.0.0 (2022-03-25) It has been a long time since the previous official release, so hopefully everyone has been using the latest code from "master" branch, which is always maintained in a good state, not the old version. For completeness, here is the list of changes since v2.3.0. The major version number has changed, so there are some compatibility-breaking changes, but the basic API stays the same and is mostly backward-compatible. 
Major features added (some compatibility-breaking): - Added new API for selecting preferred memory type: flags `VMA_MEMORY_USAGE_AUTO`, `VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE`, `VMA_MEMORY_USAGE_AUTO_PREFER_HOST`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT`, `VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT`. Old values like `VMA_MEMORY_USAGE_GPU_ONLY` still work as before, for backward compatibility, but are not recommended. - Added new defragmentation API and algorithm, replacing the old one. See structure `VmaDefragmentationInfo`, `VmaDefragmentationMove`, `VmaDefragmentationPassMoveInfo`, `VmaDefragmentationStats`, function `vmaBeginDefragmentation`, `vmaEndDefragmentation`, `vmaBeginDefragmentationPass`, `vmaEndDefragmentationPass`. - Redesigned API for statistics, replacing the old one. See structures: `VmaStatistics`, `VmaDetailedStatistics`, `VmaTotalStatistics`. `VmaBudget`, functions: `vmaGetHeapBudgets`, `vmaCalculateStatistics`, `vmaGetPoolStatistics`, `vmaCalculatePoolStatistics`, `vmaGetVirtualBlockStatistics`, `vmaCalculateVirtualBlockStatistics`. - Added "Virtual allocator" feature - possibility to use core allocation algorithms for allocation of custom memory, not necessarily Vulkan device memory. See functions like `vmaCreateVirtualBlock`, `vmaDestroyVirtualBlock` and many more. - `VmaAllocation` now keeps both `void* pUserData` and `char* pName`. Added function `vmaSetAllocationName`, member `VmaAllocationInfo::pName`. Flag `VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT` is now deprecated. - Clarified and cleaned up various ways of importing Vulkan functions. See macros `VMA_STATIC_VULKAN_FUNCTIONS`, `VMA_DYNAMIC_VULKAN_FUNCTIONS`, structure `VmaVulkanFunctions`. Added members `VmaVulkanFunctions::vkGetInstanceProcAddr`, `vkGetDeviceProcAddr`, which are now required when using `VMA_DYNAMIC_VULKAN_FUNCTIONS`. 
Removed (compatibility-breaking): - Removed whole "lost allocations" feature. Removed from the interface: `VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT`, `VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT`, `vmaCreateLostAllocation`, `vmaMakePoolAllocationsLost`, `vmaTouchAllocation`, `VmaAllocatorCreateInfo::frameInUseCount`, `VmaPoolCreateInfo::frameInUseCount`. - Removed whole "record & replay" feature. Removed from the API: `VmaAllocatorCreateInfo::pRecordSettings`, `VmaRecordSettings`, `VmaRecordFlagBits`, `VmaRecordFlags`. Removed VmaReplay application. - Removed "buddy" algorithm - removed flag `VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT`. Minor but compatibility-breaking changes: - Changes in `ALLOCATION_CREATE_STRATEGY` flags. Removed flags: `VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT`, `VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`, which were aliases to other existing flags. - Added a member `void* pUserData` to `VmaDeviceMemoryCallbacks`. Updated `PFN_vmaAllocateDeviceMemoryFunction`, `PFN_vmaFreeDeviceMemoryFunction` to use the new `pUserData` member. - Removed function `vmaResizeAllocation` that was already deprecated. Other major changes: - Added new features to custom pools: support for dedicated allocations, new member `VmaPoolCreateInfo::pMemoryAllocateNext`, `minAllocationAlignment`. - Added support for Vulkan 1.2, 1.3. - Added support for VK_KHR_buffer_device_address extension - flag `VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT`. - Added support for VK_EXT_memory_priority extension - flag `VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT`, members `VmaAllocationCreateInfo::priority`, `VmaPoolCreateInfo::priority`. - Added support for VK_AMD_device_coherent_memory extension - flag `VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT`. - Added member `VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes`. - Added function `vmaGetAllocatorInfo`, structure `VmaAllocatorInfo`. 
- Added functions `vmaFlushAllocations`, `vmaInvalidateAllocations` for multiple allocations at once. - Added flag `VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT`. - Added function `vmaCreateBufferWithAlignment`. - Added convenience function `vmaGetAllocationMemoryProperties`. - Added convenience functions: `vmaCreateAliasingBuffer`, `vmaCreateAliasingImage`. Other minor changes: - Implemented Two-Level Segregated Fit (TLSF) allocation algorithm, replacing previous default one. It is much faster, especially when freeing many allocations at once or when `bufferImageGranularity` is large. - Renamed debug macro `VMA_DEBUG_ALIGNMENT` to `VMA_MIN_ALIGNMENT`. - Added CMake support - CMakeLists.txt files. Removed Premake support. - Changed `vmaInvalidateAllocation` and `vmaFlushAllocation` to return `VkResult`. - Added nullability annotations for Clang: `VMA_NULLABLE`, `VMA_NOT_NULL`, `VMA_NULLABLE_NON_DISPATCHABLE`, `VMA_NOT_NULL_NON_DISPATCHABLE`, `VMA_LEN_IF_NOT_NULL`. - JSON dump format has changed. - Countless fixes and improvements, including performance optimizations, compatibility with various platforms and compilers, documentation. # 2.3.0 (2019-12-04) Major release after a year of development in "master" branch and feature branches. Notable new features: supporting Vulkan 1.1, supporting query for memory budget. Major changes: - Added support for Vulkan 1.1. - Added member `VmaAllocatorCreateInfo::vulkanApiVersion`. - When Vulkan 1.1 is used, there is no need to enable VK_KHR_dedicated_allocation or VK_KHR_bind_memory2 extensions, as they are promoted to Vulkan itself. - Added support for query for memory budget and staying within the budget. - Added function `vmaGetBudget`, structure `VmaBudget`. This can also serve as simple statistics, more efficient than `vmaCalculateStats`. - By default the budget is estimated based on memory heap sizes. 
It may be queried from the system using VK_EXT_memory_budget extension if you use `VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT` flag and `VmaAllocatorCreateInfo::instance` member. - Added flag `VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT` that fails an allocation if it would exceed the budget. - Added new memory usage options: - `VMA_MEMORY_USAGE_CPU_COPY` for memory that is preferably not `DEVICE_LOCAL` but not guaranteed to be `HOST_VISIBLE`. - `VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED` for memory that is `LAZILY_ALLOCATED`. - Added support for VK_KHR_bind_memory2 extension: - Added `VMA_ALLOCATION_CREATE_DONT_BIND_BIT` flag that lets you create both buffer/image and allocation, but don't bind them together. - Added flag `VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT`, functions `vmaBindBufferMemory2`, `vmaBindImageMemory2` that let you specify additional local offset and `pNext` pointer while binding. - Added functions `vmaSetPoolName`, `vmaGetPoolName` that let you assign string names to custom pools. JSON dump file format and VmaDumpVis tool is updated to show these names. - Defragmentation is legal only on buffers and images in `VK_IMAGE_TILING_LINEAR`. This is due to the way it is currently implemented in the library and the restrictions of the Vulkan specification. Clarified documentation in this regard. See discussion in #59. Minor changes: - Made `vmaResizeAllocation` function deprecated, always returning failure. - Made changes in the internal algorithm for the choice of memory type. Be careful! You may now get a type that is not `HOST_VISIBLE` or `HOST_COHERENT` if it's not stated as always ensured by some `VMA_MEMORY_USAGE_*` flag. - Extended VmaReplay application with more detailed statistics printed at the end. - Added macros `VMA_CALL_PRE`, `VMA_CALL_POST` that let you decorate declarations of all library functions if you want to e.g. export/import them as dynamically linked library. 
- Optimized `VmaAllocation` objects to be allocated out of an internal free-list allocator. This makes allocation and deallocation causing 0 dynamic CPU heap allocations on average. - Updated recording CSV file format version to 1.8, to support new functions. - Many additions and fixes in documentation. Many compatibility fixes for various compilers and platforms. Other internal bugfixes, optimizations, updates, refactoring... # 2.2.0 (2018-12-13) Major release after many months of development in "master" branch and feature branches. Notable new features: defragmentation of GPU memory, buddy algorithm, convenience functions for sparse binding. Major changes: - New, more powerful defragmentation: - Added structure `VmaDefragmentationInfo2`, functions `vmaDefragmentationBegin`, `vmaDefragmentationEnd`. - Added support for defragmentation of GPU memory. - Defragmentation of CPU memory now uses `memmove`, so it can move data to overlapping regions. - Defragmentation of CPU memory is now available for memory types that are `HOST_VISIBLE` but not `HOST_COHERENT`. - Added structure member `VmaVulkanFunctions::vkCmdCopyBuffer`. - Major internal changes in defragmentation algorithm. - VmaReplay: added parameters: `--DefragmentAfterLine`, `--DefragmentationFlags`. - Old interface (structure `VmaDefragmentationInfo`, function `vmaDefragment`) is now deprecated. - Added buddy algorithm, available for custom pools - flag `VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT`. - Added convenience functions for multiple allocations and deallocations at once, intended for sparse binding resources - functions `vmaAllocateMemoryPages`, `vmaFreeMemoryPages`. - Added function that tries to resize existing allocation in place: `vmaResizeAllocation`. 
- Added flags for allocation strategy: `VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT`, and their aliases: `VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT`, `VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT`. Minor changes: - Changed behavior of allocation functions to return `VK_ERROR_VALIDATION_FAILED_EXT` when trying to allocate memory of size 0, create buffer with size 0, or image with one of the dimensions 0. - VmaReplay: Added support for Windows end of lines. - Updated recording CSV file format version to 1.5, to support new functions. - Internal optimization: using read-write mutex on some platforms. - Many additions and fixes in documentation. Many compatibility fixes for various compilers. Other internal bugfixes, optimizations, refactoring, added more internal validation... # 2.1.0 (2018-09-10) Minor bugfixes. # 2.1.0-beta.1 (2018-08-27) Major release after many months of development in "development" branch and features branches. Many new features added, some bugs fixed. API stays backward-compatible. Major changes: - Added linear allocation algorithm, accessible for custom pools, that can be used as free-at-once, stack, double stack, or ring buffer. See "Linear allocation algorithm" documentation chapter. - Added `VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT`, `VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT`. - Added feature to record sequence of calls to the library to a file and replay it using dedicated application. See documentation chapter "Record and replay". - Recording: added `VmaAllocatorCreateInfo::pRecordSettings`. - Replaying: added VmaReplay project. - Recording file format: added document "docs/Recording file format.md". - Improved support for non-coherent memory. - Added functions: `vmaFlushAllocation`, `vmaInvalidateAllocation`. - `nonCoherentAtomSize` is now respected automatically. 
- Added `VmaVulkanFunctions::vkFlushMappedMemoryRanges`, `vkInvalidateMappedMemoryRanges`. - Improved debug features related to detecting incorrect mapped memory usage. See documentation chapter "Debugging incorrect memory usage". - Added debug macro `VMA_DEBUG_DETECT_CORRUPTION`, functions `vmaCheckCorruption`, `vmaCheckPoolCorruption`. - Added debug macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to initialize contents of allocations with a bit pattern. - Changed behavior of `VMA_DEBUG_MARGIN` macro - it now adds margin also before first and after last allocation in a block. - Changed format of JSON dump returned by `vmaBuildStatsString` (not backward compatible!). - Custom pools and memory blocks now have IDs that don't change after sorting. - Added properties: "CreationFrameIndex", "LastUseFrameIndex", "Usage". - Changed VmaDumpVis tool to use these new properties for better coloring. - Changed behavior of `vmaGetAllocationInfo` and `vmaTouchAllocation` to update `allocation.lastUseFrameIndex` even if allocation cannot become lost. Minor changes: - Changes in custom pools: - Added new structure member `VmaPoolStats::blockCount`. - Changed behavior of `VmaPoolCreateInfo::blockSize` = 0 (default) - it now means that pool may use variable block sizes, just like default pools do. - Improved logic of `vmaFindMemoryTypeIndex` for some cases, especially integrated GPUs. - VulkanSample application: Removed dependency on external library MathFu. Added own vector and matrix structures. - Changes that improve compatibility with various platforms, including: Visual Studio 2012, 32-bit code, C compilers. - Changed usage of "VK_KHR_dedicated_allocation" extension in the code to be optional, driven by macro `VMA_DEDICATED_ALLOCATION`, for compatibility with Android. - Many additions and fixes in documentation, including description of new features, as well as "Validation layer warnings". - Other bugfixes. # 2.0.0 (2018-03-19) A major release with many compatibility-breaking changes. 
Notable new features: - Introduction of `VmaAllocation` handle that you must retrieve from allocation functions and pass to deallocation functions next to normal `VkBuffer` and `VkImage`. - Introduction of `VmaAllocationInfo` structure that you can retrieve from `VmaAllocation` handle to access parameters of the allocation (like `VkDeviceMemory` and offset) instead of retrieving them directly from allocation functions. - Support for reference-counted mapping and persistently mapped allocations - see `vmaMapMemory`, `VMA_ALLOCATION_CREATE_MAPPED_BIT`. - Support for custom memory pools - see `VmaPool` handle, `VmaPoolCreateInfo` structure, `vmaCreatePool` function. - Support for defragmentation (compaction) of allocations - see function `vmaDefragment` and related structures. - Support for "lost allocations" - see appropriate chapter on documentation Main Page. # 1.0.1 (2017-07-04) - Fixes for Linux GCC compilation. - Changed "CONFIGURATION SECTION" to contain #ifndef so you can define these macros before including this header, not necessarily change them in the file. # 1.0.0 (2017-06-16) First public release. ================================================ FILE: deps/vma/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.10) project(vma) add_library(vma STATIC ${CMAKE_CURRENT_LIST_DIR}/src/vk_mem_alloc.cpp ${CMAKE_CURRENT_LIST_DIR}/src/vk_mem_alloc.h) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") target_compile_options(vma PRIVATE -Wno-nullability-completeness) endif() target_include_directories(vma PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../vulkan-headers) target_include_directories(vma PUBLIC ${CMAKE_CURRENT_LIST_DIR}/src) target_compile_definitions(vma PUBLIC VMA_STATS_STRING_ENABLED=0) ================================================ FILE: deps/vma/LICENSE.txt ================================================ Copyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: deps/vma/README.md ================================================ # Vulkan Memory Allocator Easy to integrate Vulkan memory allocation library. **Documentation:** Browse online: [Vulkan Memory Allocator](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/) (generated from Doxygen-style comments in [include/vk_mem_alloc.h](include/vk_mem_alloc.h)) **License:** MIT. 
See [LICENSE.txt](LICENSE.txt) **Changelog:** See [CHANGELOG.md](CHANGELOG.md) **Product page:** [Vulkan Memory Allocator on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) **Build status:** - Windows: [![Build status](https://ci.appveyor.com/api/projects/status/4vlcrb0emkaio2pn/branch/master?svg=true)](https://ci.appveyor.com/project/adam-sawicki-amd/vulkanmemoryallocator/branch/master) - Linux: [![Build Status](https://app.travis-ci.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.svg?branch=master)](https://app.travis-ci.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) [![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.svg)](http://isitmaintained.com/project/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator "Average time to resolve an issue") # Problem Memory allocation and resource (buffer and image) creation in Vulkan is difficult (comparing to older graphics APIs, like D3D11 or OpenGL) for several reasons: - It requires a lot of boilerplate code, just like everything else in Vulkan, because it is a low-level and high-performance API. - There is additional level of indirection: `VkDeviceMemory` is allocated separately from creating `VkBuffer`/`VkImage` and they must be bound together. - Driver must be queried for supported memory heaps and memory types. Different GPU vendors provide different types of it. - It is recommended to allocate bigger chunks of memory and assign parts of them to particular resources, as there is a limit on maximum number of memory blocks that can be allocated. # Features This library can help game developers to manage memory allocations and resource creation by offering some higher-level functions: 1. Functions that help to choose correct and optimal memory type based on intended usage of the memory. - Required or preferred traits of the memory are expressed using higher-level description comparing to Vulkan flags. 2. 
Functions that allocate memory blocks, reserve and return parts of them (`VkDeviceMemory` + offset + size) to the user. - Library keeps track of allocated memory blocks, used and unused ranges inside them, finds best matching unused ranges for new allocations, respects all the rules of alignment and buffer/image granularity. 3. Functions that can create an image/buffer, allocate memory for it and bind them together - all in one call. Additional features: - Well-documented - description of all functions and structures provided, along with chapters that contain general description and example code. - Thread-safety: Library is designed to be used in multithreaded code. Access to a single device memory block referred by different buffers and textures (binding, mapping) is synchronized internally. Memory mapping is reference-counted. - Configuration: Fill optional members of `VmaAllocatorCreateInfo` structure to provide custom CPU memory allocator, pointers to Vulkan functions and other parameters. - Customization and integration with custom engines: Predefine appropriate macros to provide your own implementation of all external facilities used by the library like assert, mutex, atomic. - Support for memory mapping, reference-counted internally. Support for persistently mapped memory: Just allocate with appropriate flag and access the pointer to already mapped memory. - Support for non-coherent memory. Functions that flush/invalidate memory. `nonCoherentAtomSize` is respected automatically. - Support for resource aliasing (overlap). - Support for sparse binding and sparse residency: Convenience functions that allocate or free multiple memory pages at once. - Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it. - Linear allocator: Create a pool with linear algorithm and use it for much faster allocations and deallocations in free-at-once, stack, double stack, or ring buffer fashion. 
- Support for Vulkan 1.0...1.4. - Support for extensions (and equivalent functionality included in new Vulkan versions): - VK_KHR_dedicated_allocation: Just enable it and it will be used automatically by the library. - VK_KHR_bind_memory2. - VK_KHR_maintenance4. - VK_KHR_maintenance5, including `VkBufferUsageFlags2CreateInfoKHR`. - VK_EXT_memory_budget: Used internally if available to query for current usage and budget. If not available, it falls back to an estimation based on memory heap sizes. - VK_KHR_buffer_device_address: Flag `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR` is automatically added to memory allocations where needed. - VK_EXT_memory_priority: Set `priority` of allocations or custom pools and it will be set automatically using this extension. - VK_AMD_device_coherent_memory. - VK_KHR_external_memory_win32. - Defragmentation of GPU and CPU memory: Let the library move data around to free some memory blocks and make your allocations better compacted. - Statistics: Obtain brief or detailed statistics about the amount of memory used, unused, number of allocated blocks, number of allocations etc. - globally, per memory heap, and per memory type. - Debug annotations: Associate custom `void* pUserData` and debug `char* pName` with each allocation. - JSON dump: Obtain a string in JSON format with detailed map of internal state, including list of allocations, their string names, and gaps between them. - Convert this JSON dump into a picture to visualize your memory. See [tools/GpuMemDumpVis](tools/GpuMemDumpVis/README.md). - Debugging incorrect memory usage: Enable initialization of all allocated memory with a bit pattern to detect usage of uninitialized or freed memory. Enable validation of a magic number after every allocation to detect out-of-bounds memory corruption. - Support for interoperability with OpenGL. - Virtual allocator: Interface for using core allocation algorithm to allocate any custom data, e.g. pieces of one large buffer. 
# Prerequisites - Self-contained C++ library in single header file. No external dependencies other than standard C and C++ library and of course Vulkan. Some features of C++14 used. STL containers, RTTI, or C++ exceptions are not used. - Public interface in C, in same convention as Vulkan API. Implementation in C++. - Error handling implemented by returning `VkResult` error codes - same way as in Vulkan. - Interface documented using Doxygen-style comments. - Platform-independent, but developed and tested on Windows using Visual Studio. Continuous integration setup for Windows and Linux. Used also on Android, MacOS, and other platforms. # Example Basic usage of this library is very simple. Advanced features are optional. After you created global `VmaAllocator` object, a complete code needed to create a buffer may look like this: ```cpp VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufferInfo.size = 65536; bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocInfo = {}; allocInfo.usage = VMA_MEMORY_USAGE_AUTO; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); ``` With this one function call: 1. `VkBuffer` is created. 2. `VkDeviceMemory` block is allocated if needed. 3. An unused region of the memory block is bound to this buffer. `VmaAllocation` is an object that represents memory assigned to this buffer. It can be queried for parameters like `VkDeviceMemory` handle and offset. # How to build On Windows it is recommended to use [CMake GUI](https://cmake.org/runningcmake/). Alternatively you can generate/open a Visual Studio from the command line: ```sh # By default CMake picks the newest version of Visual Studio it can use cmake -S . -B build -D VMA_BUILD_SAMPLES=ON cmake --open build ``` On Linux: ```sh cmake -S . 
-B build # Since VMA has no source files, you can skip to installation immediately cmake --install build --prefix build/install ``` ## How to use After calling either `find_package` or `add_subdirectory` simply link the library. This automatically handles configuring the include directory. Example: ```cmake find_package(VulkanMemoryAllocator CONFIG REQUIRED) target_link_libraries(YourGameEngine PRIVATE GPUOpen::VulkanMemoryAllocator) ``` For more info on using CMake visit the official [CMake documentation](https://cmake.org/cmake/help/latest/index.html). ## Building using vcpkg You can download and install VulkanMemoryAllocator using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: git clone https://github.com/Microsoft/vcpkg.git cd vcpkg ./bootstrap-vcpkg.sh ./vcpkg integrate install ./vcpkg install vulkan-memory-allocator The VulkanMemoryAllocator port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. # Binaries The release comes with precompiled binary executable for "VulkanSample" application which contains test suite. It is compiled using Visual Studio 2022, so it requires appropriate libraries to work, including "MSVCP140.dll", "VCRUNTIME140.dll", "VCRUNTIME140_1.dll". If the launch fails with error message telling about those files missing, please download and install [Microsoft Visual C++ Redistributable](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads), "X64" version. # Read more See **[Documentation](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/)**. 
# Software using this library - **[Blender](https://www.blender.org)** - **[Qt Project](https://github.com/qt)** - **[Baldur's Gate III](https://www.mobygames.com/game/150689/baldurs-gate-iii/credits/windows/?autoplatform=true)** - **[Cyberpunk 2077](https://www.mobygames.com/game/128136/cyberpunk-2077/credits/windows/?autoplatform=true)** - **[X-Plane](https://x-plane.com/)** - **[Detroit: Become Human](https://gpuopen.com/learn/porting-detroit-3/)** - **[Vulkan Samples](https://github.com/LunarG/VulkanSamples)** - official Khronos Vulkan samples. License: Apache-style. - **[GFXReconstruct](https://github.com/LunarG/gfxreconstruct)** - a tool for the capture and replay of graphics API calls. License: MIT. - **[Anvil](https://github.com/GPUOpen-LibrariesAndSDKs/Anvil)** - cross-platform framework for Vulkan. License: MIT. - **[Filament](https://github.com/google/filament)** - physically based rendering engine for Android, Windows, Linux and macOS, from Google. Apache License 2.0. - **[Atypical Games - proprietary game engine](https://developer.samsung.com/galaxy-gamedev/gamedev-blog/infinitejet.html)** - **[Flax Engine](https://flaxengine.com/)** - **[Godot Engine](https://github.com/godotengine/godot/)** - multi-platform 2D and 3D game engine. License: MIT. - **[Lightweight Java Game Library (LWJGL)](https://www.lwjgl.org/)** - includes binding of the library for Java. License: BSD. - **[LightweightVK](https://github.com/corporateshark/lightweightvk)** - lightweight C++ bindless Vulkan 1.3 wrapper. License: MIT. - **[PowerVR SDK](https://github.com/powervr-graphics/Native_SDK)** - C++ cross-platform 3D graphics SDK, from Imagination. License: MIT. - **[Skia](https://github.com/google/skia)** - complete 2D graphic library for drawing Text, Geometries, and Images, from Google. - **[The Forge](https://github.com/ConfettiFX/The-Forge)** - cross-platform rendering framework. Apache License 2.0. 
- **[VK9](https://github.com/disks86/VK9)** - Direct3D 9 compatibility layer using Vulkan. Zlib license. - **[vkDOOM3](https://github.com/DustinHLand/vkDOOM3)** - Vulkan port of GPL DOOM 3 BFG Edition. License: GNU GPL. - **[vkQuake2](https://github.com/kondrak/vkQuake2)** - vanilla Quake 2 with Vulkan support. License: GNU GPL. - **[Vulkan Best Practice for Mobile Developers](https://github.com/ARM-software/vulkan_best_practice_for_mobile_developers)** from ARM. License: MIT. - **[RPCS3](https://github.com/RPCS3/rpcs3)** - PlayStation 3 emulator/debugger. License: GNU GPLv2. - **[PPSSPP](https://github.com/hrydgard/ppsspp)** - Playstation Portable emulator/debugger. License: GNU GPLv2+. - **[Wicked Engine](https://github.com/turanszkij/WickedEngine)** - 3D engine with modern graphics [Many other projects on GitHub](https://github.com/search?q=AMD_VULKAN_MEMORY_ALLOCATOR_H&type=Code) and some game development studios that use Vulkan in their games. # See also - **[D3D12 Memory Allocator](https://github.com/GPUOpen-LibrariesAndSDKs/D3D12MemoryAllocator)** - equivalent library for Direct3D 12. License: MIT. - **[Awesome Vulkan](https://github.com/vinjn/awesome-vulkan)** - a curated list of awesome Vulkan libraries, debuggers and resources. - **[vcpkg](https://github.com/Microsoft/vcpkg)** dependency manager from Microsoft also offers a port of this library. - **[VulkanMemoryAllocator-Hpp](https://github.com/YaaZ/VulkanMemoryAllocator-Hpp)** - C++ binding for this library. License: CC0-1.0. - **[PyVMA](https://github.com/realitix/pyvma)** - Python wrapper for this library. Author: Jean-Sébastien B. (@realitix). License: Apache 2.0. - **[vk-mem](https://github.com/gwihlidal/vk-mem-rs)** - Rust binding for this library. Author: Graham Wihlidal. License: Apache 2.0 or MIT. 
- **[Haskell bindings](https://hackage.haskell.org/package/VulkanMemoryAllocator)**, **[github](https://github.com/expipiplus1/vulkan/tree/master/VulkanMemoryAllocator)** - Haskell bindings for this library. Author: Ellie Hermaszewska (@expipiplus1). License BSD-3-Clause. - **[vma_sample_sdl](https://github.com/rextimmy/vma_sample_sdl)** - SDL port of the sample app of this library (with the goal of running it on multiple platforms, including MacOS). Author: @rextimmy. License: MIT. - **[vulkan-malloc](https://github.com/dylanede/vulkan-malloc)** - Vulkan memory allocation library for Rust. Based on version 1 of this library. Author: Dylan Ede (@dylanede). License: MIT / Apache 2.0. ================================================ FILE: deps/vma/src/vk_mem_alloc.cpp ================================================ #define VMA_IMPLEMENTATION #define WIN32_LEAN_AND_MEAN #include "vk_mem_alloc.h" ================================================ FILE: deps/vma/src/vk_mem_alloc.h ================================================ // // Copyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H #define AMD_VULKAN_MEMORY_ALLOCATOR_H /** \mainpage Vulkan Memory Allocator Version 3.2.1 Copyright (c) 2017-2025 Advanced Micro Devices, Inc. All rights reserved. \n License: MIT \n See also: [product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/), [repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) API documentation divided into groups: [Topics](topics.html) General documentation chapters: - User guide - \subpage quick_start - [Project setup](@ref quick_start_project_setup) - [Initialization](@ref quick_start_initialization) - [Resource allocation](@ref quick_start_resource_allocation) - \subpage choosing_memory_type - [Usage](@ref choosing_memory_type_usage) - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) - \subpage memory_mapping - [Copy functions](@ref memory_mapping_copy_functions) - [Mapping functions](@ref memory_mapping_mapping_functions) - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) - [Cache flush and invalidate](@ref memory_mapping_cache_control) - \subpage staying_within_budget - [Querying for budget](@ref staying_within_budget_querying_for_budget) - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) - \subpage resource_aliasing - \subpage custom_memory_pools - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) - [When not to use custom pools](@ref 
custom_memory_pools_when_not_use) - [Linear allocation algorithm](@ref linear_algorithm) - [Free-at-once](@ref linear_algorithm_free_at_once) - [Stack](@ref linear_algorithm_stack) - [Double stack](@ref linear_algorithm_double_stack) - [Ring buffer](@ref linear_algorithm_ring_buffer) - \subpage defragmentation - \subpage statistics - [Numeric statistics](@ref statistics_numeric_statistics) - [JSON dump](@ref statistics_json_dump) - \subpage allocation_annotation - [Allocation user data](@ref allocation_user_data) - [Allocation names](@ref allocation_names) - \subpage virtual_allocator - \subpage debugging_memory_usage - [Memory initialization](@ref debugging_memory_usage_initialization) - [Margins](@ref debugging_memory_usage_margins) - [Corruption detection](@ref debugging_memory_usage_corruption_detection) - [Leak detection features](@ref debugging_memory_usage_leak_detection) - \subpage other_api_interop - \subpage usage_patterns - [GPU-only resource](@ref usage_patterns_gpu_only) - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) - [Readback](@ref usage_patterns_readback) - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) - [Other use cases](@ref usage_patterns_other_use_cases) - \subpage configuration - [Pointers to Vulkan functions](@ref config_Vulkan_functions) - [Custom host memory allocator](@ref custom_memory_allocator) - [Device memory allocation callbacks](@ref allocation_callbacks) - [Device heap memory limit](@ref heap_memory_limit) - Extension support - \subpage vk_khr_dedicated_allocation - \subpage enabling_buffer_device_address - \subpage vk_ext_memory_priority - \subpage vk_amd_device_coherent_memory - \subpage vk_khr_external_memory_win32 - \subpage general_considerations - [Thread safety](@ref general_considerations_thread_safety) - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) - 
[Allocation algorithm](@ref general_considerations_allocation_algorithm) - [Features not supported](@ref general_considerations_features_not_supported) \defgroup group_init Library initialization \brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. \defgroup group_alloc Memory allocation \brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). \defgroup group_virtual Virtual allocator \brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm for user-defined purpose without allocating any real GPU memory. \defgroup group_stats Statistics \brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. See documentation chapter: \ref statistics. */ #ifdef __cplusplus extern "C" { #endif #if !defined(VULKAN_H_) #include #endif #if !defined(VMA_VULKAN_VERSION) #if defined(VK_VERSION_1_4) #define VMA_VULKAN_VERSION 1004000 #elif defined(VK_VERSION_1_3) #define VMA_VULKAN_VERSION 1003000 #elif defined(VK_VERSION_1_2) #define VMA_VULKAN_VERSION 1002000 #elif defined(VK_VERSION_1_1) #define VMA_VULKAN_VERSION 1001000 #else #define VMA_VULKAN_VERSION 1000000 #endif #endif #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; extern PFN_vkAllocateMemory vkAllocateMemory; extern PFN_vkFreeMemory vkFreeMemory; extern PFN_vkMapMemory vkMapMemory; extern PFN_vkUnmapMemory vkUnmapMemory; extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; extern 
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; extern PFN_vkBindBufferMemory vkBindBufferMemory; extern PFN_vkBindImageMemory vkBindImageMemory; extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; extern PFN_vkCreateBuffer vkCreateBuffer; extern PFN_vkDestroyBuffer vkDestroyBuffer; extern PFN_vkCreateImage vkCreateImage; extern PFN_vkDestroyImage vkDestroyImage; extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; #if VMA_VULKAN_VERSION >= 1001000 extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; extern PFN_vkBindImageMemory2 vkBindImageMemory2; extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; #endif // #if VMA_VULKAN_VERSION >= 1001000 #endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES #if !defined(VMA_DEDICATED_ALLOCATION) #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation #define VMA_DEDICATED_ALLOCATION 1 #else #define VMA_DEDICATED_ALLOCATION 0 #endif #endif #if !defined(VMA_BIND_MEMORY2) #if VK_KHR_bind_memory2 #define VMA_BIND_MEMORY2 1 #else #define VMA_BIND_MEMORY2 0 #endif #endif #if !defined(VMA_MEMORY_BUDGET) #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) #define VMA_MEMORY_BUDGET 1 #else #define VMA_MEMORY_BUDGET 0 #endif #endif // Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. #if !defined(VMA_BUFFER_DEVICE_ADDRESS) #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 #define VMA_BUFFER_DEVICE_ADDRESS 1 #else #define VMA_BUFFER_DEVICE_ADDRESS 0 #endif #endif // Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. 
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers.
#if !defined(VMA_KHR_MAINTENANCE4)
    #if VK_KHR_maintenance4
        #define VMA_KHR_MAINTENANCE4 1
    #else
        #define VMA_KHR_MAINTENANCE4 0
    #endif
#endif

// Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers.
#if !defined(VMA_KHR_MAINTENANCE5)
    #if VK_KHR_maintenance5
        #define VMA_KHR_MAINTENANCE5 1
    #else
        #define VMA_KHR_MAINTENANCE5 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory_win32 device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY_WIN32)
    #if VK_KHR_external_memory_win32
        #define VMA_EXTERNAL_MEMORY_WIN32 1
    #else
        #define VMA_EXTERNAL_MEMORY_WIN32 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, appropriately. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE  __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
// structure that will be extended via the pNext chain.
#ifndef VMA_EXTENDS_VK_STRUCT
    #define VMA_EXTENDS_VK_STRUCT(vkStruct)
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to, if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared.
// - The name of another member in the struct where the pointer is declared.
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
//   this means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers (64-bit targets,
// matching the platform list used by vulkan_core.h) then we can give them
// nullability annotations as well.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

// When 1 (the default), the statistics/JSON-dump string API is compiled in.
#ifndef VMA_STATS_STRING_ENABLED
    #define VMA_STATS_STRING_ENABLED 1
#endif
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // // INTERFACE // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. #ifndef _VMA_ENUM_DECLARATIONS /** \addtogroup group_init @{ */ /// Flags for created #VmaAllocator. typedef enum VmaAllocatorCreateFlagBits { /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. Using this flag may increase performance because internal mutexes are not used. */ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, /** \brief Enables usage of VK_KHR_dedicated_allocation extension. The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. Using this extension will automatically allocate dedicated blocks of memory for some buffers and images instead of suballocating place for them out of bigger memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag) when it is recommended by the driver. It may improve performance on some GPUs. You may set this flag only if you found out that following device extensions are supported, you enabled them while creating Vulkan device passed as VmaAllocatorCreateInfo::device, and you want them to be used internally by this library: - VK_KHR_get_memory_requirements2 (device extension) - VK_KHR_dedicated_allocation (device extension) When this flag is set, you can experience following warnings reported by Vulkan validation layer. You can ignore them. 
> vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. */ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, /** Enables usage of VK_KHR_bind_memory2 extension. The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. You may set this flag only if you found out that this device extension is supported, you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, and you want it to be used internally by this library. The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, which allow to pass a chain of `pNext` structures while binding. This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). */ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, /** Enables usage of VK_EXT_memory_budget extension. You may set this flag only if you found out that this device extension is supported, you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, and you want it to be used internally by this library, along with another instance extension VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). The extension provides query for current memory usage and budget, which will probably be more accurate than an estimation used by the library otherwise. */ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, /** Enables usage of VK_AMD_device_coherent_memory extension. 
You may set this flag only if you: - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, - want it to be used internally by this library. The extension and accompanying device feature provide access to memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, returning `VK_ERROR_FEATURE_NOT_PRESENT`. */ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, /** Enables usage of "buffer device address" feature, which allows you to use function `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. You may set this flag only if you: 1. (For Vulkan version < 1.2) Found as available and enabled device extension VK_KHR_buffer_device_address. This extension is promoted to core Vulkan 1.2. 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to allocated memory blocks wherever it might be needed. For more information, see documentation chapter \ref enabling_buffer_device_address. */ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, /** Enables usage of VK_EXT_memory_priority extension in the library. 
You may set this flag only if you found available and enabled this device extension, along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, while creating Vulkan device passed as VmaAllocatorCreateInfo::device. When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. Larger values are higher priority. The granularity of the priorities is implementation-dependent. It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. The value to be used for default priority is 0.5. For more details, see the documentation of the VK_EXT_memory_priority extension. */ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, /** Enables usage of VK_KHR_maintenance4 extension in the library. You may set this flag only if you found available and enabled this device extension, while creating Vulkan device passed as VmaAllocatorCreateInfo::device. */ VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080, /** Enables usage of VK_KHR_maintenance5 extension in the library. You should set this flag if you found available and enabled this device extension, while creating Vulkan device passed as VmaAllocatorCreateInfo::device. */ VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100, /** Enables usage of VK_KHR_external_memory_win32 extension in the library. You should set this flag if you found available and enabled this device extension, while creating Vulkan device passed as VmaAllocatorCreateInfo::device. For more information, see \ref vk_khr_external_memory_win32. 
*/ VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT = 0x00000200, VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocatorCreateFlagBits; /// See #VmaAllocatorCreateFlagBits. typedef VkFlags VmaAllocatorCreateFlags; /** @} */ /** \addtogroup group_alloc @{ */ /// \brief Intended usage of the allocated memory. typedef enum VmaMemoryUsage { /** No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements. */ VMA_MEMORY_USAGE_UNKNOWN = 0, /** \deprecated Obsolete, preserved for backward compatibility. Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_GPU_ONLY = 1, /** \deprecated Obsolete, preserved for backward compatibility. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. */ VMA_MEMORY_USAGE_CPU_ONLY = 2, /** \deprecated Obsolete, preserved for backward compatibility. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_CPU_TO_GPU = 3, /** \deprecated Obsolete, preserved for backward compatibility. Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. */ VMA_MEMORY_USAGE_GPU_TO_CPU = 4, /** \deprecated Obsolete, preserved for backward compatibility. Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_CPU_COPY = 5, /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. */ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, /** Selects best memory type automatically. This flag is recommended for most common use cases. 
When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() and not with generic memory allocation functions. */ VMA_MEMORY_USAGE_AUTO = 7, /** Selects best memory type automatically with preference for GPU (device) memory. When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() and not with generic memory allocation functions. */ VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, /** Selects best memory type automatically with preference for CPU (host) memory. When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() and not with generic memory allocation functions. 
*/ VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF } VmaMemoryUsage; /// Flags to be passed as VmaAllocationCreateInfo::flags. typedef enum VmaAllocationCreateFlagBits { /** \brief Set this flag if the allocation should have its own memory block. Use it for special, big resources, like fullscreen images used as attachments. If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo` structure is applied if possible. */ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. If new allocation cannot be placed in any of the existing blocks, allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. */ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. It is valid to use this flag for allocation made from memory type that is not `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is useful if you need an allocation that is efficient to use on GPU (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that support it (e.g. Intel GPU). */ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a null-terminated string. Instead of copying pointer value, a local copy of the string is made and stored in allocation's `pName`. The string is automatically freed together with the allocation. It is also used in vmaBuildStatsString(). 
*/ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, /** Allocation will be created from upper stack in a double stack pool. This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. */ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, /** Create both buffer/image and allocation, but don't bind them together. It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). Otherwise it is ignored. If you want to make sure the new buffer/image is not tied to the new memory allocation through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. */ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, /** Create allocation only if additional device memory required for it, if any, won't exceed memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. */ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, /** \brief Set this flag if the allocated memory will have aliasing resources. Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. */ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, /** Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. This includes allocations created in \ref custom_memory_pools. 
Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. \warning Violating this declaration may work correctly, but will likely be very slow. Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. */ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, /** Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. This includes allocations created in \ref custom_memory_pools. Declares that mapped memory can be read, written, and accessed in random order, so a `HOST_CACHED` memory type is preferred. */ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, /** Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected if it may improve performance. By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and issue an explicit transfer to write/read your data. To prepare for this possibility, don't forget to add appropriate flags like `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. 
*/ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, /** Allocation strategy that chooses smallest possible free range for the allocation to minimize memory usage and fragmentation, possibly at the expense of allocation time. */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, /** Allocation strategy that chooses first suitable free range for the allocation - not necessarily in terms of the smallest offset but the one that is easiest and fastest to find to minimize allocation time, possibly at the expense of allocation quality. */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, /** Allocation strategy that chooses always the lowest offset in available space. This is not the most efficient strategy but achieves highly packed data. Used internally by defragmentation, not recommended in typical usage. */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. */ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. */ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, /** A bit mask to extract only `STRATEGY` bits from entire set of flags. */ VMA_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocationCreateFlagBits; /// See #VmaAllocationCreateFlagBits. typedef VkFlags VmaAllocationCreateFlags; /// Flags to be passed as VmaPoolCreateInfo::flags. typedef enum VmaPoolCreateFlagBits { /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. This is an optional optimization flag. 
If you always allocate using vmaCreateBuffer(), vmaCreateImage(), vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator knows exact type of your allocations so it can handle Buffer-Image Granularity in the optimal way. If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), exact type of such allocations is not known, so allocator must be conservative in handling Buffer-Image Granularity, which can lead to suboptimal allocation (wasted memory). In that case, if you can make sure you always allocate only buffers and linear images or only optimal images out of this pool, use this flag to make allocator disregard Buffer-Image Granularity and so make allocations faster and more optimal. */ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, /** \brief Enables alternative, linear allocation algorithm in this pool. Specify this flag to enable linear allocation algorithm, which always creates new allocations after last one and doesn't reuse space from allocations freed in between. It trades memory consumption for simplified algorithm and data structure, which has better performance and uses less memory for metadata. By using this flag, you can achieve behavior of free-at-once, stack, ring buffer, and double stack. For details, see documentation chapter \ref linear_algorithm. */ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_POOL_CREATE_ALGORITHM_MASK = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaPoolCreateFlagBits; /// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. typedef VkFlags VmaPoolCreateFlags; /// Flags to be passed as VmaDefragmentationInfo::flags. typedef enum VmaDefragmentationFlagBits { /* \brief Use simple but fast algorithm for defragmentation. May not achieve best results but will require least time to compute and least allocations to copy. 
*/ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. */ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, /* \brief Perform full defragmentation of memory. Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. */ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make. Only available when bufferImageGranularity is greater than 1, since it aims to reduce alignment issues between different types of resources. Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. */ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaDefragmentationFlagBits; /// See #VmaDefragmentationFlagBits. typedef VkFlags VmaDefragmentationFlags; /// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. typedef enum VmaDefragmentationMoveOperation { /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, } VmaDefragmentationMoveOperation; /** @} */ /** \addtogroup group_virtual @{ */ /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. typedef enum VmaVirtualBlockCreateFlagBits { /** \brief Enables alternative, linear allocation algorithm in this virtual block. Specify this flag to enable linear allocation algorithm, which always creates new allocations after last one and doesn't reuse space from allocations freed in between. It trades memory consumption for simplified algorithm and data structure, which has better performance and uses less memory for metadata. By using this flag, you can achieve behavior of free-at-once, stack, ring buffer, and double stack. For details, see documentation chapter \ref linear_algorithm. */ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaVirtualBlockCreateFlagBits; /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. typedef VkFlags VmaVirtualBlockCreateFlags; /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. typedef enum VmaVirtualAllocationCreateFlagBits { /** \brief Allocation will be created from upper stack in a double stack pool. This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. */ VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, /** \brief Allocation strategy that tries to minimize memory usage. 
*/ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, /** \brief Allocation strategy that tries to minimize allocation time. */ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, /** Allocation strategy that chooses always the lowest offset in available space. This is not the most efficient strategy but achieves highly packed data. */ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. */ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaVirtualAllocationCreateFlagBits; /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. typedef VkFlags VmaVirtualAllocationCreateFlags; /** @} */ #endif // _VMA_ENUM_DECLARATIONS #ifndef _VMA_DATA_TYPES_DECLARATIONS /** \addtogroup group_init @{ */ /** \struct VmaAllocator \brief Represents main object of this library initialized. Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. Call function vmaDestroyAllocator() to destroy it. It is recommended to create just one object of this type per `VkDevice` object, right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. */ VK_DEFINE_HANDLE(VmaAllocator) /** @} */ /** \addtogroup group_alloc @{ */ /** \struct VmaPool \brief Represents custom memory pool Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. Call function vmaDestroyPool() to destroy it. For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). 
*/ VK_DEFINE_HANDLE(VmaPool) /** \struct VmaAllocation \brief Represents single memory allocation. It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type plus unique offset. There are multiple ways to create such object. You need to fill structure VmaAllocationCreateInfo. For more information see [Choosing memory type](@ref choosing_memory_type). Although the library provides convenience functions that create Vulkan buffer or image, allocate memory for it and bind them together, binding of the allocation to a buffer or an image is out of scope of the allocation itself. Allocation object can exist without buffer/image bound, binding can be done manually by the user, and destruction of it can be done independently of destruction of the allocation. The object also remembers its size and some other information. To retrieve this information, use function vmaGetAllocationInfo() and inspect returned structure VmaAllocationInfo. */ VK_DEFINE_HANDLE(VmaAllocation) /** \struct VmaDefragmentationContext \brief An opaque object that represents started defragmentation process. Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. Call function vmaEndDefragmentation() to destroy it. */ VK_DEFINE_HANDLE(VmaDefragmentationContext) /** @} */ /** \addtogroup group_virtual @{ */ /** \struct VmaVirtualAllocation \brief Represents single memory allocation done inside VmaVirtualBlock. Use it as a unique identifier to virtual allocation within the single block. Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. */ VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) /** @} */ /** \addtogroup group_virtual @{ */ /** \struct VmaVirtualBlock \brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. 
Use vmaDestroyVirtualBlock() to destroy it. For more information, see documentation chapter \ref virtual_allocator. This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. */ VK_DEFINE_HANDLE(VmaVirtualBlock) /** @} */ /** \addtogroup group_init @{ */ /// Callback function called after successful vkAllocateMemory. typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryType, VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, VkDeviceSize size, void* VMA_NULLABLE pUserData); /// Callback function called before vkFreeMemory. typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryType, VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, VkDeviceSize size, void* VMA_NULLABLE pUserData); /** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. Provided for informative purpose, e.g. to gather statistics about number of allocations or total amount of memory allocated in Vulkan. Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. */ typedef struct VmaDeviceMemoryCallbacks { /// Optional, can be null. PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; /// Optional, can be null. PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; /// Optional, can be null. void* VMA_NULLABLE pUserData; } VmaDeviceMemoryCallbacks; /** \brief Pointers to some Vulkan functions - a subset used by the library. Used in VmaAllocatorCreateInfo::pVulkanFunctions. */ typedef struct VmaVulkanFunctions { /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. 
PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; PFN_vkMapMemory VMA_NULLABLE vkMapMemory; PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; PFN_vkCreateImage VMA_NULLABLE vkCreateImage; PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; #endif #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 /// Fetch from "vkGetPhysicalDeviceMemoryProperties2" on Vulkan >= 1.1, but you can also fetch it from "vkGetPhysicalDeviceMemoryProperties2KHR" if you enabled extension VK_KHR_get_physical_device_properties2. PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; #endif #if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements; #endif #if VMA_EXTERNAL_MEMORY_WIN32 PFN_vkGetMemoryWin32HandleKHR VMA_NULLABLE vkGetMemoryWin32HandleKHR; #else void* VMA_NULLABLE vkGetMemoryWin32HandleKHR; #endif } VmaVulkanFunctions; /// Description of an Allocator to be created. typedef struct VmaAllocatorCreateInfo { /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. VmaAllocatorCreateFlags flags; /// Vulkan physical device. /** It must be valid throughout whole lifetime of created allocator. */ VkPhysicalDevice VMA_NOT_NULL physicalDevice; /// Vulkan device. /** It must be valid throughout whole lifetime of created allocator. */ VkDevice VMA_NOT_NULL device; /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. /** Set to 0 to use default, which is currently 256 MiB. */ VkDeviceSize preferredLargeHeapBlockSize; /// Custom CPU memory allocation callbacks. Optional. /** Optional, can be null.
When specified, will also be used for all CPU-side memory allocations. */ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. /** Optional, can be null. */ const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks; /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on maximum number of bytes that can be allocated out of particular Vulkan memory heap. Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that heap. This is also the default in case of `pHeapSizeLimit` = NULL. If there is a limit defined for a heap: - If user tries to allocate more memory from that heap using this allocator, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the value of this limit will be reported instead when using vmaGetMemoryProperties(). Warning! Using this feature may not be equivalent to installing a GPU with smaller amount of memory, because graphics driver doesn't necessary fail new allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is exceeded. It may return success and just silently migrate some device memory blocks to system RAM. This driver behavior can also be controlled using VK_AMD_memory_overallocation_behavior extension. */ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; /** \brief Pointers to Vulkan functions. Can be null. For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). */ const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; /** \brief Handle to Vulkan instance object. 
Starting from version 3.0.0 this member is no longer optional, it must be set! */ VkInstance VMA_NOT_NULL instance; /** \brief Optional. Vulkan version that the application uses. It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. The patch version number specified is ignored. Only the major and minor versions are considered. Only versions 1.0...1.4 are supported by the current implementation. Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. It must match the Vulkan version used by the application and supported on the selected physical device, so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance` and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used. */ uint32_t vulkanApiVersion; #if VMA_EXTERNAL_MEMORY /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` elements, defining external memory handle types of particular Vulkan memory type, to be passed using `VkExportMemoryAllocateInfoKHR`. Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. */ const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; #endif // #if VMA_EXTERNAL_MEMORY } VmaAllocatorCreateInfo; /// Information about existing #VmaAllocator object. typedef struct VmaAllocatorInfo { /** \brief Handle to Vulkan instance object. This is the same value as has been passed through VmaAllocatorCreateInfo::instance. */ VkInstance VMA_NOT_NULL instance; /** \brief Handle to Vulkan physical device object. 
This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. */ VkPhysicalDevice VMA_NOT_NULL physicalDevice; /** \brief Handle to Vulkan device object. This is the same value as has been passed through VmaAllocatorCreateInfo::device. */ VkDevice VMA_NOT_NULL device; } VmaAllocatorInfo; /** @} */ /** \addtogroup group_stats @{ */ /** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. These are fast to calculate. See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). */ typedef struct VmaStatistics { /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. */ uint32_t blockCount; /** \brief Number of #VmaAllocation objects allocated. Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. */ uint32_t allocationCount; /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. */ VkDeviceSize blockBytes; /** \brief Total number of bytes occupied by all #VmaAllocation objects. Always less or equal than `blockBytes`. Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan but unused by any #VmaAllocation. */ VkDeviceSize allocationBytes; } VmaStatistics; /** \brief More detailed statistics than #VmaStatistics. These are slower to calculate. Use for debugging purposes. See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). 
Previous version of the statistics API provided averages, but they have been removed because they can be easily calculated as: \code VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; \endcode */ typedef struct VmaDetailedStatistics { /// Basic statistics. VmaStatistics statistics; /// Number of free ranges of memory between allocations. uint32_t unusedRangeCount; /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. VkDeviceSize allocationSizeMin; /// Largest allocation size. 0 if there are 0 allocations. VkDeviceSize allocationSizeMax; /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. VkDeviceSize unusedRangeSizeMin; /// Largest empty range size. 0 if there are 0 empty ranges. VkDeviceSize unusedRangeSizeMax; } VmaDetailedStatistics; /** \brief General statistics from current state of the Allocator - total memory usage across all memory heaps and types. These are slower to calculate. Use for debugging purposes. See function vmaCalculateStatistics(). */ typedef struct VmaTotalStatistics { VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; VmaDetailedStatistics total; } VmaTotalStatistics; /** \brief Statistics of current memory usage and available budget for a specific memory heap. These are fast to calculate. See function vmaGetHeapBudgets(). */ typedef struct VmaBudget { /** \brief Statistics fetched from the library. */ VmaStatistics statistics; /** \brief Estimated current memory usage of the program, in bytes. Fetched from system using VK_EXT_memory_budget extension if enabled. 
It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or `VkDeviceMemory` blocks allocated outside of this library, if any. */ VkDeviceSize usage; /** \brief Estimated amount of memory available to the program, in bytes. Fetched from system using VK_EXT_memory_budget extension if enabled. It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors external to the program, decided by the operating system. Difference `budget - usage` is the amount of additional memory that can probably be allocated without problems. Exceeding the budget may result in various problems. */ VkDeviceSize budget; } VmaBudget; /** @} */ /** \addtogroup group_alloc @{ */ /** \brief Parameters of new #VmaAllocation. To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. */ typedef struct VmaAllocationCreateInfo { /// Use #VmaAllocationCreateFlagBits enum. VmaAllocationCreateFlags flags; /** \brief Intended usage of memory. You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n If `pool` is not null, this member is ignored. */ VmaMemoryUsage usage; /** \brief Flags that must be set in a Memory Type chosen for an allocation. Leave 0 if you specify memory requirements in other way. \n If `pool` is not null, this member is ignored.*/ VkMemoryPropertyFlags requiredFlags; /** \brief Flags that preferably should be set in a memory type chosen for an allocation. Set to 0 if no additional flags are preferred. \n If `pool` is not null, this member is ignored. */ VkMemoryPropertyFlags preferredFlags; /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. 
Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if it meets other requirements specified by this structure, with no further restrictions on memory type index. \n If `pool` is not null, this member is ignored. */ uint32_t memoryTypeBits; /** \brief Pool that this allocation should be created in. Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. */ VmaPool VMA_NULLABLE pool; /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either null or pointer to a null-terminated string. The string will be then copied to internal buffer, so it doesn't need to be valid after allocation call. */ void* VMA_NULLABLE pUserData; /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. */ float priority; } VmaAllocationCreateInfo; /// Describes parameter of created #VmaPool. typedef struct VmaPoolCreateInfo { /** \brief Vulkan memory type index to allocate this pool from. */ uint32_t memoryTypeIndex; /** \brief Use combination of #VmaPoolCreateFlagBits. */ VmaPoolCreateFlags flags; /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. Specify nonzero to set explicit, constant size of memory blocks used by this pool. Leave 0 to use default and let the library manage block sizes automatically. 
Sizes of particular blocks may vary. In this case, the pool will also support dedicated allocations. */ VkDeviceSize blockSize; /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. Set to 0 to have no preallocated blocks and allow the pool be completely empty. */ size_t minBlockCount; /** \brief Maximum number of blocks that can be allocated in this pool. Optional. Set to 0 to use default, which is `SIZE_MAX`, which means no limit. Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated throughout whole lifetime of this pool. */ size_t maxBlockCount; /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. Otherwise, this variable is ignored. */ float priority; /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, e.g. when doing interop with OpenGL. */ VkDeviceSize minAllocationAlignment; /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. Please note that some structures, e.g. 
`VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, can be attached automatically by this library when using other, more convenient of its features. */ void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext; } VmaPoolCreateInfo; /** @} */ /** \addtogroup group_alloc @{ */ /** Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). There is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2. */ typedef struct VmaAllocationInfo { /** \brief Memory type index that this allocation was allocated from. It never changes. */ uint32_t memoryType; /** \brief Handle to Vulkan memory object. Same memory object can be shared by multiple allocations. It can change after the allocation is moved during \ref defragmentation. */ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation and apply this offset automatically. It can change after the allocation is moved during \ref defragmentation. */ VkDeviceSize offset; /** \brief Size of this allocation, in bytes. It never changes. \note Allocation size returned in this variable may be greater than the size requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the allocation is accessible for operations on memory e.g. using a pointer after mapping with vmaMapMemory(), but operations on the resource e.g. 
using `vkCmdCopyBuffer` must be limited to the size of the resource. */ VkDeviceSize size; /** \brief Pointer to the beginning of this allocation as mapped data. If the allocation hasn't been mapped using vmaMapMemory() and hasn't been created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. It can change after call to vmaMapMemory(), vmaUnmapMemory(). It can also change after the allocation is moved during \ref defragmentation. */ void* VMA_NULLABLE pMappedData; /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). It can change after call to vmaSetAllocationUserData() for this allocation. */ void* VMA_NULLABLE pUserData; /** \brief Custom allocation name that was set with vmaSetAllocationName(). It can change after call to vmaSetAllocationName() for this allocation. Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. */ const char* VMA_NULLABLE pName; } VmaAllocationInfo; /// Extended parameters of a #VmaAllocation object that can be retrieved using function vmaGetAllocationInfo2(). typedef struct VmaAllocationInfo2 { /** \brief Basic parameters of the allocation. If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead. */ VmaAllocationInfo allocationInfo; /** \brief Size of the `VkDeviceMemory` block that the allocation belongs to. In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`. */ VkDeviceSize blockSize; /** \brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block. When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation (if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled). 
*/ VkBool32 dedicatedMemory; } VmaAllocationInfo2; /** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass. Should return true if the defragmentation needs to stop current pass. */ typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData); /** \brief Parameters for defragmentation. To be used with function vmaBeginDefragmentation(). */ typedef struct VmaDefragmentationInfo { /// \brief Use combination of #VmaDefragmentationFlagBits. VmaDefragmentationFlags flags; /** \brief Custom pool to be defragmented. If null then default pools will undergo defragmentation process. */ VmaPool VMA_NULLABLE pool; /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. `0` means no limit. */ VkDeviceSize maxBytesPerPass; /** \brief Maximum number of allocations that can be moved during single pass to a different place. `0` means no limit. */ uint32_t maxAllocationsPerPass; /** \brief Optional custom callback for stopping vmaBeginDefragmentation(). Have to return true for breaking current defragmentation pass. */ PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback; /// \brief Optional data to pass to custom callback for stopping pass of defragmentation. void* VMA_NULLABLE pBreakCallbackUserData; } VmaDefragmentationInfo; /// Single move of an allocation to be done for defragmentation. typedef struct VmaDefragmentationMove { /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. VmaDefragmentationMoveOperation operation; /// Allocation that should be moved. VmaAllocation VMA_NOT_NULL srcAllocation; /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. \warning Do not store this allocation in your data structures! 
It exists only temporarily, for the duration of the defragmentation pass, to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. */ VmaAllocation VMA_NOT_NULL dstTmpAllocation; } VmaDefragmentationMove; /** \brief Parameters for incremental defragmentation steps. To be used with function vmaBeginDefragmentationPass(). */ typedef struct VmaDefragmentationPassMoveInfo { /// Number of elements in the `pMoves` array. uint32_t moveCount; /** \brief Array of moves to be performed by the user in the current defragmentation pass. Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). For each element, you should: 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. 3. Make sure these commands finished executing on the GPU. 4. Destroy the old buffer/image. Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). After this call, the allocation will point to the new place in memory. Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. Alternatively, if you decide you want to completely remove the allocation: 1. Destroy its buffer/image. 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. Then, after vmaEndDefragmentationPass() the allocation will be freed. */ VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; } VmaDefragmentationPassMoveInfo; /// Statistics returned for defragmentation process in function vmaEndDefragmentation(). 
typedef struct VmaDefragmentationStats { /// Total number of bytes that have been copied while moving allocations to different places. VkDeviceSize bytesMoved; /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. VkDeviceSize bytesFreed; /// Number of allocations that have been moved to different places. uint32_t allocationsMoved; /// Number of empty `VkDeviceMemory` objects that have been released to the system. uint32_t deviceMemoryBlocksFreed; } VmaDefragmentationStats; /** @} */ /** \addtogroup group_virtual @{ */ /// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). typedef struct VmaVirtualBlockCreateInfo { /** \brief Total size of the virtual block. Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. */ VkDeviceSize size; /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. */ VmaVirtualBlockCreateFlags flags; /** \brief Custom CPU memory allocation callbacks. Optional. Optional, can be null. When specified, they will be used for all CPU-side memory allocations. */ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; } VmaVirtualBlockCreateInfo; /// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). typedef struct VmaVirtualAllocationCreateInfo { /** \brief Size of the allocation. Cannot be zero. */ VkDeviceSize size; /** \brief Required alignment of the allocation. Optional. Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. */ VkDeviceSize alignment; /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. */ VmaVirtualAllocationCreateFlags flags; /** \brief Custom pointer to be associated with the allocation. Optional. 
It can be any value and can be used for user-defined purposes. It can be fetched or changed later. */ void* VMA_NULLABLE pUserData; } VmaVirtualAllocationCreateInfo; /// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). typedef struct VmaVirtualAllocationInfo { /** \brief Offset of the allocation. Offset at which the allocation was made. */ VkDeviceSize offset; /** \brief Size of the allocation. Same value as passed in VmaVirtualAllocationCreateInfo::size. */ VkDeviceSize size; /** \brief Custom pointer associated with the allocation. Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). */ void* VMA_NULLABLE pUserData; } VmaVirtualAllocationInfo; /** @} */ #endif // _VMA_DATA_TYPES_DECLARATIONS #ifndef _VMA_FUNCTION_HEADERS /** \addtogroup group_init @{ */ /// Creates #VmaAllocator object. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); /// Destroys allocator object. VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( VmaAllocator VMA_NULLABLE allocator); /** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to `VkPhysicalDevice`, `VkDevice` etc. every time using this function. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); /** PhysicalDeviceProperties are fetched from physicalDevice by the allocator. You can access it here, without fetching it again on your own. 
*/ VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( VmaAllocator VMA_NOT_NULL allocator, const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); /** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. You can access it here, without fetching it again on your own. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( VmaAllocator VMA_NOT_NULL allocator, const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); /** \brief Given Memory Type Index, returns Property Flags of this memory type. This is just a convenience function. Same information can be obtained using vmaGetMemoryProperties(). */ VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); /** \brief Sets index of the current frame. */ VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( VmaAllocator VMA_NOT_NULL allocator, uint32_t frameIndex); /** @} */ /** \addtogroup group_stats @{ */ /** \brief Retrieves statistics from current state of the Allocator. This function is called "calculate" not "get" because it has to traverse all internal data structures, so it may be quite slow. Use it for debugging purposes. For faster but more brief statistics suitable to be called every frame or every allocation, use vmaGetHeapBudgets(). Note that when using allocator from multiple threads, returned information may immediately become outdated. */ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( VmaAllocator VMA_NOT_NULL allocator, VmaTotalStatistics* VMA_NOT_NULL pStats); /** \brief Retrieves information about current memory usage and budget for all memory heaps. \param allocator \param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. 
This function is called "get" not "calculate" because it is very fast, suitable to be called every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). Note that when using allocator from multiple threads, returned information may immediately become outdated. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( VmaAllocator VMA_NOT_NULL allocator, VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); /** @} */ /** \addtogroup group_alloc @{ */ /** \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. This algorithm tries to find a memory type that: - Is allowed by memoryTypeBits. - Contains all the flags from pAllocationCreateInfo->requiredFlags. - Matches intended usage. - Has as many flags from pAllocationCreateInfo->preferredFlags as possible. \return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result from this function or any other allocating function probably means that your device doesn't support any memory type with requested features for the specific type of resource you want to use it for. Please check parameters of your resource, like image layout (OPTIMAL versus LINEAR) or mip level count. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, uint32_t* VMA_NOT_NULL pMemoryTypeIndex); /** \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy buffer that never has memory bound. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, uint32_t* VMA_NOT_NULL pMemoryTypeIndex); /** \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy image that never has memory bound. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VmaAllocator VMA_NOT_NULL allocator, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, uint32_t* VMA_NOT_NULL pMemoryTypeIndex); /** \brief Allocates Vulkan device memory and creates #VmaPool object. \param allocator Allocator object. \param pCreateInfo Parameters of pool to create. \param[out] pPool Handle to created pool. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( VmaAllocator VMA_NOT_NULL allocator, const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); /** \brief Destroys #VmaPool object and frees Vulkan device memory. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NULLABLE pool); /** @} */ /** \addtogroup group_stats @{ */ /** \brief Retrieves statistics of existing #VmaPool object. \param allocator Allocator object. \param pool Pool object. \param[out] pPoolStats Statistics of specified pool. Note that when using the pool from multiple threads, returned information may immediately become outdated. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, VmaStatistics* VMA_NOT_NULL pPoolStats); /** \brief Retrieves detailed statistics of existing #VmaPool object. \param allocator Allocator object. 
\param pool Pool object. \param[out] pPoolStats Statistics of specified pool. */ VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); /** @} */ /** \addtogroup group_alloc @{ */ /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, `VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). Possible return values: - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. - `VK_SUCCESS` - corruption detection has been performed and succeeded. - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. `VMA_ASSERT` is also fired in that case. - Other value: Error returned by Vulkan, e.g. memory mapping failure. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool); /** \brief Retrieves name of a custom pool. After the call `ppName` is either null or points to an internally-owned null-terminated string containing name of the pool that was previously set. The pointer becomes invalid when the pool is destroyed or its name is changed using vmaSetPoolName(). */ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, const char* VMA_NULLABLE* VMA_NOT_NULL ppName); /** \brief Sets name of a custom pool. `pName` can be either null or pointer to a null-terminated string with new name for the pool. Function makes internal copy of the string, so it can be changed or freed immediately after this call. 
*/ VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, const char* VMA_NULLABLE pName); /** \brief General purpose memory allocation. \param allocator \param pVkMemoryRequirements \param pCreateInfo \param[out] pAllocation Handle to allocated memory. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), vmaCreateBuffer(), vmaCreateImage() instead whenever possible. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( VmaAllocator VMA_NOT_NULL allocator, const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /** \brief General purpose memory allocation for multiple allocation objects at once. \param allocator Allocator object. \param pVkMemoryRequirements Memory requirements for each allocation. \param pCreateInfo Creation parameters for each allocation. \param allocationCount Number of allocations to make. \param[out] pAllocations Pointer to array that will be filled with handles to created allocations. \param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. It is just a general purpose allocation function able to make multiple allocations at once. It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. All allocations are made using same parameters. 
All of them are created out of the same memory pool and type. If any allocation fails, all allocations already made within this function call are also freed, so that when returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VmaAllocator VMA_NOT_NULL allocator, const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, size_t allocationCount, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); /** \brief Allocates memory suitable for given `VkBuffer`. \param allocator \param buffer \param pCreateInfo \param[out] pAllocation Handle to allocated memory. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). This is a special-purpose function. In most cases you should use vmaCreateBuffer(). You must free the allocation using vmaFreeMemory() when no longer needed. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( VmaAllocator VMA_NOT_NULL allocator, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /** \brief Allocates memory suitable for given `VkImage`. \param allocator \param image \param pCreateInfo \param[out] pAllocation Handle to allocated memory. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). It only creates #VmaAllocation. 
To bind the memory to the buffer, use vmaBindImageMemory(). This is a special-purpose function. In most cases you should use vmaCreateImage(). You must free the allocation using vmaFreeMemory() when no longer needed. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. */ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( VmaAllocator VMA_NOT_NULL allocator, const VmaAllocation VMA_NULLABLE allocation); /** \brief Frees memory and destroys multiple allocations. Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), vmaAllocateMemoryPages() and other functions. It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. Allocations in `pAllocations` array can come from any memory pools and types. Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. */ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( VmaAllocator VMA_NOT_NULL allocator, size_t allocationCount, const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); /** \brief Returns current information about specified allocation. Current parameters of given allocation are returned in `pAllocationInfo`. Although this function doesn't lock any mutex, so it should be quite efficient, you should avoid calling it too often. 
You can retrieve same VmaAllocationInfo structure while creating your resource, from function vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change (e.g. due to defragmentation). There is also a new function vmaGetAllocationInfo2() that offers extended information about the allocation, returned using new structure #VmaAllocationInfo2. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo); /** \brief Returns extended information about specified allocation. Current parameters of given allocation are returned in `pAllocationInfo`. Extended parameters in structure #VmaAllocationInfo2 include memory block size and a flag telling whether the allocation has dedicated memory. It can be useful e.g. for interop with OpenGL. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo); /** \brief Sets pUserData in given allocation to new value. The value of pointer `pUserData` is copied to allocation's `pUserData`. It is opaque, so you can use it however you want - e.g. as a pointer, ordinal number or some handle to you own data. */ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, void* VMA_NULLABLE pUserData); /** \brief Sets pName in given allocation to new value. `pName` must be either null, or pointer to a null-terminated string. The function makes local copy of the string and sets it as allocation's `pName`. String passed as pName doesn't need to be valid for whole lifetime of the allocation - you can free it after this call. String previously pointed by allocation's `pName` is freed from memory. 
*/ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const char* VMA_NULLABLE pName); /** \brief Given an allocation, returns Property Flags of its memory type. This is just a convenience function. Same information can be obtained using vmaGetAllocationInfo() + vmaGetMemoryProperties(). */ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); #if VMA_EXTERNAL_MEMORY_WIN32 /** \brief Given an allocation, returns Win32 handle that may be imported by other processes or APIs. \param hTargetProcess Must be a valid handle to target process or null. If it's null, the function returns handle for the current process. \param[out] pHandle Output parameter that returns the handle. The function fills `pHandle` with handle that can be used in target process. The handle is fetched using function `vkGetMemoryWin32HandleKHR`. When no longer needed, you must close it using: \code CloseHandle(handle); \endcode You can close it any time, before or after destroying the allocation object. It is reference-counted internally by Windows. Note the handle is returned for the entire `VkDeviceMemory` block that the allocation belongs to. If the allocation is sub-allocated from a larger block, you may need to consider the offset of the allocation (VmaAllocationInfo::offset). If the function fails with `VK_ERROR_FEATURE_NOT_PRESENT` error code, please double-check that VmaVulkanFunctions::vkGetMemoryWin32HandleKHR function pointer is set, e.g. either by using `VMA_DYNAMIC_VULKAN_FUNCTIONS` or by manually passing it through VmaAllocatorCreateInfo::pVulkanFunctions. For more information, see chapter \ref vk_khr_external_memory_win32. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle); #endif // VMA_EXTERNAL_MEMORY_WIN32 /** \brief Maps memory represented by given allocation and returns pointer to it. Maps memory represented by given allocation to make it accessible to CPU code. When succeeded, `*ppData` contains pointer to first byte of this memory. \warning If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is correctly offsetted to the beginning of region assigned to this particular allocation. Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block. You should not add VmaAllocationInfo::offset to it! Mapping is internally reference-counted and synchronized, so despite raw Vulkan function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory` multiple times simultaneously, it is safe to call this function on allocations assigned to the same memory block. Actual Vulkan memory will be mapped on first mapping and unmapped on last unmapping. If the function succeeded, you must call vmaUnmapMemory() to unmap the allocation when mapping is no longer needed or before freeing the allocation, at the latest. It also safe to call this function multiple times on the same allocation. You must call vmaUnmapMemory() same number of times as you called vmaMapMemory(). It is also safe to call this function on allocation created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time. You must still call vmaUnmapMemory() same number of times as you called vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the "0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. This function fails when used on allocation made in memory type that is not `HOST_VISIBLE`. 
This function doesn't automatically flush or invalidate caches. If the allocation is made from a memory types that is not `HOST_COHERENT`, you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, void* VMA_NULLABLE* VMA_NOT_NULL ppData); /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). For details, see description of vmaMapMemory(). This function doesn't automatically flush or invalidate caches. If the allocation is made from a memory types that is not `HOST_COHERENT`, you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. */ VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation); /** \brief Flushes memory of given allocation. Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation. It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`. Unmap operation doesn't do that automatically. - `offset` must be relative to the beginning of allocation. - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. - `offset` and `size` don't have to be aligned. They are internally rounded down/up to multiply of `nonCoherentAtomSize`. - If `size` is 0, this call is ignored. - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, this call is ignored. Warning! `offset` and `size` are relative to the contents of given `allocation`. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. Do not pass allocation's offset as `offset`!!! This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is called, otherwise `VK_SUCCESS`. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize offset, VkDeviceSize size); /** \brief Invalidates memory of given allocation. Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation. It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`. Map operation doesn't do that automatically. - `offset` must be relative to the beginning of allocation. - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. - `offset` and `size` don't have to be aligned. They are internally rounded down/up to multiply of `nonCoherentAtomSize`. - If `size` is 0, this call is ignored. - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, this call is ignored. Warning! `offset` and `size` are relative to the contents of given `allocation`. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. Do not pass allocation's offset as `offset`!!! This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is called, otherwise `VK_SUCCESS`. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize offset, VkDeviceSize size); /** \brief Flushes memory of given set of allocations. Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations. For more information, see documentation of vmaFlushAllocation(). \param allocator \param allocationCount \param allocations \param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero. \param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. 
Null means `VK_WHOLE_SIZE` for all allocations. This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is called, otherwise `VK_SUCCESS`. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( VmaAllocator VMA_NOT_NULL allocator, uint32_t allocationCount, const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); /** \brief Invalidates memory of given set of allocations. Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations. For more information, see documentation of vmaInvalidateAllocation(). \param allocator \param allocationCount \param allocations \param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero. \param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is called, otherwise `VK_SUCCESS`. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( VmaAllocator VMA_NOT_NULL allocator, uint32_t allocationCount, const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); /** \brief Maps the allocation temporarily if needed, copies data from specified host pointer to it, and flushes the memory from the host caches if needed. \param allocator \param pSrcHostPointer Pointer to the host data that become source of the copy. \param dstAllocation Handle to the allocation that becomes destination of the copy. 
\param dstAllocationLocalOffset Offset within `dstAllocation` where to write copied data, in bytes. \param size Number of bytes to copy. This is a convenience function that allows to copy data from a host pointer to an allocation easily. Same behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation(). This function can be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. Otherwise, the function will fail and generate a Validation Layers error. `dstAllocationLocalOffset` is relative to the contents of given `dstAllocation`. If you mean whole allocation, you should pass 0. Do not pass allocation's offset within device memory block this parameter! */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation( VmaAllocator VMA_NOT_NULL allocator, const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer, VmaAllocation VMA_NOT_NULL dstAllocation, VkDeviceSize dstAllocationLocalOffset, VkDeviceSize size); /** \brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer. \param allocator \param srcAllocation Handle to the allocation that becomes source of the copy. \param srcAllocationLocalOffset Offset within `srcAllocation` where to read copied data, in bytes. \param pDstHostPointer Pointer to the host memory that become destination of the copy. \param size Number of bytes to copy. This is a convenience function that allows to copy data from an allocation to a host pointer easily. Same behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory(). 
This function should be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flag. It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. Otherwise, the function may fail and generate a Validation Layers error. It may also work very slowly when reading from an uncached memory. `srcAllocationLocalOffset` is relative to the contents of given `srcAllocation`. If you mean whole allocation, you should pass 0. Do not pass allocation's offset within device memory block as this parameter! */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL srcAllocation, VkDeviceSize srcAllocationLocalOffset, void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer, VkDeviceSize size); /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. \param allocator \param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, `VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). Possible return values: - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. - `VK_SUCCESS` - corruption detection has been performed and succeeded. - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. `VMA_ASSERT` is also fired in that case. - Other value: Error returned by Vulkan, e.g. memory mapping failure. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits); /** \brief Begins defragmentation process. \param allocator Allocator object. \param pInfo Structure filled with parameters of defragmentation. \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. \returns - `VK_SUCCESS` if defragmentation can begin. - `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. For more information about defragmentation, see documentation chapter: [Defragmentation](@ref defragmentation). */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VmaAllocator VMA_NOT_NULL allocator, const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); /** \brief Ends defragmentation process. \param allocator Allocator object. \param context Context object that has been created by vmaBeginDefragmentation(). \param[out] pStats Optional stats for the defragmentation. Can be null. Use this function to finish defragmentation started by vmaBeginDefragmentation(). */ VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationStats* VMA_NULLABLE pStats); /** \brief Starts single defragmentation pass. \param allocator Allocator object. \param context Context object that has been created by vmaBeginDefragmentation(). \param[out] pPassInfo Computed information for current pass. \returns - `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. - `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), and then preferably try another pass with vmaBeginDefragmentationPass(). 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); /** \brief Ends single defragmentation pass. \param allocator Allocator object. \param context Context object that has been created by vmaBeginDefragmentation(). \param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. After this call: - Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY (which is the default) will be pointing to the new destination place. - Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY will be freed. If no more moves are possible you can end whole defragmentation. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); /** \brief Binds buffer to allocation. Binds specified buffer to region of memory represented by specified allocation. Gets `VkDeviceMemory` handle and offset from the allocation. If you want to create a buffer, allocate memory for it and bind them together separately, you should use this function for binding instead of standard `vkBindBufferMemory()`, because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously (which is illegal in Vulkan). It is recommended to use function vmaCreateBuffer() instead of this one. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); /** \brief Binds buffer to allocation with additional parameters. \param allocator \param allocation \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. \param buffer \param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. This function is similar to vmaBindBufferMemory(), but it provides additional parameters. If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext); /** \brief Binds image to allocation. Binds specified image to region of memory represented by specified allocation. Gets `VkDeviceMemory` handle and offset from the allocation. If you want to create an image, allocate memory for it and bind them together separately, you should use this function for binding instead of standard `vkBindImageMemory()`, because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously (which is illegal in Vulkan). It is recommended to use function vmaCreateImage() instead of this one. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); /** \brief Binds image to allocation with additional parameters. \param allocator \param allocation \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. \param image \param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. This function is similar to vmaBindImageMemory(), but it provides additional parameters. If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext); /** \brief Creates a new `VkBuffer`, allocates and binds memory for it. \param allocator \param pBufferCreateInfo \param pAllocationCreateInfo \param[out] pBuffer Buffer that was created. \param[out] pAllocation Allocation that was created. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). This function automatically: -# Creates buffer. -# Allocates appropriate memory for it. -# Binds the buffer with the memory. If any of these operations fail, buffer and allocation are not created, returned value is negative error code, `*pBuffer` and `*pAllocation` are null. 
If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or separately, using `vkDestroyBuffer()` and vmaFreeMemory(). If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, VK_KHR_dedicated_allocation extension is used internally to query driver whether it requires or prefers the new buffer to have dedicated allocation. If yes, and if dedicated allocation is possible (#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated allocation for this buffer, just like when using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. \note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, although recommended as a good practice, is out of scope of this library and could be implemented by the user as a higher-level logic on top of VMA. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /** \brief Creates a buffer with additional minimum alignment. Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. for interop with OpenGL. 
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, VkDeviceSize minAlignment, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /** \brief Creates a new `VkBuffer`, binds already created memory for it. \param allocator \param allocation Allocation that provides memory to be used for binding new buffer to it. \param pBufferCreateInfo \param[out] pBuffer Buffer that was created. This function automatically: -# Creates buffer. -# Binds the buffer with the supplied memory. If any of these operations fail, buffer is not created, returned value is negative error code and `*pBuffer` is null. If the function succeeded, you must destroy the buffer when you no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding allocation you can use convenience function vmaDestroyBuffer(). \note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); /** \brief Creates a new `VkBuffer`, binds already created memory for it. \param allocator \param allocation Allocation that provides memory to be used for binding new buffer to it. \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. \param pBufferCreateInfo \param[out] pBuffer Buffer that was created. This function automatically: -# Creates buffer. -# Binds the buffer with the supplied memory. 
If any of these operations fail, buffer is not created, returned value is negative error code and `*pBuffer` is null. If the function succeeded, you must destroy the buffer when you no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding allocation you can use convenience function vmaDestroyBuffer(). \note This is a new version of the function augmented with parameter `allocationLocalOffset`. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); /** \brief Destroys Vulkan buffer and frees allocated memory. This is just a convenience function equivalent to: \code vkDestroyBuffer(device, buffer, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode It is safe to pass null as buffer and/or allocation. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VmaAllocator VMA_NOT_NULL allocator, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, VmaAllocation VMA_NULLABLE allocation); /// Function similar to vmaCreateBuffer(). VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( VmaAllocator VMA_NOT_NULL allocator, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); /// Function similar to vmaCreateAliasingBuffer() but for images. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); /// Function similar to vmaCreateAliasingBuffer2() but for images. 
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); /** \brief Destroys Vulkan image and frees allocated memory. This is just a convenience function equivalent to: \code vkDestroyImage(device, image, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode It is safe to pass null as image and/or allocation. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NULLABLE_NON_DISPATCHABLE image, VmaAllocation VMA_NULLABLE allocation); /** @} */ /** \addtogroup group_virtual @{ */ /** \brief Creates new #VmaVirtualBlock object. \param pCreateInfo Parameters for creation. \param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock); /** \brief Destroys #VmaVirtualBlock object. Please note that you should consciously handle virtual allocations that could remain unfreed in the block. You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock() if you are sure this is what you want. If you do neither, an assert is called. If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`, don't forget to free them. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock( VmaVirtualBlock VMA_NULLABLE virtualBlock); /** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations. 
*/ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( VmaVirtualBlock VMA_NOT_NULL virtualBlock); /** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer. */ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock. If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned (despite the function doesn't ever allocate actual GPU memory). `pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`. \param virtualBlock Virtual block \param pCreateInfo Parameters for the allocation \param[out] pAllocation Returned handle of the new allocation \param[out] pOffset Returned offset of the new allocation. Optional, can be null. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( VmaVirtualBlock VMA_NOT_NULL virtualBlock, const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, VkDeviceSize* VMA_NULLABLE pOffset); /** \brief Frees virtual allocation inside given #VmaVirtualBlock. It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing. */ VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation); /** \brief Frees all virtual allocations inside given #VmaVirtualBlock. You must either call this function or free each virtual allocation individually with vmaVirtualFree() before destroying a virtual block. Otherwise, an assert is called. 
If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`, don't forget to free it as well. */ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( VmaVirtualBlock VMA_NOT_NULL virtualBlock); /** \brief Changes custom pointer associated with given virtual allocation. */ VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData); /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). */ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaStatistics* VMA_NOT_NULL pStats); /** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. This function is slow to call. Use for debugging purposes. For less detailed statistics, see vmaGetVirtualBlockStatistics(). */ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaDetailedStatistics* VMA_NOT_NULL pStats); /** @} */ #if VMA_STATS_STRING_ENABLED /** \addtogroup group_stats @{ */ /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. \param virtualBlock Virtual block. \param[out] ppStatsString Returned string. \param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. Returned string must be freed using vmaFreeVirtualBlockStatsString(). 
*/
VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

/// Frees a string returned by vmaBuildVirtualBlockStatsString().
VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
    char* VMA_NULLABLE pStatsString);

/** \brief Builds and returns statistics as a null-terminated string in JSON format.
\param allocator
\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
\param detailedMap
*/
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

/** @} */

#endif // VMA_STATS_STRING_ENABLED

#endif // _VMA_FUNCTION_HEADERS

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
//    IMPLEMENTATION
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

// NOTE(review): include targets were lost in extraction; restored to the
// headers this implementation section requires — verify against upstream VMA.
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>
#include <utility>
#include <type_traits>

#if !defined(VMA_CPP20)
    #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
        #define VMA_CPP20 1
    #else
        #define VMA_CPP20 0
    #endif
#endif

#ifdef _MSC_VER
    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
#endif

#if VMA_CPP20
    #include <bit>
#endif

#if VMA_STATS_STRING_ENABLED
    #include <cstdio> // For snprintf
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header or change them
here if you need other than default behavior depending on your environment.
*/
#ifndef _VMA_CONFIGURATION

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");

To use this feature in new versions of VMA you now have to pass
VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
    // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
Define this macro to include custom header files without having to edit this file directly, e.g.:

    // Inside of "my_vma_configuration_user_includes.h":

    #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
    #include "my_custom_min.h" // for my_custom_min
    #include <algorithm>
    #include <mutex>

    // Inside a different file, which includes "vk_mem_alloc.h":

    #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
    #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
    #include "vk_mem_alloc.h"
    ...

The following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
    #include <cassert> // for assert
    #include <algorithm> // for min, max, swap
    #include <mutex>
#else
    #include VMA_CONFIGURATION_USER_INCLUDES_H
#endif

#ifndef VMA_NULL
   // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
   #define VMA_NULL   nullptr
#endif

#ifndef VMA_FALLTHROUGH
    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
        #define VMA_FALLTHROUGH [[fallthrough]]
    #else
        #define VMA_FALLTHROUGH
    #endif
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr)         assert(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)   //VMA_ASSERT(expr)
    #endif
#endif

// Assert used for reporting memory leaks - unfreed allocations.
#ifndef VMA_ASSERT_LEAK
    #define VMA_ASSERT_LEAK(expr)   VMA_ASSERT(expr)
#endif

// If your compiler is not compatible with C++17 and definition of
// aligned_alloc() function is missing, uncommenting following line may help:

//#include <malloc.h>

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <malloc.h> // for memalign
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
    // Therefore, for now disable this specific exception until a proper solution is found.
    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
    //    // the __builtin_available checks for 10.15.
    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
// if (__builtin_available(macOS 10.15, iOS 13, *)) // return aligned_alloc(alignment, size); //#endif //#endif // alignment must be >= sizeof(void*) if(alignment < sizeof(void*)) { alignment = sizeof(void*); } void *pointer; if(posix_memalign(&pointer, alignment, size) == 0) return pointer; return VMA_NULL; } #elif defined(_WIN32) static void* vma_aligned_alloc(size_t alignment, size_t size) { return _aligned_malloc(size, alignment); } #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 static void* vma_aligned_alloc(size_t alignment, size_t size) { return aligned_alloc(alignment, size); } #else static void* vma_aligned_alloc(size_t alignment, size_t size) { VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); return VMA_NULL; } #endif #if defined(_WIN32) static void vma_aligned_free(void* ptr) { _aligned_free(ptr); } #else static void vma_aligned_free(void* VMA_NULLABLE ptr) { free(ptr); } #endif #ifndef VMA_ALIGN_OF #define VMA_ALIGN_OF(type) (alignof(type)) #endif #ifndef VMA_SYSTEM_ALIGNED_MALLOC #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) #endif #ifndef VMA_SYSTEM_ALIGNED_FREE // VMA_SYSTEM_FREE is the old name, but might have been defined by the user #if defined(VMA_SYSTEM_FREE) #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) #else #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) #endif #endif #ifndef VMA_COUNT_BITS_SET // Returns number of bits set to 1 in (v) #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) #endif #ifndef VMA_BITSCAN_LSB // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). 
If mask is 0 then returns UINT8_MAX #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) #endif #ifndef VMA_BITSCAN_MSB // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) #endif #ifndef VMA_MIN #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) #endif #ifndef VMA_MAX #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) #endif #ifndef VMA_SORT #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) #endif #ifndef VMA_DEBUG_LOG_FORMAT #define VMA_DEBUG_LOG_FORMAT(format, ...) /* #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ printf((format), __VA_ARGS__); \ printf("\n"); \ } while(false) */ #endif #ifndef VMA_DEBUG_LOG #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) #endif #ifndef VMA_LEAK_LOG_FORMAT #define VMA_LEAK_LOG_FORMAT(format, ...) VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__) #endif #ifndef VMA_CLASS_NO_COPY #define VMA_CLASS_NO_COPY(className) \ private: \ className(const className&) = delete; \ className& operator=(const className&) = delete; #endif #ifndef VMA_CLASS_NO_COPY_NO_MOVE #define VMA_CLASS_NO_COPY_NO_MOVE(className) \ private: \ className(const className&) = delete; \ className(className&&) = delete; \ className& operator=(const className&) = delete; \ className& operator=(className&&) = delete; #endif // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. 
#if VMA_STATS_STRING_ENABLED static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num) { snprintf(outStr, strLen, "%" PRIu32, num); } static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num) { snprintf(outStr, strLen, "%" PRIu64, num); } static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr) { snprintf(outStr, strLen, "%p", ptr); } #endif #ifndef VMA_MUTEX class VmaMutex { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex) public: VmaMutex() { } void Lock() { m_Mutex.lock(); } void Unlock() { m_Mutex.unlock(); } bool TryLock() { return m_Mutex.try_lock(); } private: std::mutex m_Mutex; }; #define VMA_MUTEX VmaMutex #endif // Read-write mutex, where "read" is shared access, "write" is exclusive access. #ifndef VMA_RW_MUTEX #if VMA_USE_STL_SHARED_MUTEX // Use std::shared_mutex from C++17. #include class VmaRWMutex { public: void LockRead() { m_Mutex.lock_shared(); } void UnlockRead() { m_Mutex.unlock_shared(); } bool TryLockRead() { return m_Mutex.try_lock_shared(); } void LockWrite() { m_Mutex.lock(); } void UnlockWrite() { m_Mutex.unlock(); } bool TryLockWrite() { return m_Mutex.try_lock(); } private: std::shared_mutex m_Mutex; }; #define VMA_RW_MUTEX VmaRWMutex #elif defined(_WIN32) && defined(WINVER) && defined(SRWLOCK_INIT) && WINVER >= 0x0600 // Use SRWLOCK from WinAPI. // Minimum supported client = Windows Vista, server = Windows Server 2008. 
class VmaRWMutex { public: VmaRWMutex() { InitializeSRWLock(&m_Lock); } void LockRead() { AcquireSRWLockShared(&m_Lock); } void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } private: SRWLOCK m_Lock; }; #define VMA_RW_MUTEX VmaRWMutex #else // Less efficient fallback: Use normal mutex. class VmaRWMutex { public: void LockRead() { m_Mutex.Lock(); } void UnlockRead() { m_Mutex.Unlock(); } bool TryLockRead() { return m_Mutex.TryLock(); } void LockWrite() { m_Mutex.Lock(); } void UnlockWrite() { m_Mutex.Unlock(); } bool TryLockWrite() { return m_Mutex.TryLock(); } private: VMA_MUTEX m_Mutex; }; #define VMA_RW_MUTEX VmaRWMutex #endif // #if VMA_USE_STL_SHARED_MUTEX #endif // #ifndef VMA_RW_MUTEX /* If providing your own implementation, you need to implement a subset of std::atomic. */ #ifndef VMA_ATOMIC_UINT32 #include #define VMA_ATOMIC_UINT32 std::atomic #endif #ifndef VMA_ATOMIC_UINT64 #include #define VMA_ATOMIC_UINT64 std::atomic #endif #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY /** Every allocation will have its own memory block. Define to 1 for debugging purposes only. */ #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) #endif #ifndef VMA_MIN_ALIGNMENT /** Minimum alignment of all allocations, in bytes. Set to more than 1 for debugging purposes. Must be power of two. */ #ifdef VMA_DEBUG_ALIGNMENT // Old name #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT #else #define VMA_MIN_ALIGNMENT (1) #endif #endif #ifndef VMA_DEBUG_MARGIN /** Minimum margin after every allocation, in bytes. Set nonzero for debugging purposes only. */ #define VMA_DEBUG_MARGIN (0) #endif #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS /** Define this macro to 1 to automatically fill new allocations and destroyed allocations with some bit pattern. 
*/ #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) #endif #ifndef VMA_DEBUG_DETECT_CORRUPTION /** Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to enable writing magic value to the margin after every allocation and validating it, so that memory corruptions (out-of-bounds writes) are detected. */ #define VMA_DEBUG_DETECT_CORRUPTION (0) #endif #ifndef VMA_DEBUG_GLOBAL_MUTEX /** Set this to 1 for debugging purposes only, to enable single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues. */ #define VMA_DEBUG_GLOBAL_MUTEX (0) #endif #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY /** Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be power of two. */ #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) #endif #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT /* Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount and return error instead of leaving up to Vulkan implementation what to do in such cases. */ #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) #endif #ifndef VMA_SMALL_HEAP_MAX_SIZE /// Maximum size of a memory heap in Vulkan to consider it "small". #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) #endif #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) #endif /* Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called or a persistently mapped allocation is created and destroyed several times in a row. It keeps additional +1 mapping of a device memory block to prevent calling actual vkMapMemory/vkUnmapMemory too many times, which may improve performance and help tools like RenderDoc. 
*/ #ifndef VMA_MAPPING_HYSTERESIS_ENABLED #define VMA_MAPPING_HYSTERESIS_ENABLED 1 #endif #define VMA_VALIDATE(cond) do { if(!(cond)) { \ VMA_ASSERT(0 && "Validation failed: " #cond); \ return false; \ } } while(false) /******************************************************************************* END OF CONFIGURATION */ #endif // _VMA_CONFIGURATION static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; // Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; static const uint32_t VMA_VENDOR_ID_AMD = 4098; // This one is tricky. Vulkan specification defines this code as available since // Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. // See pull request #207. #define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) #if VMA_STATS_STRING_ENABLED // Correspond to values of enum VmaSuballocationType. 
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = { "FREE", "UNKNOWN", "BUFFER", "IMAGE_UNKNOWN", "IMAGE_LINEAR", "IMAGE_OPTIMAL", }; #endif static VkAllocationCallbacks VmaEmptyAllocationCallbacks = { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; #ifndef _VMA_ENUM_DECLARATIONS enum VmaSuballocationType { VMA_SUBALLOCATION_TYPE_FREE = 0, VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, VMA_SUBALLOCATION_TYPE_BUFFER = 2, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF }; enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE }; enum class VmaAllocationRequestType { Normal, TLSF, // Used by "Linear" algorithm. UpperAddress, EndOf1st, EndOf2nd, }; #endif // _VMA_ENUM_DECLARATIONS #ifndef _VMA_FORWARD_DECLARATIONS // Opaque handle used by allocation algorithms to identify single allocation in any conforming way. VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle); struct VmaMutexLock; struct VmaMutexLockRead; struct VmaMutexLockWrite; template struct AtomicTransactionalIncrement; template struct VmaStlAllocator; template class VmaVector; template class VmaSmallVector; template class VmaPoolAllocator; template struct VmaListItem; template class VmaRawList; template class VmaList; template class VmaIntrusiveLinkedList; #if VMA_STATS_STRING_ENABLED class VmaStringBuilder; class VmaJsonWriter; #endif class VmaDeviceMemoryBlock; struct VmaDedicatedAllocationListItemTraits; class VmaDedicatedAllocationList; struct VmaSuballocation; struct VmaSuballocationOffsetLess; struct VmaSuballocationOffsetGreater; struct VmaSuballocationItemSizeLess; typedef VmaList> VmaSuballocationList; struct VmaAllocationRequest; class VmaBlockMetadata; class VmaBlockMetadata_Linear; class VmaBlockMetadata_TLSF; class VmaBlockVector; struct VmaPoolListItemTraits; struct VmaCurrentBudgetData; class VmaAllocationObjectAllocator; #endif // _VMA_FORWARD_DECLARATIONS 
#ifndef _VMA_FUNCTIONS /* Returns number of bits set to 1 in (v). On specific platforms and compilers you can use intrinsics like: Visual Studio: return __popcnt(v); GCC, Clang: return static_cast(__builtin_popcount(v)); Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. But you need to check in runtime whether user's CPU supports these, as some old processors don't. */ static inline uint32_t VmaCountBitsSet(uint32_t v) { #if VMA_CPP20 return std::popcount(v); #else uint32_t c = v - ((v >> 1) & 0x55555555); c = ((c >> 2) & 0x33333333) + (c & 0x33333333); c = ((c >> 4) + c) & 0x0F0F0F0F; c = ((c >> 8) + c) & 0x00FF00FF; c = ((c >> 16) + c) & 0x0000FFFF; return c; #endif } static inline uint8_t VmaBitScanLSB(uint64_t mask) { #if defined(_MSC_VER) && defined(_WIN64) unsigned long pos; if (_BitScanForward64(&pos, mask)) return static_cast(pos); return UINT8_MAX; #elif VMA_CPP20 if(mask) return static_cast(std::countr_zero(mask)); return UINT8_MAX; #elif defined __GNUC__ || defined __clang__ return static_cast(__builtin_ffsll(mask)) - 1U; #else uint8_t pos = 0; uint64_t bit = 1; do { if (mask & bit) return pos; bit <<= 1; } while (pos++ < 63); return UINT8_MAX; #endif } static inline uint8_t VmaBitScanLSB(uint32_t mask) { #ifdef _MSC_VER unsigned long pos; if (_BitScanForward(&pos, mask)) return static_cast(pos); return UINT8_MAX; #elif VMA_CPP20 if(mask) return static_cast(std::countr_zero(mask)); return UINT8_MAX; #elif defined __GNUC__ || defined __clang__ return static_cast(__builtin_ffs(mask)) - 1U; #else uint8_t pos = 0; uint32_t bit = 1; do { if (mask & bit) return pos; bit <<= 1; } while (pos++ < 31); return UINT8_MAX; #endif } static inline uint8_t VmaBitScanMSB(uint64_t mask) { #if defined(_MSC_VER) && defined(_WIN64) unsigned long pos; if (_BitScanReverse64(&pos, mask)) return static_cast(pos); #elif VMA_CPP20 if(mask) return 63 - static_cast(std::countl_zero(mask)); #elif defined __GNUC__ || defined __clang__ if (mask) return 63 - 
static_cast(__builtin_clzll(mask)); #else uint8_t pos = 63; uint64_t bit = 1ULL << 63; do { if (mask & bit) return pos; bit >>= 1; } while (pos-- > 0); #endif return UINT8_MAX; } static inline uint8_t VmaBitScanMSB(uint32_t mask) { #ifdef _MSC_VER unsigned long pos; if (_BitScanReverse(&pos, mask)) return static_cast(pos); #elif VMA_CPP20 if(mask) return 31 - static_cast(std::countl_zero(mask)); #elif defined __GNUC__ || defined __clang__ if (mask) return 31 - static_cast(__builtin_clz(mask)); #else uint8_t pos = 31; uint32_t bit = 1UL << 31; do { if (mask & bit) return pos; bit >>= 1; } while (pos-- > 0); #endif return UINT8_MAX; } /* Returns true if given number is a power of two. T must be unsigned integer number or signed integer but always nonnegative. For 0 returns true. */ template inline bool VmaIsPow2(T x) { return (x & (x - 1)) == 0; } // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. // Use types like uint32_t, uint64_t as T. template static inline T VmaAlignUp(T val, T alignment) { VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); return (val + alignment - 1) & ~(alignment - 1); } // Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8. // Use types like uint32_t, uint64_t as T. template static inline T VmaAlignDown(T val, T alignment) { VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); return val & ~(alignment - 1); } // Division with mathematical rounding to nearest number. template static inline T VmaRoundDiv(T x, T y) { return (x + (y / (T)2)) / y; } // Divide by 'y' and round up to nearest integer. template static inline T VmaDivideRoundingUp(T x, T y) { return (x + y - (T)1) / y; } // Returns smallest power of 2 greater or equal to v. 
static inline uint32_t VmaNextPow2(uint32_t v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } static inline uint64_t VmaNextPow2(uint64_t v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v |= v >> 32; v++; return v; } // Returns largest power of 2 less or equal to v. static inline uint32_t VmaPrevPow2(uint32_t v) { v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v = v ^ (v >> 1); return v; } static inline uint64_t VmaPrevPow2(uint64_t v) { v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v |= v >> 32; v = v ^ (v >> 1); return v; } static inline bool VmaStrIsEmpty(const char* pStr) { return pStr == VMA_NULL || *pStr == '\0'; } /* Returns true if two memory blocks occupy overlapping pages. ResourceA must be in less memory offset than ResourceB. Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". */ static inline bool VmaBlocksOnSamePage( VkDeviceSize resourceAOffset, VkDeviceSize resourceASize, VkDeviceSize resourceBOffset, VkDeviceSize pageSize) { VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); VkDeviceSize resourceBStart = resourceBOffset; VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); return resourceAEndPage == resourceBStartPage; } /* Returns true if given suballocation types could conflict and must respect VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer or linear image and another one is optimal image. If type is unknown, behave conservatively. 
*/ static inline bool VmaIsBufferImageGranularityConflict( VmaSuballocationType suballocType1, VmaSuballocationType suballocType2) { if (suballocType1 > suballocType2) { std::swap(suballocType1, suballocType2); } switch (suballocType1) { case VMA_SUBALLOCATION_TYPE_FREE: return false; case VMA_SUBALLOCATION_TYPE_UNKNOWN: return true; case VMA_SUBALLOCATION_TYPE_BUFFER: return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: return false; default: VMA_ASSERT(0); return true; } } static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) { #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION uint32_t* pDst = (uint32_t*)((char*)pData + offset); const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); for (size_t i = 0; i < numberCount; ++i, ++pDst) { *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; } #else // no-op #endif } static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) { #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); for (size_t i = 0; i < numberCount; ++i, ++pSrc) { if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) { return false; } } #endif return true; } /* Fills structure with parameters of an example buffer to be used for transfers during GPU memory defragmentation. 
*/ static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) { memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. } /* Performs binary search and returns iterator to first element that is greater or equal to (key), according to comparison (cmp). Cmp should return true if first argument is less than second argument. Returned value is the found element, if present in the collection or place where new element with value (key) should be inserted. */ template static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) { size_t down = 0, up = size_t(end - beg); while (down < up) { const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation if (cmp(*(beg + mid), key)) { down = mid + 1; } else { up = mid; } } return beg + down; } template IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) { IterT it = VmaBinaryFindFirstNotLess( beg, end, value, cmp); if (it == end || (!cmp(*it, value) && !cmp(value, *it))) { return it; } return end; } /* Returns true if all pointers in the array are not-null and unique. Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. T must be pointer type, e.g. VmaAllocation, VmaPool. 
*/ template static bool VmaValidatePointerArray(uint32_t count, const T* arr) { for (uint32_t i = 0; i < count; ++i) { const T iPtr = arr[i]; if (iPtr == VMA_NULL) { return false; } for (uint32_t j = i + 1; j < count; ++j) { if (iPtr == arr[j]) { return false; } } } return true; } template static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) { newStruct->pNext = mainStruct->pNext; mainStruct->pNext = newStruct; } // Finds structure with s->sType == sType in mainStruct->pNext chain. // Returns pointer to it. If not found, returns null. template static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType) { for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext; s != VMA_NULL; s = s->pNext) { if(s->sType == sType) { return (const FindT*)s; } } return VMA_NULL; } // An abstraction over buffer or image `usage` flags, depending on available extensions. struct VmaBufferImageUsage { #if VMA_KHR_MAINTENANCE5 typedef uint64_t BaseType; // VkFlags64 #else typedef uint32_t BaseType; // VkFlags32 #endif static const VmaBufferImageUsage UNKNOWN; BaseType Value; VmaBufferImageUsage() { *this = UNKNOWN; } explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { } VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5); explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo); bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; } bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; } bool Contains(BaseType flag) const { return (Value & flag) != 0; } bool ContainsDeviceAccess() const { // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*. 
return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; } }; const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0); VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5) { #if VMA_KHR_MAINTENANCE5 if(useKhrMaintenance5) { // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR, // take usage from it and ignore VkBufferCreateInfo::usage, per specification // of the VK_KHR_maintenance5 extension. const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 = VmaPnextChainFind(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR); if(usageFlags2) { this->Value = usageFlags2->usage; return; } } #endif this->Value = (BaseType)createInfo.usage; } VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo) { // Maybe in the future there will be VK_KHR_maintenanceN extension with structure // VkImageUsageFlags2CreateInfoKHR, like the one for buffers... this->Value = (BaseType)createInfo.usage; } // This is the main algorithm that guides the selection of a memory type best for an allocation - // converts usage to required/preferred/not preferred flags. 
static bool FindMemoryPreferences( bool isIntegratedGPU, const VmaAllocationCreateInfo& allocCreateInfo, VmaBufferImageUsage bufImgUsage, VkMemoryPropertyFlags& outRequiredFlags, VkMemoryPropertyFlags& outPreferredFlags, VkMemoryPropertyFlags& outNotPreferredFlags) { outRequiredFlags = allocCreateInfo.requiredFlags; outPreferredFlags = allocCreateInfo.preferredFlags; outNotPreferredFlags = 0; switch(allocCreateInfo.usage) { case VMA_MEMORY_USAGE_UNKNOWN: break; case VMA_MEMORY_USAGE_GPU_ONLY: if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } break; case VMA_MEMORY_USAGE_CPU_ONLY: outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; break; case VMA_MEMORY_USAGE_CPU_TO_GPU: outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } break; case VMA_MEMORY_USAGE_GPU_TO_CPU: outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; break; case VMA_MEMORY_USAGE_CPU_COPY: outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; break; case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; break; case VMA_MEMORY_USAGE_AUTO: case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: { if(bufImgUsage == VmaBufferImageUsage::UNKNOWN) { VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known." " Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?" 
); return false; } const bool deviceAccess = bufImgUsage.ContainsDeviceAccess(); const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. if(hostAccessRandom) { // Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)! outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) { // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. // Omitting HOST_VISIBLE here is intentional. // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } else { // Always CPU memory. outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } } // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. else if(hostAccessSequentialWrite) { // Want uncached and write-combined. 
outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } else { outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) if(deviceAccess) { // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. if(preferHost) outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) else { // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. if(preferDevice) outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } } } // No CPU access else { // if(deviceAccess) // // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory, // unless there is a clear preference from the user not to do so. // // else: // // No direct GPU access, no CPU access, just transfers. // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or // a "swap file" copy to free some GPU memory (then better CPU memory). // Up to the user to decide. If no preferece, assume the former and choose GPU memory. if(preferHost) outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } break; } default: VMA_ASSERT(0); } // Avoid DEVICE_COHERENT unless explicitly requested. 
if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) { outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; } return true; } //////////////////////////////////////////////////////////////////////////////// // Memory allocation static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) { void* result = VMA_NULL; if ((pAllocationCallbacks != VMA_NULL) && (pAllocationCallbacks->pfnAllocation != VMA_NULL)) { result = (*pAllocationCallbacks->pfnAllocation)( pAllocationCallbacks->pUserData, size, alignment, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); } else { result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); } VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); return result; } static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) { if ((pAllocationCallbacks != VMA_NULL) && (pAllocationCallbacks->pfnFree != VMA_NULL)) { (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); } else { VMA_SYSTEM_ALIGNED_FREE(ptr); } } template static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) { return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); } template static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) { return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); } #define vma_new(allocator, type) new(VmaAllocate(allocator))(type) #define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) template static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) { ptr->~T(); VmaFree(pAllocationCallbacks, ptr); } template static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) { if (ptr != VMA_NULL) { for (size_t i = count; i--; ) { ptr[i].~T(); 
} VmaFree(pAllocationCallbacks, ptr); } } static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) { if (srcStr != VMA_NULL) { const size_t len = strlen(srcStr); char* const result = vma_new_array(allocs, char, len + 1); memcpy(result, srcStr, len + 1); return result; } return VMA_NULL; } #if VMA_STATS_STRING_ENABLED static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) { if (srcStr != VMA_NULL) { char* const result = vma_new_array(allocs, char, strLen + 1); memcpy(result, srcStr, strLen); result[strLen] = '\0'; return result; } return VMA_NULL; } #endif // VMA_STATS_STRING_ENABLED static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) { if (str != VMA_NULL) { const size_t len = strlen(str); vma_delete_array(allocs, str, len + 1); } } template size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) { const size_t indexToInsert = VmaBinaryFindFirstNotLess( vector.data(), vector.data() + vector.size(), value, CmpLess()) - vector.data(); VmaVectorInsert(vector, indexToInsert, value); return indexToInsert; } template bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) { CmpLess comparator; typename VectorT::iterator it = VmaBinaryFindFirstNotLess( vector.begin(), vector.end(), value, comparator); if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) { size_t indexToRemove = it - vector.begin(); VmaVectorRemove(vector, indexToRemove); return true; } return false; } #endif // _VMA_FUNCTIONS #ifndef _VMA_STATISTICS_FUNCTIONS static void VmaClearStatistics(VmaStatistics& outStats) { outStats.blockCount = 0; outStats.allocationCount = 0; outStats.blockBytes = 0; outStats.allocationBytes = 0; } static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) { inoutStats.blockCount += src.blockCount; inoutStats.allocationCount += src.allocationCount; 
inoutStats.blockBytes += src.blockBytes; inoutStats.allocationBytes += src.allocationBytes; } static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) { VmaClearStatistics(outStats.statistics); outStats.unusedRangeCount = 0; outStats.allocationSizeMin = VK_WHOLE_SIZE; outStats.allocationSizeMax = 0; outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; outStats.unusedRangeSizeMax = 0; } static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) { inoutStats.statistics.allocationCount++; inoutStats.statistics.allocationBytes += size; inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); } static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) { inoutStats.unusedRangeCount++; inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); } static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) { VmaAddStatistics(inoutStats.statistics, src.statistics); inoutStats.unusedRangeCount += src.unusedRangeCount; inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); } #endif // _VMA_STATISTICS_FUNCTIONS #ifndef _VMA_MUTEX_LOCK // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). struct VmaMutexLock { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock) public: VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : m_pMutex(useMutex ? 
&mutex : VMA_NULL) { if (m_pMutex) { m_pMutex->Lock(); } } ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } private: VMA_MUTEX* m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. struct VmaMutexLockRead { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead) public: VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : m_pMutex(useMutex ? &mutex : VMA_NULL) { if (m_pMutex) { m_pMutex->LockRead(); } } ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } private: VMA_RW_MUTEX* m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. struct VmaMutexLockWrite { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite) public: VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) : m_pMutex(useMutex ? &mutex : VMA_NULL) { if (m_pMutex) { m_pMutex->LockWrite(); } } ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } private: VMA_RW_MUTEX* m_pMutex; }; #if VMA_DEBUG_GLOBAL_MUTEX static VMA_MUTEX gDebugGlobalMutex; #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); #else #define VMA_DEBUG_GLOBAL_MUTEX_LOCK #endif #endif // _VMA_MUTEX_LOCK #ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT // An object that increments given atomic but decrements it back in the destructor unless Commit() is called. template struct AtomicTransactionalIncrement { public: using T = decltype(AtomicT().load()); ~AtomicTransactionalIncrement() { if(m_Atomic) --(*m_Atomic); } void Commit() { m_Atomic = VMA_NULL; } T Increment(AtomicT* atomic) { m_Atomic = atomic; return m_Atomic->fetch_add(1); } private: AtomicT* m_Atomic = VMA_NULL; }; #endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT #ifndef _VMA_STL_ALLOCATOR // STL-compatible allocator. 
// VmaStlAllocator: minimal C++ Allocator-requirement shim that forwards
// allocate/deallocate to VmaAllocateArray/VmaFree using the stored
// VkAllocationCallbacks. Two allocators compare equal iff they wrap the same
// callbacks pointer. Copy-assignment is deleted because m_pCallbacks is const.
// Below it begins VmaVector: a std::vector subset that requires trivially
// copyable T (elements are moved with memcpy; ctors/dtors are never run).
// NOTE(review): template headers lost their <...> parameter lists in extraction.
template struct VmaStlAllocator { const VkAllocationCallbacks* const m_pCallbacks; typedef T value_type; VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} template VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} VmaStlAllocator(const VmaStlAllocator&) = default; VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } template bool operator==(const VmaStlAllocator& rhs) const { return m_pCallbacks == rhs.m_pCallbacks; } template bool operator!=(const VmaStlAllocator& rhs) const { return m_pCallbacks != rhs.m_pCallbacks; } }; #endif // _VMA_STL_ALLOCATOR #ifndef _VMA_VECTOR /* Class with interface compatible with subset of std::vector. T must be POD because constructors and destructors are not called and memcpy is used for these objects. */ template class VmaVector { public: typedef T value_type; typedef T* iterator; typedef const T* const_iterator; VmaVector(const AllocatorT& allocator); VmaVector(size_t count, const AllocatorT& allocator); // This version of the constructor is here for compatibility with pre-C++14 std::vector. // value is unused.
// VmaVector interface: accessors assert non-empty via VMA_HEAVY_ASSERT; clear()
// is resize(0) and therefore keeps capacity (only shrink_to_fit releases memory).
// The destructor frees raw storage without running element destructors — consistent
// with the trivially-copyable-T contract stated above the class.
VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} VmaVector(const VmaVector& src); VmaVector& operator=(const VmaVector& rhs); ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } bool empty() const { return m_Count == 0; } size_t size() const { return m_Count; } T* data() { return m_pArray; } T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } const T* data() const { return m_pArray; } const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } iterator begin() { return m_pArray; } iterator end() { return m_pArray + m_Count; } const_iterator cbegin() const { return m_pArray; } const_iterator cend() const { return m_pArray + m_Count; } const_iterator begin() const { return cbegin(); } const_iterator end() const { return cend(); } void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } void push_front(const T& src) { insert(0, src); } void push_back(const T& src); void reserve(size_t newCapacity, bool freeMemory = false); void resize(size_t newCount); void clear() { resize(0); } void shrink_to_fit(); void insert(size_t index, const T& src); void remove(size_t index); T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } private: AllocatorT m_Allocator; T* m_pArray; size_t m_Count; size_t m_Capacity; }; #ifndef _VMA_VECTOR_FUNCTIONS template VmaVector::VmaVector(const AllocatorT& allocator) : m_Allocator(allocator), m_pArray(VMA_NULL), m_Count(0), m_Capacity(0) {} template VmaVector::VmaVector(size_t count, const AllocatorT& allocator) : m_Allocator(allocator), m_pArray(count ?
// VmaVector out-of-line definitions. Copy ctor / operator= duplicate elements with
// memcpy. reserve() never shrinks unless freeMemory is true; resize() grows capacity
// geometrically: max(newCount, max(capacity*3/2, 8)).
// NOTE(review): reserve() below calls VmaAllocateArray(m_Allocator, ...) while every
// other call site passes m_Allocator.m_pCallbacks — looks like a garbled/ wrong
// argument; verify against the pristine vk_mem_alloc.h before changing anything.
(T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), m_Count(count), m_Capacity(count) {} template VmaVector::VmaVector(const VmaVector& src) : m_Allocator(src.m_Allocator), m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), m_Count(src.m_Count), m_Capacity(src.m_Count) { if (m_Count != 0) { memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); } } template VmaVector& VmaVector::operator=(const VmaVector& rhs) { if (&rhs != this) { resize(rhs.m_Count); if (m_Count != 0) { memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); } } return *this; } template void VmaVector::push_back(const T& src) { const size_t newIndex = size(); resize(newIndex + 1); m_pArray[newIndex] = src; } template void VmaVector::reserve(size_t newCapacity, bool freeMemory) { newCapacity = VMA_MAX(newCapacity, m_Count); if ((newCapacity < m_Capacity) && !freeMemory) { newCapacity = m_Capacity; } if (newCapacity != m_Capacity) { T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; if (m_Count != 0) { memcpy(newArray, m_pArray, m_Count * sizeof(T)); } VmaFree(m_Allocator.m_pCallbacks, m_pArray); m_Capacity = newCapacity; m_pArray = newArray; } } template void VmaVector::resize(size_t newCount) { size_t newCapacity = m_Capacity; if (newCount > m_Capacity) { newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); } if (newCapacity != m_Capacity) { T* const newArray = newCapacity ?
// shrink_to_fit reallocates to exactly m_Count elements; insert/remove shift the
// tail with memmove (overlap-safe). remove() with index == last simply resizes down.
VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; const size_t elementsToCopy = VMA_MIN(m_Count, newCount); if (elementsToCopy != 0) { memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); } VmaFree(m_Allocator.m_pCallbacks, m_pArray); m_Capacity = newCapacity; m_pArray = newArray; } m_Count = newCount; } template void VmaVector::shrink_to_fit() { if (m_Capacity > m_Count) { T* newArray = VMA_NULL; if (m_Count > 0) { newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); memcpy(newArray, m_pArray, m_Count * sizeof(T)); } VmaFree(m_Allocator.m_pCallbacks, m_pArray); m_Capacity = m_Count; m_pArray = newArray; } } template void VmaVector::insert(size_t index, const T& src) { VMA_HEAVY_ASSERT(index <= m_Count); const size_t oldCount = size(); resize(oldCount + 1); if (index < oldCount) { memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); } m_pArray[index] = src; } template void VmaVector::remove(size_t index) { VMA_HEAVY_ASSERT(index < m_Count); const size_t oldCount = size(); if (index < oldCount - 1) { memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); } resize(oldCount - 1); } #endif // _VMA_VECTOR_FUNCTIONS template static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) { vec.insert(index, item); } template static void VmaVectorRemove(VmaVector& vec, size_t index) { vec.remove(index); } #endif // _VMA_VECTOR #ifndef _VMA_SMALL_VECTOR /* This is a vector (a variable-sized array), optimized for the case when the array is small. It contains some number of elements in-place, which allows it to avoid heap allocation when the actual number of elements is below that threshold. This allows normal "small" cases to be fast without losing generality for large inputs.
// VmaSmallVector: small-buffer-optimized vector. Elements live in m_StaticArray
// while m_Count <= N, otherwise in the heap-backed m_DynamicArray; data() selects
// the active storage on every call, so pointers are invalidated when crossing N.
// Copy/assignment are deleted. Same trivially-copyable-T contract as VmaVector.
*/ template class VmaSmallVector { public: typedef T value_type; typedef T* iterator; VmaSmallVector(const AllocatorT& allocator); VmaSmallVector(size_t count, const AllocatorT& allocator); template VmaSmallVector(const VmaSmallVector&) = delete; template VmaSmallVector& operator=(const VmaSmallVector&) = delete; ~VmaSmallVector() = default; bool empty() const { return m_Count == 0; } size_t size() const { return m_Count; } T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } iterator begin() { return data(); } iterator end() { return data() + m_Count; } void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } void push_front(const T& src) { insert(0, src); } void push_back(const T& src); void resize(size_t newCount, bool freeMemory = false); void clear(bool freeMemory = false); void insert(size_t index, const T& src); void remove(size_t index); T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } private: size_t m_Count; T m_StaticArray[N]; // Used when m_Size <= N VmaVector m_DynamicArray; // Used when m_Size > N }; #ifndef _VMA_SMALL_VECTOR_FUNCTIONS template VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) : m_Count(0), m_DynamicArray(allocator) {} template VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) : m_Count(count), m_DynamicArray(count > N ?
// resize() handles the four static/dynamic transitions explicitly, memcpy-ing
// elements across storages when crossing the N threshold in either direction.
count : 0, allocator) {} template void VmaSmallVector::push_back(const T& src) { const size_t newIndex = size(); resize(newIndex + 1); data()[newIndex] = src; } template void VmaSmallVector::resize(size_t newCount, bool freeMemory) { if (newCount > N && m_Count > N) { // Any direction, staying in m_DynamicArray m_DynamicArray.resize(newCount); if (freeMemory) { m_DynamicArray.shrink_to_fit(); } } else if (newCount > N && m_Count <= N) { // Growing, moving from m_StaticArray to m_DynamicArray m_DynamicArray.resize(newCount); if (m_Count > 0) { memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); } } else if (newCount <= N && m_Count > N) { // Shrinking, moving from m_DynamicArray to m_StaticArray if (newCount > 0) { memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); } m_DynamicArray.resize(0); if (freeMemory) { m_DynamicArray.shrink_to_fit(); } } else { // Any direction, staying in m_StaticArray - nothing to do here } m_Count = newCount; } template void VmaSmallVector::clear(bool freeMemory) { m_DynamicArray.clear(); if (freeMemory) { m_DynamicArray.shrink_to_fit(); } m_Count = 0; } template void VmaSmallVector::insert(size_t index, const T& src) { VMA_HEAVY_ASSERT(index <= m_Count); const size_t oldCount = size(); resize(oldCount + 1); T* const dataPtr = data(); if (index < oldCount) { // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); } dataPtr[index] = src; } template void VmaSmallVector::remove(size_t index) { VMA_HEAVY_ASSERT(index < m_Count); const size_t oldCount = size(); if (index < oldCount - 1) { // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
// Tail of VmaSmallVector::remove, then VmaPoolAllocator: a free-list pool built
// from geometrically growing blocks (first block m_FirstBlockCapacity, each next
// one *3/2). Free items store the next-free index in the union's NextFreeIndex;
// UINT32_MAX terminates a block's free list.
T* const dataPtr = data(); memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); } resize(oldCount - 1); } #endif // _VMA_SMALL_VECTOR_FUNCTIONS #endif // _VMA_SMALL_VECTOR #ifndef _VMA_POOL_ALLOCATOR /* Allocator for objects of type T using a list of arrays (pools) to speed up allocation. Number of elements that can be allocated is not bounded because allocator can create multiple blocks. */ template class VmaPoolAllocator { VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator) public: VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); ~VmaPoolAllocator(); template T* Alloc(Types&&... args); void Free(T* ptr); private: union Item { uint32_t NextFreeIndex; alignas(T) char Value[sizeof(T)]; }; struct ItemBlock { Item* pItems; uint32_t Capacity; uint32_t FirstFreeIndex; }; const VkAllocationCallbacks* m_pAllocationCallbacks; const uint32_t m_FirstBlockCapacity; VmaVector> m_ItemBlocks; ItemBlock& CreateNewBlock(); }; #ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS template VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) : m_pAllocationCallbacks(pAllocationCallbacks), m_FirstBlockCapacity(firstBlockCapacity), m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) { VMA_ASSERT(m_FirstBlockCapacity > 1); } template VmaPoolAllocator::~VmaPoolAllocator() { for (size_t i = m_ItemBlocks.size(); i--;) vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); m_ItemBlocks.clear(); } template template T* VmaPoolAllocator::Alloc(Types&&... args) { for (size_t i = m_ItemBlocks.size(); i--; ) { ItemBlock& block = m_ItemBlocks[i]; // This block has some free items: Use first one. if (block.FirstFreeIndex != UINT32_MAX) { Item* const pItem = &block.pItems[block.FirstFreeIndex]; block.FirstFreeIndex = pItem->NextFreeIndex; T* result = (T*)&pItem->Value; new(result)T(std::forward(args)...); // Explicit constructor call.
// Free() locates the owning block by pointer range; the pointer is round-tripped
// through memcpy to the union type to avoid aliasing UB. CreateNewBlock() links
// every new item into the free list before returning.
return result; } } // No block has free item: Create new one and use it. ItemBlock& newBlock = CreateNewBlock(); Item* const pItem = &newBlock.pItems[0]; newBlock.FirstFreeIndex = pItem->NextFreeIndex; T* result = (T*)&pItem->Value; new(result) T(std::forward(args)...); // Explicit constructor call. return result; } template void VmaPoolAllocator::Free(T* ptr) { // Search all memory blocks to find ptr. for (size_t i = m_ItemBlocks.size(); i--; ) { ItemBlock& block = m_ItemBlocks[i]; // Casting to union. Item* pItemPtr; memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); // Check if pItemPtr is in address range of this block. if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) { ptr->~T(); // Explicit destructor call. const uint32_t index = static_cast(pItemPtr - block.pItems); pItemPtr->NextFreeIndex = block.FirstFreeIndex; block.FirstFreeIndex = index; return; } } VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); } template typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() { const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; const ItemBlock newBlock = { vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), newBlockCapacity, 0 }; m_ItemBlocks.push_back(newBlock); // Setup singly-linked list of all free items in this block. for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) newBlock.pItems[i].NextFreeIndex = i + 1; newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; return m_ItemBlocks.back(); } #endif // _VMA_POOL_ALLOCATOR_FUNCTIONS #endif // _VMA_POOL_ALLOCATOR #ifndef _VMA_RAW_LIST template struct VmaListItem { VmaListItem* pPrev; VmaListItem* pNext; T Value; }; // Doubly linked list.
// VmaRawList: doubly linked list whose nodes come from a VmaPoolAllocator
// (128 items per first block). The destructor intentionally does not Clear():
// returning nodes to the pool one-by-one would be wasted work at teardown.
template class VmaRawList { VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList) public: typedef VmaListItem ItemType; VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); // Intentionally not calling Clear, because that would be unnecessary // computations to return all items to m_ItemAllocator as free. ~VmaRawList() = default; size_t GetCount() const { return m_Count; } bool IsEmpty() const { return m_Count == 0; } ItemType* Front() { return m_pFront; } ItemType* Back() { return m_pBack; } const ItemType* Front() const { return m_pFront; } const ItemType* Back() const { return m_pBack; } ItemType* PushFront(); ItemType* PushBack(); ItemType* PushFront(const T& value); ItemType* PushBack(const T& value); void PopFront(); void PopBack(); // Item can be null - it means PushBack. ItemType* InsertBefore(ItemType* pItem); // Item can be null - it means PushFront. ItemType* InsertAfter(ItemType* pItem); ItemType* InsertBefore(ItemType* pItem, const T& value); ItemType* InsertAfter(ItemType* pItem, const T& value); void Clear(); void Remove(ItemType* pItem); private: const VkAllocationCallbacks* const m_pAllocationCallbacks; VmaPoolAllocator m_ItemAllocator; ItemType* m_pFront; ItemType* m_pBack; size_t m_Count; }; #ifndef _VMA_RAW_LIST_FUNCTIONS template VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) : m_pAllocationCallbacks(pAllocationCallbacks), m_ItemAllocator(pAllocationCallbacks, 128), m_pFront(VMA_NULL), m_pBack(VMA_NULL), m_Count(0) {} template VmaListItem* VmaRawList::PushFront() { ItemType* const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pPrev = VMA_NULL; if (IsEmpty()) { pNewItem->pNext = VMA_NULL; m_pFront = pNewItem; m_pBack = pNewItem; m_Count = 1; } else { pNewItem->pNext = m_pFront; m_pFront->pPrev = pNewItem; m_pFront = pNewItem; ++m_Count; } return pNewItem; } template VmaListItem* VmaRawList::PushBack() { ItemType* const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pNext = VMA_NULL; if(IsEmpty()) { pNewItem->pPrev = VMA_NULL;
// Push/Pop maintain both m_pFront/m_pBack and m_Count; value overloads delegate
// to the node-only overloads and then assign Value. Clear() walks back-to-front
// returning each node to the pool.
m_pFront = pNewItem; m_pBack = pNewItem; m_Count = 1; } else { pNewItem->pPrev = m_pBack; m_pBack->pNext = pNewItem; m_pBack = pNewItem; ++m_Count; } return pNewItem; } template VmaListItem* VmaRawList::PushFront(const T& value) { ItemType* const pNewItem = PushFront(); pNewItem->Value = value; return pNewItem; } template VmaListItem* VmaRawList::PushBack(const T& value) { ItemType* const pNewItem = PushBack(); pNewItem->Value = value; return pNewItem; } template void VmaRawList::PopFront() { VMA_HEAVY_ASSERT(m_Count > 0); ItemType* const pFrontItem = m_pFront; ItemType* const pNextItem = pFrontItem->pNext; if (pNextItem != VMA_NULL) { pNextItem->pPrev = VMA_NULL; } m_pFront = pNextItem; m_ItemAllocator.Free(pFrontItem); --m_Count; } template void VmaRawList::PopBack() { VMA_HEAVY_ASSERT(m_Count > 0); ItemType* const pBackItem = m_pBack; ItemType* const pPrevItem = pBackItem->pPrev; if(pPrevItem != VMA_NULL) { pPrevItem->pNext = VMA_NULL; } m_pBack = pPrevItem; m_ItemAllocator.Free(pBackItem); --m_Count; } template void VmaRawList::Clear() { if (IsEmpty() == false) { ItemType* pItem = m_pBack; while (pItem != VMA_NULL) { ItemType* const pPrevItem = pItem->pPrev; m_ItemAllocator.Free(pItem); pItem = pPrevItem; } m_pFront = VMA_NULL; m_pBack = VMA_NULL; m_Count = 0; } } template void VmaRawList::Remove(ItemType* pItem) { VMA_HEAVY_ASSERT(pItem != VMA_NULL); VMA_HEAVY_ASSERT(m_Count > 0); if(pItem->pPrev != VMA_NULL) { pItem->pPrev->pNext = pItem->pNext; } else { VMA_HEAVY_ASSERT(m_pFront == pItem); m_pFront = pItem->pNext; } if(pItem->pNext != VMA_NULL) { pItem->pNext->pPrev = pItem->pPrev; } else { VMA_HEAVY_ASSERT(m_pBack == pItem); m_pBack = pItem->pPrev; } m_ItemAllocator.Free(pItem); --m_Count; } template VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) { if(pItem != VMA_NULL) { ItemType* const prevItem = pItem->pPrev; ItemType* const newItem = m_ItemAllocator.Alloc(); newItem->pPrev = prevItem; newItem->pNext = pItem; pItem->pPrev = newItem; if(prevItem
// InsertBefore(null) degenerates to PushBack, InsertAfter(null) to PushFront —
// matching the interface comments in the class declaration above.
!= VMA_NULL) { prevItem->pNext = newItem; } else { VMA_HEAVY_ASSERT(m_pFront == pItem); m_pFront = newItem; } ++m_Count; return newItem; } else return PushBack(); } template VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) { if(pItem != VMA_NULL) { ItemType* const nextItem = pItem->pNext; ItemType* const newItem = m_ItemAllocator.Alloc(); newItem->pNext = nextItem; newItem->pPrev = pItem; pItem->pNext = newItem; if(nextItem != VMA_NULL) { nextItem->pPrev = newItem; } else { VMA_HEAVY_ASSERT(m_pBack == pItem); m_pBack = newItem; } ++m_Count; return newItem; } else return PushFront(); } template VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) { ItemType* const newItem = InsertBefore(pItem); newItem->Value = value; return newItem; } template VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) { ItemType* const newItem = InsertAfter(pItem); newItem->Value = value; return newItem; } #endif // _VMA_RAW_LIST_FUNCTIONS #endif // _VMA_RAW_LIST #ifndef _VMA_LIST template class VmaList { VMA_CLASS_NO_COPY_NO_MOVE(VmaList) public: class reverse_iterator; class const_iterator; class const_reverse_iterator; class iterator { friend class const_iterator; friend class VmaList; public: iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } iterator operator++(int) { iterator result = *this; ++*this; return result; } iterator operator--(int) { iterator result = *this; --*this; return result; } iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem !=
// VmaList iterator family. A null m_pItem encodes end()/rend(); forward ++ walks
// pNext, reverse ++ walks pPrev. const variants additionally offer drop_const()
// which const_casts back to the mutable iterator (used internally by VmaList).
VMA_NULL); m_pItem = m_pItem->pNext; return *this; } iterator& operator--(); private: VmaRawList* m_pList; VmaListItem* m_pItem; iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} }; class reverse_iterator { friend class const_reverse_iterator; friend class VmaList; public: reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } reverse_iterator& operator--(); private: VmaRawList* m_pList; VmaListItem* m_pItem; reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} }; class const_iterator { friend class VmaList; public: const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } bool operator==(const const_iterator& rhs) const {
VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } const_iterator& operator--(); private: const VmaRawList* m_pList; const VmaListItem* m_pItem; const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} }; class const_reverse_iterator { friend class VmaList; public: const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } const_reverse_iterator& operator--(); private: const VmaRawList* m_pList; const VmaListItem* m_pItem;
// VmaList facade over VmaRawList, plus out-of-line operator-- definitions:
// decrementing an end()-state iterator snaps to Back() (forward) / Front()
// (reverse), asserting the list is non-empty.
const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} }; VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} bool empty() const { return m_RawList.IsEmpty(); } size_t size() const { return m_RawList.GetCount(); } iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } iterator end() { return iterator(&m_RawList, VMA_NULL); } const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } const_iterator begin() const { return cbegin(); } const_iterator end() const { return cend(); } reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } const_reverse_iterator rbegin() const { return crbegin(); } const_reverse_iterator rend() const { return crend(); } void push_back(const T& value) { m_RawList.PushBack(value); } iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } void clear() { m_RawList.Clear(); } void erase(iterator it) { m_RawList.Remove(it.m_pItem); } private: VmaRawList m_RawList; }; #ifndef _VMA_LIST_FUNCTIONS template typename VmaList::iterator& VmaList::iterator::operator--() { if (m_pItem != VMA_NULL) { m_pItem = m_pItem->pPrev; } else { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Back(); } return *this; } template typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() { if (m_pItem != VMA_NULL) { m_pItem = m_pItem->pNext; } else { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Front(); } return *this; } template typename VmaList::const_iterator&
VmaList::const_iterator::operator--() { if (m_pItem != VMA_NULL) { m_pItem = m_pItem->pPrev; } else { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Back(); } return *this; } template typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() { if (m_pItem != VMA_NULL) { m_pItem = m_pItem->pNext; } else { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Back(); } return *this; } #endif // _VMA_LIST_FUNCTIONS #endif // _VMA_LIST #ifndef _VMA_INTRUSIVE_LINKED_LIST /* Expected interface of ItemTypeTraits: struct MyItemTypeTraits { typedef MyItem ItemType; static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; } static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } }; */ template class VmaIntrusiveLinkedList { public: typedef typename ItemTypeTraits::ItemType ItemType; static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } // Movable, not copyable. VmaIntrusiveLinkedList() = default; VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } size_t GetCount() const { return m_Count; } bool IsEmpty() const { return m_Count == 0; } ItemType* Front() { return m_Front; } ItemType* Back() { return m_Back; } const ItemType* Front() const { return m_Front; } const ItemType* Back() const { return m_Back; } void PushBack(ItemType* item); void PushFront(ItemType* item); ItemType* PopBack(); ItemType* PopFront(); // MyItem can be null - it means PushBack. 
void InsertBefore(ItemType* existingItem, ItemType* newItem); // MyItem can be null - it means PushFront. void InsertAfter(ItemType* existingItem, ItemType* newItem); void Remove(ItemType* item); void RemoveAll(); private: ItemType* m_Front = VMA_NULL; ItemType* m_Back = VMA_NULL; size_t m_Count = 0; }; #ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS template VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) { src.m_Front = src.m_Back = VMA_NULL; src.m_Count = 0; } template VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) { if (&src != this) { VMA_HEAVY_ASSERT(IsEmpty()); m_Front = src.m_Front; m_Back = src.m_Back; m_Count = src.m_Count; src.m_Front = src.m_Back = VMA_NULL; src.m_Count = 0; } return *this; } template void VmaIntrusiveLinkedList::PushBack(ItemType* item) { VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); if (IsEmpty()) { m_Front = item; m_Back = item; m_Count = 1; } else { ItemTypeTraits::AccessPrev(item) = m_Back; ItemTypeTraits::AccessNext(m_Back) = item; m_Back = item; ++m_Count; } } template void VmaIntrusiveLinkedList::PushFront(ItemType* item) { VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); if (IsEmpty()) { m_Front = item; m_Back = item; m_Count = 1; } else { ItemTypeTraits::AccessNext(item) = m_Front; ItemTypeTraits::AccessPrev(m_Front) = item; m_Front = item; ++m_Count; } } template typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() { VMA_HEAVY_ASSERT(m_Count > 0); ItemType* const backItem = m_Back; ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); if (prevItem != VMA_NULL) { ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; } m_Back = prevItem; --m_Count; ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; ItemTypeTraits::AccessNext(backItem) = VMA_NULL; return 
backItem; } template typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() { VMA_HEAVY_ASSERT(m_Count > 0); ItemType* const frontItem = m_Front; ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); if (nextItem != VMA_NULL) { ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; } m_Front = nextItem; --m_Count; ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; return frontItem; } template void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) { VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); if (existingItem != VMA_NULL) { ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); ItemTypeTraits::AccessPrev(newItem) = prevItem; ItemTypeTraits::AccessNext(newItem) = existingItem; ItemTypeTraits::AccessPrev(existingItem) = newItem; if (prevItem != VMA_NULL) { ItemTypeTraits::AccessNext(prevItem) = newItem; } else { VMA_HEAVY_ASSERT(m_Front == existingItem); m_Front = newItem; } ++m_Count; } else PushBack(newItem); } template void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) { VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); if (existingItem != VMA_NULL) { ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); ItemTypeTraits::AccessNext(newItem) = nextItem; ItemTypeTraits::AccessPrev(newItem) = existingItem; ItemTypeTraits::AccessNext(existingItem) = newItem; if (nextItem != VMA_NULL) { ItemTypeTraits::AccessPrev(nextItem) = newItem; } else { VMA_HEAVY_ASSERT(m_Back == existingItem); m_Back = newItem; } ++m_Count; } else return PushFront(newItem); } template void VmaIntrusiveLinkedList::Remove(ItemType* item) { VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); if (ItemTypeTraits::GetPrev(item) != VMA_NULL) { 
ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); } else { VMA_HEAVY_ASSERT(m_Front == item); m_Front = ItemTypeTraits::GetNext(item); } if (ItemTypeTraits::GetNext(item) != VMA_NULL) { ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); } else { VMA_HEAVY_ASSERT(m_Back == item); m_Back = ItemTypeTraits::GetPrev(item); } ItemTypeTraits::AccessPrev(item) = VMA_NULL; ItemTypeTraits::AccessNext(item) = VMA_NULL; --m_Count; } template void VmaIntrusiveLinkedList::RemoveAll() { if (!IsEmpty()) { ItemType* item = m_Back; while (item != VMA_NULL) { ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); ItemTypeTraits::AccessPrev(item) = VMA_NULL; ItemTypeTraits::AccessNext(item) = VMA_NULL; item = prevItem; } m_Front = VMA_NULL; m_Back = VMA_NULL; m_Count = 0; } } #endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS #endif // _VMA_INTRUSIVE_LINKED_LIST #if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED class VmaStringBuilder { public: VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} ~VmaStringBuilder() = default; size_t GetLength() const { return m_Data.size(); } const char* GetData() const { return m_Data.data(); } void AddNewLine() { Add('\n'); } void Add(char ch) { m_Data.push_back(ch); } void Add(const char* pStr); void AddNumber(uint32_t num); void AddNumber(uint64_t num); void AddPointer(const void* ptr); private: VmaVector> m_Data; }; #ifndef _VMA_STRING_BUILDER_FUNCTIONS void VmaStringBuilder::Add(const char* pStr) { const size_t strLen = strlen(pStr); if (strLen > 0) { const size_t oldCount = m_Data.size(); m_Data.resize(oldCount + strLen); memcpy(m_Data.data() + oldCount, pStr, strLen); } } void VmaStringBuilder::AddNumber(uint32_t num) { char buf[11]; buf[10] = '\0'; char* p = &buf[10]; do { *--p = '0' + (char)(num % 10); num /= 10; } while (num); Add(p); } void VmaStringBuilder::AddNumber(uint64_t 
num) { char buf[21]; buf[20] = '\0'; char* p = &buf[20]; do { *--p = '0' + (char)(num % 10); num /= 10; } while (num); Add(p); } void VmaStringBuilder::AddPointer(const void* ptr) { char buf[21]; VmaPtrToStr(buf, sizeof(buf), ptr); Add(buf); } #endif //_VMA_STRING_BUILDER_FUNCTIONS #endif // _VMA_STRING_BUILDER #if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED /* Allows to conveniently build a correct JSON document to be written to the VmaStringBuilder passed to the constructor. */ class VmaJsonWriter { VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter) public: // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object. VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); ~VmaJsonWriter(); // Begins object by writing "{". // Inside an object, you must call pairs of WriteString and a value, e.g.: // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject(); // Will write: { "A": 1, "B": 2 } void BeginObject(bool singleLine = false); // Ends object by writing "}". void EndObject(); // Begins array by writing "[". // Inside an array, you can write a sequence of any values. void BeginArray(bool singleLine = false); // Ends array by writing "[". void EndArray(); // Writes a string value inside "". // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. void WriteString(const char* pStr); // Begins writing a string value. // Call BeginString, ContinueString, ContinueString, ..., EndString instead of // WriteString to conveniently build the string content incrementally, made of // parts including numbers. void BeginString(const char* pStr = VMA_NULL); // Posts next part of an open string. void ContinueString(const char* pStr); // Posts next part of an open string. The number is converted to decimal characters. 
void ContinueString(uint32_t n); void ContinueString(uint64_t n); // Posts next part of an open string. Pointer value is converted to characters // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 void ContinueString_Pointer(const void* ptr); // Ends writing a string value by writing '"'. void EndString(const char* pStr = VMA_NULL); // Writes a number value. void WriteNumber(uint32_t n); void WriteNumber(uint64_t n); // Writes a boolean value - false or true. void WriteBool(bool b); // Writes a null value. void WriteNull(); private: enum COLLECTION_TYPE { COLLECTION_TYPE_OBJECT, COLLECTION_TYPE_ARRAY, }; struct StackItem { COLLECTION_TYPE type; uint32_t valueCount; bool singleLineMode; }; static const char* const INDENT; VmaStringBuilder& m_SB; VmaVector< StackItem, VmaStlAllocator > m_Stack; bool m_InsideString; void BeginValue(bool isString); void WriteIndent(bool oneLess = false); }; const char* const VmaJsonWriter::INDENT = " "; #ifndef _VMA_JSON_WRITER_FUNCTIONS VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) : m_SB(sb), m_Stack(VmaStlAllocator(pAllocationCallbacks)), m_InsideString(false) {} VmaJsonWriter::~VmaJsonWriter() { VMA_ASSERT(!m_InsideString); VMA_ASSERT(m_Stack.empty()); } void VmaJsonWriter::BeginObject(bool singleLine) { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.Add('{'); StackItem item; item.type = COLLECTION_TYPE_OBJECT; item.valueCount = 0; item.singleLineMode = singleLine; m_Stack.push_back(item); } void VmaJsonWriter::EndObject() { VMA_ASSERT(!m_InsideString); WriteIndent(true); m_SB.Add('}'); VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); m_Stack.pop_back(); } void VmaJsonWriter::BeginArray(bool singleLine) { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.Add('['); StackItem item; item.type = COLLECTION_TYPE_ARRAY; item.valueCount = 0; item.singleLineMode = singleLine; m_Stack.push_back(item); } void 
VmaJsonWriter::EndArray() { VMA_ASSERT(!m_InsideString); WriteIndent(true); m_SB.Add(']'); VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); m_Stack.pop_back(); } void VmaJsonWriter::WriteString(const char* pStr) { BeginString(pStr); EndString(); } void VmaJsonWriter::BeginString(const char* pStr) { VMA_ASSERT(!m_InsideString); BeginValue(true); m_SB.Add('"'); m_InsideString = true; if (pStr != VMA_NULL && pStr[0] != '\0') { ContinueString(pStr); } } void VmaJsonWriter::ContinueString(const char* pStr) { VMA_ASSERT(m_InsideString); const size_t strLen = strlen(pStr); for (size_t i = 0; i < strLen; ++i) { char ch = pStr[i]; if (ch == '\\') { m_SB.Add("\\\\"); } else if (ch == '"') { m_SB.Add("\\\""); } else if ((uint8_t)ch >= 32) { m_SB.Add(ch); } else switch (ch) { case '\b': m_SB.Add("\\b"); break; case '\f': m_SB.Add("\\f"); break; case '\n': m_SB.Add("\\n"); break; case '\r': m_SB.Add("\\r"); break; case '\t': m_SB.Add("\\t"); break; default: VMA_ASSERT(0 && "Character not currently supported."); } } } void VmaJsonWriter::ContinueString(uint32_t n) { VMA_ASSERT(m_InsideString); m_SB.AddNumber(n); } void VmaJsonWriter::ContinueString(uint64_t n) { VMA_ASSERT(m_InsideString); m_SB.AddNumber(n); } void VmaJsonWriter::ContinueString_Pointer(const void* ptr) { VMA_ASSERT(m_InsideString); m_SB.AddPointer(ptr); } void VmaJsonWriter::EndString(const char* pStr) { VMA_ASSERT(m_InsideString); if (pStr != VMA_NULL && pStr[0] != '\0') { ContinueString(pStr); } m_SB.Add('"'); m_InsideString = false; } void VmaJsonWriter::WriteNumber(uint32_t n) { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.AddNumber(n); } void VmaJsonWriter::WriteNumber(uint64_t n) { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.AddNumber(n); } void VmaJsonWriter::WriteBool(bool b) { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.Add(b ? 
"true" : "false"); } void VmaJsonWriter::WriteNull() { VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.Add("null"); } void VmaJsonWriter::BeginValue(bool isString) { if (!m_Stack.empty()) { StackItem& currItem = m_Stack.back(); if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 0) { VMA_ASSERT(isString); } if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 != 0) { m_SB.Add(": "); } else if (currItem.valueCount > 0) { m_SB.Add(", "); WriteIndent(); } else { WriteIndent(); } ++currItem.valueCount; } } void VmaJsonWriter::WriteIndent(bool oneLess) { if (!m_Stack.empty() && !m_Stack.back().singleLineMode) { m_SB.AddNewLine(); size_t count = m_Stack.size(); if (count > 0 && oneLess) { --count; } for (size_t i = 0; i < count; ++i) { m_SB.Add(INDENT); } } } #endif // _VMA_JSON_WRITER_FUNCTIONS static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) { json.BeginObject(); json.WriteString("BlockCount"); json.WriteNumber(stat.statistics.blockCount); json.WriteString("BlockBytes"); json.WriteNumber(stat.statistics.blockBytes); json.WriteString("AllocationCount"); json.WriteNumber(stat.statistics.allocationCount); json.WriteString("AllocationBytes"); json.WriteNumber(stat.statistics.allocationBytes); json.WriteString("UnusedRangeCount"); json.WriteNumber(stat.unusedRangeCount); if (stat.statistics.allocationCount > 1) { json.WriteString("AllocationSizeMin"); json.WriteNumber(stat.allocationSizeMin); json.WriteString("AllocationSizeMax"); json.WriteNumber(stat.allocationSizeMax); } if (stat.unusedRangeCount > 1) { json.WriteString("UnusedRangeSizeMin"); json.WriteNumber(stat.unusedRangeSizeMin); json.WriteString("UnusedRangeSizeMax"); json.WriteNumber(stat.unusedRangeSizeMax); } json.EndObject(); } #endif // _VMA_JSON_WRITER #ifndef _VMA_MAPPING_HYSTERESIS class VmaMappingHysteresis { VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis) public: VmaMappingHysteresis() = default; uint32_t 
GetExtraMapping() const { return m_ExtraMapping; } // Call when Map was called. // Returns true if switched to extra +1 mapping reference count. bool PostMap() { #if VMA_MAPPING_HYSTERESIS_ENABLED if(m_ExtraMapping == 0) { ++m_MajorCounter; if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) { m_ExtraMapping = 1; m_MajorCounter = 0; m_MinorCounter = 0; return true; } } else // m_ExtraMapping == 1 PostMinorCounter(); #endif // #if VMA_MAPPING_HYSTERESIS_ENABLED return false; } // Call when Unmap was called. void PostUnmap() { #if VMA_MAPPING_HYSTERESIS_ENABLED if(m_ExtraMapping == 0) ++m_MajorCounter; else // m_ExtraMapping == 1 PostMinorCounter(); #endif // #if VMA_MAPPING_HYSTERESIS_ENABLED } // Call when allocation was made from the memory block. void PostAlloc() { #if VMA_MAPPING_HYSTERESIS_ENABLED if(m_ExtraMapping == 1) ++m_MajorCounter; else // m_ExtraMapping == 0 PostMinorCounter(); #endif // #if VMA_MAPPING_HYSTERESIS_ENABLED } // Call when allocation was freed from the memory block. // Returns true if switched to extra -1 mapping reference count. bool PostFree() { #if VMA_MAPPING_HYSTERESIS_ENABLED if(m_ExtraMapping == 1) { ++m_MajorCounter; if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && m_MajorCounter > m_MinorCounter + 1) { m_ExtraMapping = 0; m_MajorCounter = 0; m_MinorCounter = 0; return true; } } else // m_ExtraMapping == 0 PostMinorCounter(); #endif // #if VMA_MAPPING_HYSTERESIS_ENABLED return false; } private: static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; uint32_t m_MinorCounter = 0; uint32_t m_MajorCounter = 0; uint32_t m_ExtraMapping = 0; // 0 or 1. 
void PostMinorCounter() { if(m_MinorCounter < m_MajorCounter) { ++m_MinorCounter; } else if(m_MajorCounter > 0) { --m_MajorCounter; --m_MinorCounter; } } }; #endif // _VMA_MAPPING_HYSTERESIS #if VMA_EXTERNAL_MEMORY_WIN32 class VmaWin32Handle { public: VmaWin32Handle() noexcept : m_hHandle(VMA_NULL) { } explicit VmaWin32Handle(HANDLE hHandle) noexcept : m_hHandle(hHandle) { } ~VmaWin32Handle() noexcept { if (m_hHandle != VMA_NULL) { ::CloseHandle(m_hHandle); } } VMA_CLASS_NO_COPY_NO_MOVE(VmaWin32Handle) public: // Strengthened VkResult GetHandle(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, bool useMutex, HANDLE* pHandle) noexcept { *pHandle = VMA_NULL; // Try to get handle first. if (m_hHandle != VMA_NULL) { *pHandle = Duplicate(hTargetProcess); return VK_SUCCESS; } VkResult res = VK_SUCCESS; // If failed, try to create it. { VmaMutexLockWrite lock(m_Mutex, useMutex); if (m_hHandle == VMA_NULL) { res = Create(device, memory, pvkGetMemoryWin32HandleKHR, &m_hHandle); } } *pHandle = Duplicate(hTargetProcess); return res; } operator bool() const noexcept { return m_hHandle != VMA_NULL; } private: // Not atomic static VkResult Create(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE* pHandle) noexcept { VkResult res = VK_ERROR_FEATURE_NOT_PRESENT; if (pvkGetMemoryWin32HandleKHR != VMA_NULL) { VkMemoryGetWin32HandleInfoKHR handleInfo{ }; handleInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR; handleInfo.memory = memory; handleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; res = pvkGetMemoryWin32HandleKHR(device, &handleInfo, pHandle); } return res; } HANDLE Duplicate(HANDLE hTargetProcess = VMA_NULL) const noexcept { if (!m_hHandle) return m_hHandle; HANDLE hCurrentProcess = ::GetCurrentProcess(); HANDLE hDupHandle = VMA_NULL; if (!::DuplicateHandle(hCurrentProcess, m_hHandle, hTargetProcess ? 
hTargetProcess : hCurrentProcess, &hDupHandle, 0, FALSE, DUPLICATE_SAME_ACCESS)) { VMA_ASSERT(0 && "Failed to duplicate handle."); } return hDupHandle; } private: HANDLE m_hHandle; VMA_RW_MUTEX m_Mutex; // Protects access m_Handle }; #else class VmaWin32Handle { // ABI compatibility void* placeholder = VMA_NULL; VMA_RW_MUTEX placeholder2; }; #endif // VMA_EXTERNAL_MEMORY_WIN32 #ifndef _VMA_DEVICE_MEMORY_BLOCK /* Represents a single block of device memory (`VkDeviceMemory`) with all the data about its regions (aka suballocations, #VmaAllocation), assigned and free. Thread-safety: - Access to m_pMetadata must be externally synchronized. - Map, Unmap, Bind* are synchronized internally. */ class VmaDeviceMemoryBlock { VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock) public: VmaBlockMetadata* m_pMetadata; VmaDeviceMemoryBlock(VmaAllocator hAllocator); ~VmaDeviceMemoryBlock(); // Always call after construction. void Init( VmaAllocator hAllocator, VmaPool hParentPool, uint32_t newMemoryTypeIndex, VkDeviceMemory newMemory, VkDeviceSize newSize, uint32_t id, uint32_t algorithm, VkDeviceSize bufferImageGranularity); // Always call before destruction. void Destroy(VmaAllocator allocator); VmaPool GetParentPool() const { return m_hParentPool; } VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } uint32_t GetId() const { return m_Id; } void* GetMappedData() const { return m_pMappedData; } uint32_t GetMapRefCount() const { return m_MapCount; } // Call when allocation/free was made from m_pMetadata. // Used for m_MappingHysteresis. void PostAlloc(VmaAllocator hAllocator); void PostFree(VmaAllocator hAllocator); // Validates all data structures inside this object. If not valid, returns false. bool Validate() const; VkResult CheckCorruption(VmaAllocator hAllocator); // ppData can be null. 
VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); void Unmap(VmaAllocator hAllocator, uint32_t count); VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); VkResult BindBufferMemory( const VmaAllocator hAllocator, const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, const void* pNext); VkResult BindImageMemory( const VmaAllocator hAllocator, const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, const void* pNext); #if VMA_EXTERNAL_MEMORY_WIN32 VkResult CreateWin32Handle( const VmaAllocator hAllocator, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, HANDLE* pHandle)noexcept; #endif // VMA_EXTERNAL_MEMORY_WIN32 private: VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. uint32_t m_MemoryTypeIndex; uint32_t m_Id; VkDeviceMemory m_hMemory; /* Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. Also protects m_MapCount, m_pMappedData. Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. */ VMA_MUTEX m_MapAndBindMutex; VmaMappingHysteresis m_MappingHysteresis; uint32_t m_MapCount; void* m_pMappedData; VmaWin32Handle m_Handle; }; #endif // _VMA_DEVICE_MEMORY_BLOCK #ifndef _VMA_ALLOCATION_T struct VmaAllocationExtraData { void* m_pMappedData = VMA_NULL; // Not null means memory is mapped. VmaWin32Handle m_Handle; }; struct VmaAllocation_T { friend struct VmaDedicatedAllocationListItemTraits; enum FLAGS { FLAG_PERSISTENT_MAP = 0x01, FLAG_MAPPING_ALLOWED = 0x02, }; public: enum ALLOCATION_TYPE { ALLOCATION_TYPE_NONE, ALLOCATION_TYPE_BLOCK, ALLOCATION_TYPE_DEDICATED, }; // This struct is allocated using VmaPoolAllocator. 
VmaAllocation_T(bool mappingAllowed); ~VmaAllocation_T(); void InitBlockAllocation( VmaDeviceMemoryBlock* block, VmaAllocHandle allocHandle, VkDeviceSize alignment, VkDeviceSize size, uint32_t memoryTypeIndex, VmaSuballocationType suballocationType, bool mapped); // pMappedData not null means allocation is created with MAPPED flag. void InitDedicatedAllocation( VmaAllocator allocator, VmaPool hParentPool, uint32_t memoryTypeIndex, VkDeviceMemory hMemory, VmaSuballocationType suballocationType, void* pMappedData, VkDeviceSize size); void Destroy(VmaAllocator allocator); ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } VkDeviceSize GetAlignment() const { return m_Alignment; } VkDeviceSize GetSize() const { return m_Size; } void* GetUserData() const { return m_pUserData; } const char* GetName() const { return m_pName; } VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } void SetName(VmaAllocator hAllocator, const char* pName); void FreeName(VmaAllocator hAllocator); uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); VmaAllocHandle GetAllocHandle() const; VkDeviceSize GetOffset() const; VmaPool GetParentPool() const; VkDeviceMemory GetMemory() const; void* GetMappedData() const; void BlockAllocMap(); void BlockAllocUnmap(); VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); void DedicatedAllocUnmap(VmaAllocator hAllocator); #if VMA_STATS_STRING_ENABLED VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; 
} void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5) { VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5); } void InitImageUsage(const VkImageCreateInfo &createInfo) { VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); m_BufferImageUsage = VmaBufferImageUsage(createInfo); } void PrintParameters(class VmaJsonWriter& json) const; #endif #if VMA_EXTERNAL_MEMORY_WIN32 VkResult GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* hHandle) noexcept; #endif // VMA_EXTERNAL_MEMORY_WIN32 private: // Allocation out of VmaDeviceMemoryBlock. struct BlockAllocation { VmaDeviceMemoryBlock* m_Block; VmaAllocHandle m_AllocHandle; }; // Allocation for an object that has its own private VkDeviceMemory. struct DedicatedAllocation { VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. VkDeviceMemory m_hMemory; VmaAllocationExtraData* m_ExtraData; VmaAllocation_T* m_Prev; VmaAllocation_T* m_Next; }; union { // Allocation out of VmaDeviceMemoryBlock. BlockAllocation m_BlockAllocation; // Allocation for an object that has its own private VkDeviceMemory. DedicatedAllocation m_DedicatedAllocation; }; VkDeviceSize m_Alignment; VkDeviceSize m_Size; void* m_pUserData; char* m_pName; uint32_t m_MemoryTypeIndex; uint8_t m_Type; // ALLOCATION_TYPE uint8_t m_SuballocationType; // VmaSuballocationType // Reference counter for vmaMapMemory()/vmaUnmapMemory(). uint8_t m_MapCount; uint8_t m_Flags; // enum FLAGS #if VMA_STATS_STRING_ENABLED VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown. 
#endif void EnsureExtraData(VmaAllocator hAllocator); }; #endif // _VMA_ALLOCATION_T #ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS struct VmaDedicatedAllocationListItemTraits { typedef VmaAllocation_T ItemType; static ItemType* GetPrev(const ItemType* item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Prev; } static ItemType* GetNext(const ItemType* item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Next; } static ItemType*& AccessPrev(ItemType* item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Prev; } static ItemType*& AccessNext(ItemType* item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Next; } }; #endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS #ifndef _VMA_DEDICATED_ALLOCATION_LIST /* Stores linked list of VmaAllocation_T objects. Thread-safe, synchronized internally. */ class VmaDedicatedAllocationList { VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList) public: VmaDedicatedAllocationList() {} ~VmaDedicatedAllocationList(); void Init(bool useMutex) { m_UseMutex = useMutex; } bool Validate(); void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); void AddStatistics(VmaStatistics& inoutStats); #if VMA_STATS_STRING_ENABLED // Writes JSON array with the list of allocations. 
void BuildStatsString(VmaJsonWriter& json); #endif bool IsEmpty(); void Register(VmaAllocation alloc); void Unregister(VmaAllocation alloc); private: typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; bool m_UseMutex = true; VMA_RW_MUTEX m_Mutex; DedicatedAllocationLinkedList m_AllocationList; }; #ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS VmaDedicatedAllocationList::~VmaDedicatedAllocationList() { VMA_HEAVY_ASSERT(Validate()); if (!m_AllocationList.IsEmpty()) { VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!"); } } bool VmaDedicatedAllocationList::Validate() { const size_t declaredCount = m_AllocationList.GetCount(); size_t actualCount = 0; VmaMutexLockRead lock(m_Mutex, m_UseMutex); for (VmaAllocation alloc = m_AllocationList.Front(); alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) { ++actualCount; } VMA_VALIDATE(actualCount == declaredCount); return true; } void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) { for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) { const VkDeviceSize size = item->GetSize(); inoutStats.statistics.blockCount++; inoutStats.statistics.blockBytes += size; VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); } } void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) { VmaMutexLockRead lock(m_Mutex, m_UseMutex); const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); inoutStats.blockCount += allocCount; inoutStats.allocationCount += allocCount; for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) { const VkDeviceSize size = item->GetSize(); inoutStats.blockBytes += size; inoutStats.allocationBytes += size; } } #if VMA_STATS_STRING_ENABLED void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) { VmaMutexLockRead lock(m_Mutex, m_UseMutex); json.BeginArray(); for (VmaAllocation alloc 
= m_AllocationList.Front(); alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) { json.BeginObject(true); alloc->PrintParameters(json); json.EndObject(); } json.EndArray(); } #endif // VMA_STATS_STRING_ENABLED bool VmaDedicatedAllocationList::IsEmpty() { VmaMutexLockRead lock(m_Mutex, m_UseMutex); return m_AllocationList.IsEmpty(); } void VmaDedicatedAllocationList::Register(VmaAllocation alloc) { VmaMutexLockWrite lock(m_Mutex, m_UseMutex); m_AllocationList.PushBack(alloc); } void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) { VmaMutexLockWrite lock(m_Mutex, m_UseMutex); m_AllocationList.Remove(alloc); } #endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS #endif // _VMA_DEDICATED_ALLOCATION_LIST #ifndef _VMA_SUBALLOCATION /* Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as allocated memory block or free. */ struct VmaSuballocation { VkDeviceSize offset; VkDeviceSize size; void* userData; VmaSuballocationType type; }; // Comparator for offsets. struct VmaSuballocationOffsetLess { bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const { return lhs.offset < rhs.offset; } }; struct VmaSuballocationOffsetGreater { bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const { return lhs.offset > rhs.offset; } }; struct VmaSuballocationItemSizeLess { bool operator()(const VmaSuballocationList::iterator lhs, const VmaSuballocationList::iterator rhs) const { return lhs->size < rhs->size; } bool operator()(const VmaSuballocationList::iterator lhs, VkDeviceSize rhsSize) const { return lhs->size < rhsSize; } }; #endif // _VMA_SUBALLOCATION #ifndef _VMA_ALLOCATION_REQUEST /* Parameters of planned allocation inside a VmaDeviceMemoryBlock. item points to a FREE suballocation. 
*/ struct VmaAllocationRequest { VmaAllocHandle allocHandle; VkDeviceSize size; VmaSuballocationList::iterator item; void* customData; uint64_t algorithmData; VmaAllocationRequestType type; }; #endif // _VMA_ALLOCATION_REQUEST #ifndef _VMA_BLOCK_METADATA /* Data structure used for bookkeeping of allocations and unused ranges of memory in a single VkDeviceMemory block. */ class VmaBlockMetadata { VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata) public: // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata() = default; virtual void Init(VkDeviceSize size) { m_Size = size; } bool IsVirtual() const { return m_IsVirtual; } VkDeviceSize GetSize() const { return m_Size; } // Validates all data structures inside this object. If not valid, returns false. virtual bool Validate() const = 0; virtual size_t GetAllocationCount() const = 0; virtual size_t GetFreeRegionsCount() const = 0; virtual VkDeviceSize GetSumFreeSize() const = 0; // Returns true if this block is empty - contains only single free suballocation. virtual bool IsEmpty() const = 0; virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; virtual VmaAllocHandle GetAllocationListBegin() const = 0; virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; // Shouldn't modify blockCount. 
virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; #if VMA_STATS_STRING_ENABLED virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; #endif // Tries to find a place for suballocation with given parameters inside this block. // If succeeded, fills pAllocationRequest and returns true. // If failed, returns false. virtual bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. uint32_t strategy, VmaAllocationRequest* pAllocationRequest) = 0; virtual VkResult CheckCorruption(const void* pBlockData) = 0; // Makes actual allocation based on request. Request must already be checked and valid. virtual void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, void* userData) = 0; // Frees suballocation assigned to given memory region. virtual void Free(VmaAllocHandle allocHandle) = 0; // Frees all allocations. // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! virtual void Clear() = 0; virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; virtual void DebugLogAllAllocations() const = 0; protected: const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); } void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; #if VMA_STATS_STRING_ENABLED // mapRefCount == UINT32_MAX means unspecified. 
void PrintDetailedMap_Begin(class VmaJsonWriter& json, VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const; void PrintDetailedMap_Allocation(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size, void* userData) const; void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size) const; void PrintDetailedMap_End(class VmaJsonWriter& json) const; #endif private: VkDeviceSize m_Size; const VkAllocationCallbacks* m_pAllocationCallbacks; const VkDeviceSize m_BufferImageGranularity; const bool m_IsVirtual; }; #ifndef _VMA_BLOCK_METADATA_FUNCTIONS VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual) : m_Size(0), m_pAllocationCallbacks(pAllocationCallbacks), m_BufferImageGranularity(bufferImageGranularity), m_IsVirtual(isVirtual) {} void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const { if (IsVirtual()) { VMA_LEAK_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p", offset, size, userData); } else { VMA_ASSERT(userData != VMA_NULL); VmaAllocation allocation = reinterpret_cast(userData); userData = allocation->GetUserData(); const char* name = allocation->GetName(); #if VMA_STATS_STRING_ENABLED VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %s; Usage: %" PRIu64, offset, size, userData, name ? name : "vma_empty", VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], (uint64_t)allocation->GetBufferImageUsage().Value); #else VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %u", offset, size, userData, name ? 
name : "vma_empty", (unsigned)allocation->GetSuballocationType()); #endif // VMA_STATS_STRING_ENABLED } } #if VMA_STATS_STRING_ENABLED void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const { json.WriteString("TotalBytes"); json.WriteNumber(GetSize()); json.WriteString("UnusedBytes"); json.WriteNumber(unusedBytes); json.WriteString("Allocations"); json.WriteNumber((uint64_t)allocationCount); json.WriteString("UnusedRanges"); json.WriteNumber((uint64_t)unusedRangeCount); json.WriteString("Suballocations"); json.BeginArray(); } void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size, void* userData) const { json.BeginObject(true); json.WriteString("Offset"); json.WriteNumber(offset); if (IsVirtual()) { json.WriteString("Size"); json.WriteNumber(size); if (userData) { json.WriteString("CustomData"); json.BeginString(); json.ContinueString_Pointer(userData); json.EndString(); } } else { ((VmaAllocation)userData)->PrintParameters(json); } json.EndObject(); } void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size) const { json.BeginObject(true); json.WriteString("Offset"); json.WriteNumber(offset); json.WriteString("Type"); json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); json.WriteString("Size"); json.WriteNumber(size); json.EndObject(); } void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const { json.EndArray(); } #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_BLOCK_METADATA_FUNCTIONS #endif // _VMA_BLOCK_METADATA #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY // Before deleting object of this class remember to call 'Destroy()' class VmaBlockBufferImageGranularity final { public: struct ValidationContext { const VkAllocationCallbacks* allocCallbacks; uint16_t* pageAllocs; }; 
VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); ~VmaBlockBufferImageGranularity(); bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); // Before destroying object you must call free it's memory void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); void RoundupAllocRequest(VmaSuballocationType allocType, VkDeviceSize& inOutAllocSize, VkDeviceSize& inOutAllocAlignment) const; bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, VkDeviceSize allocSize, VkDeviceSize blockOffset, VkDeviceSize blockSize, VmaSuballocationType allocType) const; void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); void FreePages(VkDeviceSize offset, VkDeviceSize size); void Clear(); ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const; bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; bool FinishValidation(ValidationContext& ctx) const; private: static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; struct RegionInfo { uint8_t allocType; uint16_t allocCount; }; VkDeviceSize m_BufferImageGranularity; uint32_t m_RegionCount; RegionInfo* m_RegionInfo; uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } uint32_t OffsetToPageIndex(VkDeviceSize offset) const; void AllocPage(RegionInfo& page, uint8_t allocType); }; #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) : m_BufferImageGranularity(bufferImageGranularity), m_RegionCount(0), m_RegionInfo(VMA_NULL) {} 
VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() { VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); } void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) { if (IsEnabled()) { m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); } } void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) { if (m_RegionInfo) { vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); m_RegionInfo = VMA_NULL; } } void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, VkDeviceSize& inOutAllocSize, VkDeviceSize& inOutAllocAlignment) const { if (m_BufferImageGranularity > 1 && m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) { if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) { inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); } } } bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, VkDeviceSize allocSize, VkDeviceSize blockOffset, VkDeviceSize blockSize, VmaSuballocationType allocType) const { if (IsEnabled()) { uint32_t startPage = GetStartPage(inOutAllocOffset); if (m_RegionInfo[startPage].allocCount > 0 && VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) { inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); if (blockSize < allocSize + inOutAllocOffset - blockOffset) return true; ++startPage; } uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); if (endPage != startPage && 
m_RegionInfo[endPage].allocCount > 0 && VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) { return true; } } return false; } void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) { if (IsEnabled()) { uint32_t startPage = GetStartPage(offset); AllocPage(m_RegionInfo[startPage], allocType); uint32_t endPage = GetEndPage(offset, size); if (startPage != endPage) AllocPage(m_RegionInfo[endPage], allocType); } } void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) { if (IsEnabled()) { uint32_t startPage = GetStartPage(offset); --m_RegionInfo[startPage].allocCount; if (m_RegionInfo[startPage].allocCount == 0) m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; uint32_t endPage = GetEndPage(offset, size); if (startPage != endPage) { --m_RegionInfo[endPage].allocCount; if (m_RegionInfo[endPage].allocCount == 0) m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; } } } void VmaBlockBufferImageGranularity::Clear() { if (m_RegionInfo) memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); } VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const { ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; if (!isVirutal && IsEnabled()) { ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); } return ctx; } bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const { if (IsEnabled()) { uint32_t start = GetStartPage(offset); ++ctx.pageAllocs[start]; VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); uint32_t end = GetEndPage(offset, size); if (start != end) { ++ctx.pageAllocs[end]; VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); } } return true; } bool 
VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const { // Check proper page structure if (IsEnabled()) { VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); for (uint32_t page = 0; page < m_RegionCount; ++page) { VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); } vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); ctx.pageAllocs = VMA_NULL; } return true; } uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const { return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); } void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) { // When current alloc type is free then it can be overridden by new type if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) page.allocType = allocType; ++page.allocCount; } #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY #ifndef _VMA_BLOCK_METADATA_LINEAR /* Allocations and their references in internal data structure look like this: if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): 0 +-------+ | | | | | | +-------+ | Alloc | 1st[m_1stNullItemsBeginCount] +-------+ | Alloc | 1st[m_1stNullItemsBeginCount + 1] +-------+ | ... | +-------+ | Alloc | 1st[1st.size() - 1] +-------+ | | | | | | GetSize() +-------+ if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): 0 +-------+ | Alloc | 2nd[0] +-------+ | Alloc | 2nd[1] +-------+ | ... | +-------+ | Alloc | 2nd[2nd.size() - 1] +-------+ | | | | | | +-------+ | Alloc | 1st[m_1stNullItemsBeginCount] +-------+ | Alloc | 1st[m_1stNullItemsBeginCount + 1] +-------+ | ... | +-------+ | Alloc | 1st[1st.size() - 1] +-------+ | | GetSize() +-------+ if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): 0 +-------+ | | | | | | +-------+ | Alloc | 1st[m_1stNullItemsBeginCount] +-------+ | Alloc | 1st[m_1stNullItemsBeginCount + 1] +-------+ | ... 
| +-------+ | Alloc | 1st[1st.size() - 1] +-------+ | | | | | | +-------+ | Alloc | 2nd[2nd.size() - 1] +-------+ | ... | +-------+ | Alloc | 2nd[1] +-------+ | Alloc | 2nd[0] GetSize() +-------+ */ class VmaBlockMetadata_Linear : public VmaBlockMetadata { VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear) public: VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_Linear() = default; VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } bool IsEmpty() const override { return GetAllocationCount() == 0; } VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } void Init(VkDeviceSize size) override; bool Validate() const override; size_t GetAllocationCount() const override; size_t GetFreeRegionsCount() const override; void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest) override; VkResult CheckCorruption(const void* pBlockData) override; void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, void* userData) override; void Free(VmaAllocHandle allocHandle) override; void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; VmaAllocHandle GetAllocationListBegin() const override; VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle 
allocHandle, void* userData) override; void DebugLogAllAllocations() const override; private: /* There are two suballocation vectors, used in ping-pong way. The one with index m_1stVectorIndex is called 1st. The one with index (m_1stVectorIndex ^ 1) is called 2nd. 2nd can be non-empty only when 1st is not empty. When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. */ typedef VmaVector> SuballocationVectorType; enum SECOND_VECTOR_MODE { SECOND_VECTOR_EMPTY, /* Suballocations in 2nd vector are created later than the ones in 1st, but they all have smaller offset. */ SECOND_VECTOR_RING_BUFFER, /* Suballocations in 2nd vector are upper side of double stack. They all have offsets higher than those in 1st vector. Top of this stack means smaller offsets, but higher indices in this vector. */ SECOND_VECTOR_DOUBLE_STACK, }; VkDeviceSize m_SumFreeSize; SuballocationVectorType m_Suballocations0, m_Suballocations1; uint32_t m_1stVectorIndex; SECOND_VECTOR_MODE m_2ndVectorMode; // Number of items in 1st vector with hAllocation = null at the beginning. size_t m_1stNullItemsBeginCount; // Number of other items in 1st vector with hAllocation = null somewhere in the middle. size_t m_1stNullItemsMiddleCount; // Number of items in 2nd vector with hAllocation = null. size_t m_2ndNullItemsCount; SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? 
m_Suballocations0 : m_Suballocations1; } VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; bool ShouldCompact1st() const; void CleanupAfterFree(); bool CreateAllocationRequest_LowerAddress( VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest); bool CreateAllocationRequest_UpperAddress( VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest); }; #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual) : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), m_SumFreeSize(0), m_Suballocations0(VmaStlAllocator(pAllocationCallbacks)), m_Suballocations1(VmaStlAllocator(pAllocationCallbacks)), m_1stVectorIndex(0), m_2ndVectorMode(SECOND_VECTOR_EMPTY), m_1stNullItemsBeginCount(0), m_1stNullItemsMiddleCount(0), m_2ndNullItemsCount(0) {} void VmaBlockMetadata_Linear::Init(VkDeviceSize size) { VmaBlockMetadata::Init(size); m_SumFreeSize = size; } bool VmaBlockMetadata_Linear::Validate() const { const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); VMA_VALIDATE(!suballocations1st.empty() || suballocations2nd.empty() || m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); if (!suballocations1st.empty()) { // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE); // Null item at the end should be just pop_back(). 
VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE); } if (!suballocations2nd.empty()) { // Null item at the end should be just pop_back(). VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); } VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); VkDeviceSize sumUsedSize = 0; const size_t suballoc1stCount = suballocations1st.size(); const VkDeviceSize debugMargin = GetDebugMargin(); VkDeviceSize offset = 0; if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { const size_t suballoc2ndCount = suballocations2nd.size(); size_t nullItem2ndCount = 0; for (size_t i = 0; i < suballoc2ndCount; ++i) { const VmaSuballocation& suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; if (!IsVirtual()) { VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); } VMA_VALIDATE(suballoc.offset >= offset); if (!currFree) { if (!IsVirtual()) { VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; } else { ++nullItem2ndCount; } offset = suballoc.offset + suballoc.size + debugMargin; } VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); } for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) { const VmaSuballocation& suballoc = suballocations1st[i]; VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && suballoc.userData == VMA_NULL); } size_t nullItem1stCount = m_1stNullItemsBeginCount; for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) { const VmaSuballocation& suballoc = suballocations1st[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; if (!IsVirtual()) { VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); } 
VMA_VALIDATE(suballoc.offset >= offset); VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); if (!currFree) { if (!IsVirtual()) { VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; } else { ++nullItem1stCount; } offset = suballoc.offset + suballoc.size + debugMargin; } VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { const size_t suballoc2ndCount = suballocations2nd.size(); size_t nullItem2ndCount = 0; for (size_t i = suballoc2ndCount; i--; ) { const VmaSuballocation& suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; if (!IsVirtual()) { VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); } VMA_VALIDATE(suballoc.offset >= offset); if (!currFree) { if (!IsVirtual()) { VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; } else { ++nullItem2ndCount; } offset = suballoc.offset + suballoc.size + debugMargin; } VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); } VMA_VALIDATE(offset <= GetSize()); VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); return true; } size_t VmaBlockMetadata_Linear::GetAllocationCount() const { return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + AccessSuballocations2nd().size() - m_2ndNullItemsCount; } size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const { // Function only used for defragmentation, which is disabled for this algorithm VMA_ASSERT(0); return SIZE_MAX; } void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { const VkDeviceSize size = GetSize(); const SuballocationVectorType& suballocations1st = 
AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); inoutStats.statistics.blockCount++; inoutStats.statistics.blockBytes += size; VkDeviceSize lastOffset = 0; if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = 0; while (lastOffset < freeSpace2ndTo1stEnd) { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. else { // There is free space from lastOffset to freeSpace2ndTo1stEnd. if (lastOffset < freeSpace2ndTo1stEnd) { const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. lastOffset = freeSpace2ndTo1stEnd; } } } size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; const VkDeviceSize freeSpace1stTo2ndEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? 
suballocations2nd.back().offset : size; while (lastOffset < freeSpace1stTo2ndEnd) { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. else { // There is free space from lastOffset to freeSpace1stTo2ndEnd. if (lastOffset < freeSpace1stTo2ndEnd) { const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. lastOffset = freeSpace1stTo2ndEnd; } } if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; while (lastOffset < size) { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. 
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. else { // There is free space from lastOffset to size. if (lastOffset < size) { const VkDeviceSize unusedRangeSize = size - lastOffset; VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. lastOffset = size; } } } } void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const { const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const VkDeviceSize size = GetSize(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); inoutStats.blockCount++; inoutStats.blockBytes += size; inoutStats.allocationBytes += size - m_SumFreeSize; VkDeviceSize lastOffset = 0; if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; while (lastOffset < freeSpace2ndTo1stEnd) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; // Prepare for next iteration. 
lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. else { // End of loop. lastOffset = freeSpace2ndTo1stEnd; } } } size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; const VkDeviceSize freeSpace1stTo2ndEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; while (lastOffset < freeSpace1stTo2ndEnd) { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; // Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. else { // End of loop. lastOffset = freeSpace1stTo2ndEnd; } } if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; while (lastOffset < size) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; // Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. else { // End of loop. 
lastOffset = size; } } } } #if VMA_STATS_STRING_ENABLED void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { const VkDeviceSize size = GetSize(); const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); // FIRST PASS size_t unusedRangeCount = 0; VkDeviceSize usedBytes = 0; VkDeviceSize lastOffset = 0; size_t alloc2ndCount = 0; if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = 0; while (lastOffset < freeSpace2ndTo1stEnd) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc2ndCount; usedBytes += suballoc.size; // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. else { if (lastOffset < freeSpace2ndTo1stEnd) { // There is free space from lastOffset to freeSpace2ndTo1stEnd. ++unusedRangeCount; } // End of loop. lastOffset = freeSpace2ndTo1stEnd; } } } size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; size_t alloc1stCount = 0; const VkDeviceSize freeSpace1stTo2ndEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? 
suballocations2nd.back().offset : size; while (lastOffset < freeSpace1stTo2ndEnd) { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc1stCount; usedBytes += suballoc.size; // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. else { if (lastOffset < freeSpace1stTo2ndEnd) { // There is free space from lastOffset to freeSpace1stTo2ndEnd. ++unusedRangeCount; } // End of loop. lastOffset = freeSpace1stTo2ndEnd; } } if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; while (lastOffset < size) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc2ndCount; usedBytes += suballoc.size; // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. 
else { if (lastOffset < size) { // There is free space from lastOffset to size. ++unusedRangeCount; } // End of loop. lastOffset = size; } } } const VkDeviceSize unusedBytes = size - usedBytes; PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); // SECOND PASS lastOffset = 0; if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = 0; while (lastOffset < freeSpace2ndTo1stEnd) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. else { if (lastOffset < freeSpace2ndTo1stEnd) { // There is free space from lastOffset to freeSpace2ndTo1stEnd. const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } // End of loop. lastOffset = freeSpace2ndTo1stEnd; } } } nextAlloc1stIndex = m_1stNullItemsBeginCount; while (lastOffset < freeSpace1stTo2ndEnd) { // Find next non-null allocation or move nextAllocIndex to the end. 
while (nextAlloc1stIndex < suballoc1stCount && suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. else { if (lastOffset < freeSpace1stTo2ndEnd) { // There is free space from lastOffset to freeSpace1stTo2ndEnd. const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } // End of loop. lastOffset = freeSpace1stTo2ndEnd; } } if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; while (lastOffset < size) { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } // 2. Process this allocation. 
// There is allocation with suballoc.offset, suballoc.size.
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
--nextAlloc2ndIndex;
}
// We are at the end.
else
{
    if (lastOffset < size)
    {
        // There is free space from lastOffset to size.
        const VkDeviceSize unusedRangeSize = size - lastOffset;
        PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    }
    // End of loop.
    lastOffset = size;
}
}
}

PrintDetailedMap_End(json);
}
#endif // VMA_STATS_STRING_ENABLED

// Entry point of the allocation search for the linear algorithm.
// Dispatches to the upper-address strategy (top of the double stack) or the
// lower-address strategy (end of 1st vector, or wrap-around into the 2nd
// vector as a ring buffer). On success fills *pAllocationRequest and returns
// true; returns false if the request cannot be satisfied in this block.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // Early out: request larger than the whole block can never fit.
    if(allocSize > GetSize())
        return false;

    pAllocationRequest->size = allocSize;
    return upperAddress ?
        CreateAllocationRequest_UpperAddress(
            allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
        CreateAllocationRequest_LowerAddress(
            allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
}

// Verifies the magic value written right after every live (non-free)
// suballocation in both vectors. Returns VK_ERROR_UNKNOWN_COPY on the first
// overwritten marker, VK_SUCCESS otherwise. Only valid for non-virtual blocks
// (virtual blocks have no backing memory to inspect).
VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
{
    VMA_ASSERT(!IsVirtual());
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    // 1st vector: skip the leading run of already-freed (null) items.
    for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations2nd[i];
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    return VK_SUCCESS;
}

// Commits a request produced by CreateAllocationRequest*() into the metadata:
// appends the new suballocation to the proper vector, switches the 2nd-vector
// mode where needed, and updates the free-size accounting.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    // allocHandle encodes offset + 1 (see CreateAllocationRequest_*Address).
    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
    const VmaSuballocation newSuballoc = { offset, request.size, userData, type };

    switch (request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        // First upper-address allocation turns the 2nd vector into the upper stack.
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    break;
    case VmaAllocationRequestType::EndOf1st:
    {
        SuballocationVectorType& suballocations1st =
AccessSuballocations1st(); VMA_ASSERT(suballocations1st.empty() || offset >= suballocations1st.back().offset + suballocations1st.back().size); // Check if it fits before the end of the block. VMA_ASSERT(offset + request.size <= GetSize()); suballocations1st.push_back(newSuballoc); } break; case VmaAllocationRequestType::EndOf2nd: { SuballocationVectorType& suballocations1st = AccessSuballocations1st(); // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. VMA_ASSERT(!suballocations1st.empty() && offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); switch (m_2ndVectorMode) { case SECOND_VECTOR_EMPTY: // First allocation from second part ring buffer. VMA_ASSERT(suballocations2nd.empty()); m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; break; case SECOND_VECTOR_RING_BUFFER: // 2-part ring buffer is already started. VMA_ASSERT(!suballocations2nd.empty()); break; case SECOND_VECTOR_DOUBLE_STACK: VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); break; default: VMA_ASSERT(0); } suballocations2nd.push_back(newSuballoc); } break; default: VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); } m_SumFreeSize -= newSuballoc.size; } void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) { SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; if (!suballocations1st.empty()) { // First allocation: Mark it as next empty at the beginning. 
VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; if (firstSuballoc.offset == offset) { firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; firstSuballoc.userData = VMA_NULL; m_SumFreeSize += firstSuballoc.size; ++m_1stNullItemsBeginCount; CleanupAfterFree(); return; } } // Last allocation in 2-part ring buffer or top of upper stack (same logic). if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { VmaSuballocation& lastSuballoc = suballocations2nd.back(); if (lastSuballoc.offset == offset) { m_SumFreeSize += lastSuballoc.size; suballocations2nd.pop_back(); CleanupAfterFree(); return; } } // Last allocation in 1st vector. else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) { VmaSuballocation& lastSuballoc = suballocations1st.back(); if (lastSuballoc.offset == offset) { m_SumFreeSize += lastSuballoc.size; suballocations1st.pop_back(); CleanupAfterFree(); return; } } VmaSuballocation refSuballoc; refSuballoc.offset = offset; // Rest of members stays uninitialized intentionally for better performance. // Item from the middle of 1st vector. { const SuballocationVectorType::iterator it = VmaBinaryFindSorted( suballocations1st.begin() + m_1stNullItemsBeginCount, suballocations1st.end(), refSuballoc, VmaSuballocationOffsetLess()); if (it != suballocations1st.end()) { it->type = VMA_SUBALLOCATION_TYPE_FREE; it->userData = VMA_NULL; ++m_1stNullItemsMiddleCount; m_SumFreeSize += it->size; CleanupAfterFree(); return; } } if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) { // Item from the middle of 2nd vector. const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); if (it != suballocations2nd.end()) { it->type = VMA_SUBALLOCATION_TYPE_FREE; it->userData = VMA_NULL; ++m_2ndNullItemsCount; m_SumFreeSize += it->size; CleanupAfterFree(); return; } } VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); } void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) { outInfo.offset = (VkDeviceSize)allocHandle - 1; VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); outInfo.size = suballoc.size; outInfo.pUserData = suballoc.userData; } void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const { return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; } VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const { // Function only used for defragmentation, which is disabled for this algorithm VMA_ASSERT(0); return VK_NULL_HANDLE; } VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const { // Function only used for defragmentation, which is disabled for this algorithm VMA_ASSERT(0); return VK_NULL_HANDLE; } VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const { // Function only used for defragmentation, which is disabled for this algorithm VMA_ASSERT(0); return 0; } void VmaBlockMetadata_Linear::Clear() { m_SumFreeSize = GetSize(); m_Suballocations0.clear(); m_Suballocations1.clear(); // Leaving m_1stVectorIndex unchanged - it doesn't matter. 
m_2ndVectorMode = SECOND_VECTOR_EMPTY; m_1stNullItemsBeginCount = 0; m_1stNullItemsMiddleCount = 0; m_2ndNullItemsCount = 0; } void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) { VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); suballoc.userData = userData; } void VmaBlockMetadata_Linear::DebugLogAllAllocations() const { const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) if (it->type != VMA_SUBALLOCATION_TYPE_FREE) DebugLogAllocation(it->offset, it->size, it->userData); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) if (it->type != VMA_SUBALLOCATION_TYPE_FREE) DebugLogAllocation(it->offset, it->size, it->userData); } VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const { const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); VmaSuballocation refSuballoc; refSuballoc.offset = offset; // Rest of members stays uninitialized intentionally for better performance. // Item from the 1st vector. { SuballocationVectorType::const_iterator it = VmaBinaryFindSorted( suballocations1st.begin() + m_1stNullItemsBeginCount, suballocations1st.end(), refSuballoc, VmaSuballocationOffsetLess()); if (it != suballocations1st.end()) { return const_cast(*it); } } if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) { // Rest of members stays uninitialized intentionally for better performance. SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); if (it != suballocations2nd.end()) { return const_cast(*it); } } VMA_ASSERT(0 && "Allocation not found in linear allocator!"); return const_cast(suballocations1st.back()); // Should never occur. } bool VmaBlockMetadata_Linear::ShouldCompact1st() const { const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; const size_t suballocCount = AccessSuballocations1st().size(); return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; } void VmaBlockMetadata_Linear::CleanupAfterFree() { SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); if (IsEmpty()) { suballocations1st.clear(); suballocations2nd.clear(); m_1stNullItemsBeginCount = 0; m_1stNullItemsMiddleCount = 0; m_2ndNullItemsCount = 0; m_2ndVectorMode = SECOND_VECTOR_EMPTY; } else { const size_t suballoc1stCount = suballocations1st.size(); const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; VMA_ASSERT(nullItem1stCount <= suballoc1stCount); // Find more null items at the beginning of 1st vector. while (m_1stNullItemsBeginCount < suballoc1stCount && suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) { ++m_1stNullItemsBeginCount; --m_1stNullItemsMiddleCount; } // Find more null items at the end of 1st vector. while (m_1stNullItemsMiddleCount > 0 && suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE) { --m_1stNullItemsMiddleCount; suballocations1st.pop_back(); } // Find more null items at the end of 2nd vector. 
while (m_2ndNullItemsCount > 0 && suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE) { --m_2ndNullItemsCount; suballocations2nd.pop_back(); } // Find more null items at the beginning of 2nd vector. while (m_2ndNullItemsCount > 0 && suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE) { --m_2ndNullItemsCount; VmaVectorRemove(suballocations2nd, 0); } if (ShouldCompact1st()) { const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount; size_t srcIndex = m_1stNullItemsBeginCount; for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) { while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE) { ++srcIndex; } if (dstIndex != srcIndex) { suballocations1st[dstIndex] = suballocations1st[srcIndex]; } ++srcIndex; } suballocations1st.resize(nonNullItemCount); m_1stNullItemsBeginCount = 0; m_1stNullItemsMiddleCount = 0; } // 2nd vector became empty. if (suballocations2nd.empty()) { m_2ndVectorMode = SECOND_VECTOR_EMPTY; } // 1st vector became empty. if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) { suballocations1st.clear(); m_1stNullItemsBeginCount = 0; if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { // Swap 1st with 2nd. Now 2nd is empty. 
m_2ndVectorMode = SECOND_VECTOR_EMPTY; m_1stNullItemsMiddleCount = m_2ndNullItemsCount; while (m_1stNullItemsBeginCount < suballocations2nd.size() && suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) { ++m_1stNullItemsBeginCount; --m_1stNullItemsMiddleCount; } m_2ndNullItemsCount = 0; m_1stVectorIndex ^= 1; } } } VMA_HEAVY_ASSERT(Validate()); } bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest) { const VkDeviceSize blockSize = GetSize(); const VkDeviceSize debugMargin = GetDebugMargin(); const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { // Try to allocate at the end of 1st vector. VkDeviceSize resultBaseOffset = 0; if (!suballocations1st.empty()) { const VmaSuballocation& lastSuballoc = suballocations1st.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; } // Start from offset equal to beginning of free space. VkDeviceSize resultOffset = resultBaseOffset; // Apply alignment. resultOffset = VmaAlignUp(resultOffset, allocAlignment); // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. 
if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) { bool bufferImageGranularityConflict = false; for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) { const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { bufferImageGranularityConflict = true; break; } } else // Already on previous page. break; } if (bufferImageGranularityConflict) { resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); } } const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : blockSize; // There is enough free space at the end after alignment. if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) { // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) { const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { return false; } } else { // Already on previous page. break; } } } // All tests passed: Success. pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); // pAllocationRequest->item, customData unused. pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; return true; } } // Wrap-around to end of 2nd vector. Try to allocate there, watching for the // beginning of 1st vector as the end of free space. 
if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { VMA_ASSERT(!suballocations1st.empty()); VkDeviceSize resultBaseOffset = 0; if (!suballocations2nd.empty()) { const VmaSuballocation& lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; } // Start from offset equal to beginning of free space. VkDeviceSize resultOffset = resultBaseOffset; // Apply alignment. resultOffset = VmaAlignUp(resultOffset, allocAlignment); // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) { bool bufferImageGranularityConflict = false; for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) { const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { bufferImageGranularityConflict = true; break; } } else // Already on previous page. break; } if (bufferImageGranularityConflict) { resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); } } size_t index1st = m_1stNullItemsBeginCount; // There is enough free space at the end after alignment. if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) { // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. 
if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) { for (size_t nextSuballocIndex = index1st; nextSuballocIndex < suballocations1st.size(); nextSuballocIndex++) { const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { return false; } } else { // Already on next page. break; } } } // All tests passed: Success. pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; // pAllocationRequest->item, customData unused. return true; } } return false; } bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest) { const VkDeviceSize blockSize = GetSize(); const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); return false; } // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). if (allocSize > blockSize) { return false; } VkDeviceSize resultBaseOffset = blockSize - allocSize; if (!suballocations2nd.empty()) { const VmaSuballocation& lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset - allocSize; if (allocSize > lastSuballoc.offset) { return false; } } // Start from offset equal to end of free space. VkDeviceSize resultOffset = resultBaseOffset; const VkDeviceSize debugMargin = GetDebugMargin(); // Apply debugMargin at the end. 
if (debugMargin > 0) { if (resultOffset < debugMargin) { return false; } resultOffset -= debugMargin; } // Apply alignment. resultOffset = VmaAlignDown(resultOffset, allocAlignment); // Check next suballocations from 2nd for BufferImageGranularity conflicts. // Make bigger alignment if necessary. if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) { bool bufferImageGranularityConflict = false; for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) { const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) { bufferImageGranularityConflict = true; break; } } else // Already on previous page. break; } if (bufferImageGranularityConflict) { resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); } } // There is enough free space. const VkDeviceSize endOf1st = !suballocations1st.empty() ? suballocations1st.back().offset + suballocations1st.back().size : 0; if (endOf1st + debugMargin <= resultOffset) { // Check previous suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. if (bufferImageGranularity > 1) { for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) { const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) { return false; } } else { // Already on next page. break; } } } // All tests passed: Success. pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); // pAllocationRequest->item unused. 
pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; return true; } return false; } #endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS #endif // _VMA_BLOCK_METADATA_LINEAR #ifndef _VMA_BLOCK_METADATA_TLSF // To not search current larger region if first allocation won't succeed and skip to smaller range // use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest(). // When fragmentation and reusal of previous blocks doesn't matter then use with // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible. class VmaBlockMetadata_TLSF : public VmaBlockMetadata { VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF) public: VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_TLSF(); size_t GetAllocationCount() const override { return m_AllocCount; } size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } bool IsEmpty() const override { return m_NullBlock->offset == 0; } VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; } void Init(VkDeviceSize size) override; bool Validate() const override; void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest) override; VkResult CheckCorruption(const void* pBlockData) override; void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, void* userData) override; void Free(VmaAllocHandle allocHandle) override; 
void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; VmaAllocHandle GetAllocationListBegin() const override; VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; private: // According to original paper it should be preferable 4 or 5: // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf static const uint8_t SECOND_LEVEL_INDEX = 5; static const uint16_t SMALL_BUFFER_SIZE = 256; static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; static const uint8_t MEMORY_CLASS_SHIFT = 7; static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; class Block { public: VkDeviceSize offset; VkDeviceSize size; Block* prevPhysical; Block* nextPhysical; void MarkFree() { prevFree = VMA_NULL; } void MarkTaken() { prevFree = this; } bool IsFree() const { return prevFree != this; } void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } Block*& PrevFree() { return prevFree; } Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } private: Block* prevFree; // Address of the same block here indicates that block is taken union { Block* nextFree; void* userData; }; }; size_t m_AllocCount; // Total number of free blocks besides null block size_t m_BlocksFreeCount; // Total size of free blocks excluding null block VkDeviceSize m_BlocksFreeSize; uint32_t m_IsFreeBitmap; uint8_t m_MemoryClasses; uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; uint32_t m_ListsCount; /* * 0: 0-3 lists for small buffers * 1+: 0-(2^SLI-1) lists for normal buffers */ Block** m_FreeList; VmaPoolAllocator m_BlockAllocator; 
Block* m_NullBlock; VmaBlockBufferImageGranularity m_GranularityHandler; uint8_t SizeToMemoryClass(VkDeviceSize size) const; uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; uint32_t GetListIndex(VkDeviceSize size) const; void RemoveFreeBlock(Block* block); void InsertFreeBlock(Block* block); void MergeBlock(Block* block, Block* prev); Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; bool CheckBlock( Block& block, uint32_t listIndex, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, VmaAllocationRequest* pAllocationRequest); }; #ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize bufferImageGranularity, bool isVirtual) : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), m_AllocCount(0), m_BlocksFreeCount(0), m_BlocksFreeSize(0), m_IsFreeBitmap(0), m_MemoryClasses(0), m_ListsCount(0), m_FreeList(VMA_NULL), m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), m_NullBlock(VMA_NULL), m_GranularityHandler(bufferImageGranularity) {} VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() { if (m_FreeList) vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); m_GranularityHandler.Destroy(GetAllocationCallbacks()); } void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) { VmaBlockMetadata::Init(size); if (!IsVirtual()) m_GranularityHandler.Init(GetAllocationCallbacks(), size); m_NullBlock = m_BlockAllocator.Alloc(); m_NullBlock->size = size; m_NullBlock->offset = 0; m_NullBlock->prevPhysical = VMA_NULL; m_NullBlock->nextPhysical = VMA_NULL; m_NullBlock->MarkFree(); m_NullBlock->NextFree() = VMA_NULL; m_NullBlock->PrevFree() = VMA_NULL; uint8_t memoryClass = SizeToMemoryClass(size); uint16_t sli = SizeToSecondIndex(size, memoryClass); m_ListsCount = (memoryClass == 0 ? 
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; if (IsVirtual()) m_ListsCount += 1UL << SECOND_LEVEL_INDEX; else m_ListsCount += 4; m_MemoryClasses = memoryClass + uint8_t(2); memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); } bool VmaBlockMetadata_TLSF::Validate() const { VMA_VALIDATE(GetSumFreeSize() <= GetSize()); VkDeviceSize calculatedSize = m_NullBlock->size; VkDeviceSize calculatedFreeSize = m_NullBlock->size; size_t allocCount = 0; size_t freeCount = 0; // Check integrity of free lists for (uint32_t list = 0; list < m_ListsCount; ++list) { Block* block = m_FreeList[list]; if (block != VMA_NULL) { VMA_VALIDATE(block->IsFree()); VMA_VALIDATE(block->PrevFree() == VMA_NULL); while (block->NextFree()) { VMA_VALIDATE(block->NextFree()->IsFree()); VMA_VALIDATE(block->NextFree()->PrevFree() == block); block = block->NextFree(); } } } VkDeviceSize nextOffset = m_NullBlock->offset; auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); if (m_NullBlock->prevPhysical) { VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); } // Check all blocks for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) { VMA_VALIDATE(prev->offset + prev->size == nextOffset); nextOffset = prev->offset; calculatedSize += prev->size; uint32_t listIndex = GetListIndex(prev->size); if (prev->IsFree()) { ++freeCount; // Check if free block belongs to free list Block* freeBlock = m_FreeList[listIndex]; VMA_VALIDATE(freeBlock != VMA_NULL); bool found = false; do { if (freeBlock == prev) found = true; freeBlock = freeBlock->NextFree(); } while (!found && freeBlock != VMA_NULL); VMA_VALIDATE(found); calculatedFreeSize += prev->size; } else { ++allocCount; // Check if taken block is not on a 
free list Block* freeBlock = m_FreeList[listIndex]; while (freeBlock) { VMA_VALIDATE(freeBlock != prev); freeBlock = freeBlock->NextFree(); } if (!IsVirtual()) { VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); } } if (prev->prevPhysical) { VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); } } if (!IsVirtual()) { VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); } VMA_VALIDATE(nextOffset == 0); VMA_VALIDATE(calculatedSize == GetSize()); VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); VMA_VALIDATE(allocCount == m_AllocCount); VMA_VALIDATE(freeCount == m_BlocksFreeCount); return true; } void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { inoutStats.statistics.blockCount++; inoutStats.statistics.blockBytes += GetSize(); if (m_NullBlock->size > 0) VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { if (block->IsFree()) VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); else VmaAddDetailedStatisticsAllocation(inoutStats, block->size); } } void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const { inoutStats.blockCount++; inoutStats.allocationCount += (uint32_t)m_AllocCount; inoutStats.blockBytes += GetSize(); inoutStats.allocationBytes += GetSize() - GetSumFreeSize(); } #if VMA_STATS_STRING_ENABLED void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const { size_t blockCount = m_AllocCount + m_BlocksFreeCount; VmaStlAllocator allocator(GetAllocationCallbacks()); VmaVector> blockList(blockCount, allocator); size_t i = blockCount; for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { blockList[--i] = block; } VMA_ASSERT(i == 0); VmaDetailedStatistics stats; VmaClearDetailedStatistics(stats); AddDetailedStatistics(stats); PrintDetailedMap_Begin(json, 
stats.statistics.blockBytes - stats.statistics.allocationBytes, stats.statistics.allocationCount, stats.unusedRangeCount); for (; i < blockCount; ++i) { Block* block = blockList[i]; if (block->IsFree()) PrintDetailedMap_UnusedRange(json, block->offset, block->size); else PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData()); } if (m_NullBlock->size > 0) PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size); PrintDetailedMap_End(json); } #endif bool VmaBlockMetadata_TLSF::CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest) { VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!"); VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); // For small granularity round up if (!IsVirtual()) m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment); allocSize += GetDebugMargin(); // Quick check for too small pool if (allocSize > GetSumFreeSize()) return false; // If no free blocks in pool then check only null block if (m_BlocksFreeCount == 0) return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest); // Round up to the next block VkDeviceSize sizeForNextList = allocSize; VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 
1 << SECOND_LEVEL_INDEX : 4)); if (allocSize > SMALL_BUFFER_SIZE) { sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX)); } else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep) sizeForNextList = SMALL_BUFFER_SIZE + 1; else sizeForNextList += smallSizeStep; uint32_t nextListIndex = m_ListsCount; uint32_t prevListIndex = m_ListsCount; Block* nextListBlock = VMA_NULL; Block* prevListBlock = VMA_NULL; // Check blocks according to strategies if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) { // Quick check for larger block first nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; // If not fitted then null block if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; // Null block failed, search larger bucket while (nextListBlock) { if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; nextListBlock = nextListBlock->NextFree(); } // Failed again, check best fit bucket prevListBlock = FindFreeBlock(allocSize, prevListIndex); while (prevListBlock) { if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; prevListBlock = prevListBlock->NextFree(); } } else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) { // Check best fit bucket prevListBlock = FindFreeBlock(allocSize, prevListIndex); while (prevListBlock) { if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; prevListBlock = prevListBlock->NextFree(); } // If failed check null block if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; // Check larger bucket nextListBlock = FindFreeBlock(sizeForNextList, 
nextListIndex); while (nextListBlock) { if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; nextListBlock = nextListBlock->NextFree(); } } else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ) { // Perform search from the start VmaStlAllocator allocator(GetAllocationCallbacks()); VmaVector> blockList(m_BlocksFreeCount, allocator); size_t i = m_BlocksFreeCount; for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { if (block->IsFree() && block->size >= allocSize) blockList[--i] = block; } for (; i < m_BlocksFreeCount; ++i) { Block& block = *blockList[i]; if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) return true; } // If failed check null block if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; // Whole range searched, no more memory return false; } else { // Check larger bucket nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); while (nextListBlock) { if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; nextListBlock = nextListBlock->NextFree(); } // If failed check null block if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; // Check best fit bucket prevListBlock = FindFreeBlock(allocSize, prevListIndex); while (prevListBlock) { if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; prevListBlock = prevListBlock->NextFree(); } } // Worst case, full search has to be done while (++nextListIndex < m_ListsCount) { nextListBlock = m_FreeList[nextListIndex]; while (nextListBlock) { if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) return true; nextListBlock = 
nextListBlock->NextFree();
        }
    }

    // No more memory sadly
    return false;
}

// Verifies the magic value stored right past each taken allocation.
// NOTE(review): VK_ERROR_UNKNOWN_COPY is presumably a file-local stand-in for
// VK_ERROR_UNKNOWN (defined outside this chunk) — confirm before changing.
VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
{
    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
    {
        if (!block->IsFree())
        {
            if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    return VK_SUCCESS;
}

// Commits a request produced by CreateAllocationRequest(): pops the chosen
// block from its free list, splits off alignment padding in front and the
// unused tail behind, and marks the block taken. The statement order below is
// significant — free-list bookkeeping depends on block sizes at each step.
void VmaBlockMetadata_TLSF::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);

    // Get block and pop it from the free list
    Block* currentBlock = (Block*)request.allocHandle;
    VkDeviceSize offset = request.algorithmData;
    VMA_ASSERT(currentBlock != VMA_NULL);
    VMA_ASSERT(currentBlock->offset <= offset);

    if (currentBlock != m_NullBlock)
        RemoveFreeBlock(currentBlock);

    VkDeviceSize debugMargin = GetDebugMargin();
    VkDeviceSize missingAlignment = offset - currentBlock->offset;

    // Append missing alignment to prev block or create new one
    if (missingAlignment)
    {
        Block* prevBlock = currentBlock->prevPhysical;
        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");

        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
        {
            uint32_t oldList = GetListIndex(prevBlock->size);
            prevBlock->size += missingAlignment;
            // Check if new size crosses list bucket
            if (oldList != GetListIndex(prevBlock->size))
            {
                // Moved to a different bucket: re-insert at the original size
                // so RemoveFreeBlock finds it on the correct list.
                prevBlock->size -= missingAlignment;
                RemoveFreeBlock(prevBlock);
                prevBlock->size += missingAlignment;
                InsertFreeBlock(prevBlock);
            }
            else
                m_BlocksFreeSize += missingAlignment;
        }
        else
        {
            // Insert a fresh free block covering the padding.
            Block* newBlock = m_BlockAllocator.Alloc();
            currentBlock->prevPhysical = newBlock;
            prevBlock->nextPhysical = newBlock;
            newBlock->prevPhysical = prevBlock;
            newBlock->nextPhysical = currentBlock;
            newBlock->size = missingAlignment;
            newBlock->offset = currentBlock->offset;
            // MarkTaken first: InsertFreeBlock asserts the block is not already free.
            newBlock->MarkTaken();

            InsertFreeBlock(newBlock);
        }

        currentBlock->size -= missingAlignment;
        currentBlock->offset += missingAlignment;
    }

    VkDeviceSize size = request.size + debugMargin;
    if (currentBlock->size == size)
    {
        if (currentBlock == m_NullBlock)
        {
            // Setup new null block
            m_NullBlock = m_BlockAllocator.Alloc();
            m_NullBlock->size = 0;
            m_NullBlock->offset = currentBlock->offset + size;
            m_NullBlock->prevPhysical = currentBlock;
            m_NullBlock->nextPhysical = VMA_NULL;
            m_NullBlock->MarkFree();
            m_NullBlock->PrevFree() = VMA_NULL;
            m_NullBlock->NextFree() = VMA_NULL;
            currentBlock->nextPhysical = m_NullBlock;
            currentBlock->MarkTaken();
        }
    }
    else
    {
        VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");

        // Create new free block
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = currentBlock->size - size;
        newBlock->offset = currentBlock->offset + size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        currentBlock->nextPhysical = newBlock;
        currentBlock->size = size;

        if (currentBlock == m_NullBlock)
        {
            // The tail becomes the new null block.
            m_NullBlock = newBlock;
            m_NullBlock->MarkFree();
            m_NullBlock->NextFree() = VMA_NULL;
            m_NullBlock->PrevFree() = VMA_NULL;
            currentBlock->MarkTaken();
        }
        else
        {
            newBlock->nextPhysical->prevPhysical = newBlock;
            newBlock->MarkTaken();
            InsertFreeBlock(newBlock);
        }
    }
    currentBlock->UserData() = userData;

    if (debugMargin > 0)
    {
        // Carve the debug margin out as a separate free block after the allocation.
        currentBlock->size -= debugMargin;
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = debugMargin;
        newBlock->offset = currentBlock->offset + currentBlock->size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        newBlock->MarkTaken();
        currentBlock->nextPhysical->prevPhysical = newBlock;
        currentBlock->nextPhysical = newBlock;
        InsertFreeBlock(newBlock);
    }

    if (!IsVirtual())
        m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
            currentBlock->offset, currentBlock->size);
    ++m_AllocCount;
}

void
VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) { Block* block = (Block*)allocHandle; Block* next = block->nextPhysical; VMA_ASSERT(!block->IsFree() && "Block is already free!"); if (!IsVirtual()) m_GranularityHandler.FreePages(block->offset, block->size); --m_AllocCount; VkDeviceSize debugMargin = GetDebugMargin(); if (debugMargin > 0) { RemoveFreeBlock(next); MergeBlock(next, block); block = next; next = next->nextPhysical; } // Try merging Block* prev = block->prevPhysical; if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) { RemoveFreeBlock(prev); MergeBlock(block, prev); } if (!next->IsFree()) InsertFreeBlock(block); else if (next == m_NullBlock) MergeBlock(m_NullBlock, block); else { RemoveFreeBlock(next); MergeBlock(next, block); InsertFreeBlock(next); } } void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) { Block* block = (Block*)allocHandle; VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); outInfo.offset = block->offset; outInfo.size = block->size; outInfo.pUserData = block->UserData(); } void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const { Block* block = (Block*)allocHandle; VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); return block->UserData(); } VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const { if (m_AllocCount == 0) return VK_NULL_HANDLE; for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) { if (!block->IsFree()) return (VmaAllocHandle)block; } VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); return VK_NULL_HANDLE; } VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const { Block* startBlock = (Block*)prevAlloc; VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) { if (!block->IsFree()) 
return (VmaAllocHandle)block; } return VK_NULL_HANDLE; } VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const { Block* block = (Block*)alloc; VMA_ASSERT(!block->IsFree() && "Incorrect block!"); if (block->prevPhysical) return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0; return 0; } void VmaBlockMetadata_TLSF::Clear() { m_AllocCount = 0; m_BlocksFreeCount = 0; m_BlocksFreeSize = 0; m_IsFreeBitmap = 0; m_NullBlock->offset = 0; m_NullBlock->size = GetSize(); Block* block = m_NullBlock->prevPhysical; m_NullBlock->prevPhysical = VMA_NULL; while (block) { Block* prev = block->prevPhysical; m_BlockAllocator.Free(block); block = prev; } memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); m_GranularityHandler.Clear(); } void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) { Block* block = (Block*)allocHandle; VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); block->UserData() = userData; } void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const { for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) if (!block->IsFree()) DebugLogAllocation(block->offset, block->size, block->UserData()); } uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const { if (size > SMALL_BUFFER_SIZE) return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT); return 0; } uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const { if (memoryClass == 0) { if (IsVirtual()) return static_cast((size - 1) / 8); else return static_cast((size - 1) / 64); } return static_cast((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); } uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const { if (memoryClass == 0) return secondIndex; const 
uint32_t index = static_cast(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex; if (IsVirtual()) return index + (1 << SECOND_LEVEL_INDEX); else return index + 4; } uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const { uint8_t memoryClass = SizeToMemoryClass(size); return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); } void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) { VMA_ASSERT(block != m_NullBlock); VMA_ASSERT(block->IsFree()); if (block->NextFree() != VMA_NULL) block->NextFree()->PrevFree() = block->PrevFree(); if (block->PrevFree() != VMA_NULL) block->PrevFree()->NextFree() = block->NextFree(); else { uint8_t memClass = SizeToMemoryClass(block->size); uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); uint32_t index = GetListIndex(memClass, secondIndex); VMA_ASSERT(m_FreeList[index] == block); m_FreeList[index] = block->NextFree(); if (block->NextFree() == VMA_NULL) { m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex); if (m_InnerIsFreeBitmap[memClass] == 0) m_IsFreeBitmap &= ~(1UL << memClass); } } block->MarkTaken(); block->UserData() = VMA_NULL; --m_BlocksFreeCount; m_BlocksFreeSize -= block->size; } void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) { VMA_ASSERT(block != m_NullBlock); VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); uint8_t memClass = SizeToMemoryClass(block->size); uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); uint32_t index = GetListIndex(memClass, secondIndex); VMA_ASSERT(index < m_ListsCount); block->PrevFree() = VMA_NULL; block->NextFree() = m_FreeList[index]; m_FreeList[index] = block; if (block->NextFree() != VMA_NULL) block->NextFree()->PrevFree() = block; else { m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex; m_IsFreeBitmap |= 1UL << memClass; } ++m_BlocksFreeCount; m_BlocksFreeSize += block->size; } void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) { VMA_ASSERT(block->prevPhysical == prev && "Cannot 
merge separate physical regions!"); VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!"); block->offset = prev->offset; block->size += prev->size; block->prevPhysical = prev->prevPhysical; if (block->prevPhysical) block->prevPhysical->nextPhysical = block; m_BlockAllocator.Free(prev); } VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const { uint8_t memoryClass = SizeToMemoryClass(size); uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); if (!innerFreeMap) { // Check higher levels for available blocks uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1)); if (!freeMap) return VMA_NULL; // No more memory available // Find lowest free region memoryClass = VMA_BITSCAN_LSB(freeMap); innerFreeMap = m_InnerIsFreeBitmap[memoryClass]; VMA_ASSERT(innerFreeMap != 0); } // Find lowest free subregion listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap)); VMA_ASSERT(m_FreeList[listIndex]); return m_FreeList[listIndex]; } bool VmaBlockMetadata_TLSF::CheckBlock( Block& block, uint32_t listIndex, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, VmaAllocationRequest* pAllocationRequest) { VMA_ASSERT(block.IsFree() && "Block is already taken!"); VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); if (block.size < allocSize + alignedOffset - block.offset) return false; // Check for granularity conflicts if (!IsVirtual() && m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType)) return false; // Alloc successful pAllocationRequest->type = VmaAllocationRequestType::TLSF; pAllocationRequest->allocHandle = (VmaAllocHandle)█ pAllocationRequest->size = allocSize - GetDebugMargin(); pAllocationRequest->customData = (void*)allocType; pAllocationRequest->algorithmData = alignedOffset; // Place block at the start of list if 
it's normal block if (listIndex != m_ListsCount && block.PrevFree()) { block.PrevFree()->NextFree() = block.NextFree(); if (block.NextFree()) block.NextFree()->PrevFree() = block.PrevFree(); block.PrevFree() = VMA_NULL; block.NextFree() = m_FreeList[listIndex]; m_FreeList[listIndex] = █ if (block.NextFree()) block.NextFree()->PrevFree() = █ } return true; } #endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS #endif // _VMA_BLOCK_METADATA_TLSF #ifndef _VMA_BLOCK_VECTOR /* Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific Vulkan memory type. Synchronized internally with a mutex. */ class VmaBlockVector { friend struct VmaDefragmentationContext_T; VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector) public: VmaBlockVector( VmaAllocator hAllocator, VmaPool hParentPool, uint32_t memoryTypeIndex, VkDeviceSize preferredBlockSize, size_t minBlockCount, size_t maxBlockCount, VkDeviceSize bufferImageGranularity, bool explicitBlockSize, uint32_t algorithm, float priority, VkDeviceSize minAllocationAlignment, void* pMemoryAllocateNext); ~VmaBlockVector(); VmaAllocator GetAllocator() const { return m_hAllocator; } VmaPool GetParentPool() const { return m_hParentPool; } bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } uint32_t GetAlgorithm() const { return m_Algorithm; } bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } float GetPriority() const { return m_Priority; } const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } // To be used only while the m_Mutex is locked. Used during defragmentation. size_t GetBlockCount() const { return m_Blocks.size(); } // To be used only while the m_Mutex is locked. Used during defragmentation. 
VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } VMA_RW_MUTEX &GetMutex() { return m_Mutex; } VkResult CreateMinBlocks(); void AddStatistics(VmaStatistics& inoutStats); void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); bool IsEmpty(); bool IsCorruptionDetectionEnabled() const; VkResult Allocate( VkDeviceSize size, VkDeviceSize alignment, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, VmaAllocation* pAllocations); void Free(const VmaAllocation hAllocation); #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json); #endif VkResult CheckCorruption(); private: const VmaAllocator m_hAllocator; const VmaPool m_hParentPool; const uint32_t m_MemoryTypeIndex; const VkDeviceSize m_PreferredBlockSize; const size_t m_MinBlockCount; const size_t m_MaxBlockCount; const VkDeviceSize m_BufferImageGranularity; const bool m_ExplicitBlockSize; const uint32_t m_Algorithm; const float m_Priority; const VkDeviceSize m_MinAllocationAlignment; void* const m_pMemoryAllocateNext; VMA_RW_MUTEX m_Mutex; // Incrementally sorted by sumFreeSize, ascending. VmaVector> m_Blocks; uint32_t m_NextBlockId; bool m_IncrementalSort = true; void SetIncrementalSort(bool val) { m_IncrementalSort = val; } VkDeviceSize CalcMaxBlockSize() const; // Finds and removes given block from vector. void Remove(VmaDeviceMemoryBlock* pBlock); // Performs single step in sorting m_Blocks. They may not be fully sorted // after this call. 
void IncrementallySortBlocks(); void SortByFreeSize(); VkResult AllocatePage( VkDeviceSize size, VkDeviceSize alignment, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, VmaAllocation* pAllocation); VkResult AllocateFromBlock( VmaDeviceMemoryBlock* pBlock, VkDeviceSize size, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, void* pUserData, VmaSuballocationType suballocType, uint32_t strategy, VmaAllocation* pAllocation); VkResult CommitAllocationRequest( VmaAllocationRequest& allocRequest, VmaDeviceMemoryBlock* pBlock, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, void* pUserData, VmaSuballocationType suballocType, VmaAllocation* pAllocation); VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); bool HasEmptyBlock(); }; #endif // _VMA_BLOCK_VECTOR #ifndef _VMA_DEFRAGMENTATION_CONTEXT struct VmaDefragmentationContext_T { VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T) public: VmaDefragmentationContext_T( VmaAllocator hAllocator, const VmaDefragmentationInfo& info); ~VmaDefragmentationContext_T(); void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); private: // Max number of allocations to ignore due to size constraints before ending single pass static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; enum class CounterStatus { Pass, Ignore, End }; struct FragmentedBlock { uint32_t data; VmaDeviceMemoryBlock* block; }; struct StateBalanced { VkDeviceSize avgFreeSize = 0; VkDeviceSize avgAllocSize = UINT64_MAX; }; struct StateExtensive { enum class Operation : uint8_t { FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, MoveBuffers, MoveTextures, MoveAll, Cleanup, Done }; Operation operation = Operation::FindFreeBlockTexture; size_t firstFreeBlock = SIZE_MAX; }; struct MoveAllocationData { VkDeviceSize size; VkDeviceSize 
alignment; VmaSuballocationType type; VmaAllocationCreateFlags flags; VmaDefragmentationMove move = {}; }; const VkDeviceSize m_MaxPassBytes; const uint32_t m_MaxPassAllocations; const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback; void* m_BreakCallbackUserData; VmaStlAllocator m_MoveAllocator; VmaVector> m_Moves; uint8_t m_IgnoredAllocs = 0; uint32_t m_Algorithm; uint32_t m_BlockVectorCount; VmaBlockVector* m_PoolBlockVector; VmaBlockVector** m_pBlockVectors; size_t m_ImmovableBlockCount = 0; VmaDefragmentationStats m_GlobalStats = { 0 }; VmaDefragmentationStats m_PassStats = { 0 }; void* m_AlgorithmState = VMA_NULL; static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); CounterStatus CheckCounters(VkDeviceSize bytes); bool IncrementCounters(VkDeviceSize bytes); bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); bool ComputeDefragmentation_Fast(VmaBlockVector& vector); bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); bool ComputeDefragmentation_Full(VmaBlockVector& vector); bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); bool MoveDataToFreeBlocks(VmaSuballocationType currentType, VmaBlockVector& vector, size_t firstFreeBlock, bool& texturePresent, bool& bufferPresent, bool& otherPresent); }; #endif // _VMA_DEFRAGMENTATION_CONTEXT #ifndef _VMA_POOL_T struct VmaPool_T { friend struct VmaPoolListItemTraits; VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T) public: VmaBlockVector m_BlockVector; VmaDedicatedAllocationList m_DedicatedAllocations; VmaPool_T( VmaAllocator hAllocator, const VmaPoolCreateInfo& createInfo, VkDeviceSize preferredBlockSize); ~VmaPool_T(); uint32_t GetId() const { return m_Id; } 
void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } const char* GetName() const { return m_Name; } void SetName(const char* pName); #if VMA_STATS_STRING_ENABLED //void PrintDetailedMap(class VmaStringBuilder& sb); #endif private: uint32_t m_Id; char* m_Name; VmaPool_T* m_PrevPool = VMA_NULL; VmaPool_T* m_NextPool = VMA_NULL; }; struct VmaPoolListItemTraits { typedef VmaPool_T ItemType; static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } }; #endif // _VMA_POOL_T #ifndef _VMA_CURRENT_BUDGET_DATA struct VmaCurrentBudgetData { VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData) public: VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; #if VMA_MEMORY_BUDGET VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; VMA_RW_MUTEX m_BudgetMutex; uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; #endif // VMA_MEMORY_BUDGET VmaCurrentBudgetData(); void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); }; #ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS VmaCurrentBudgetData::VmaCurrentBudgetData() { for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) { m_BlockCount[heapIndex] = 0; m_AllocationCount[heapIndex] = 0; m_BlockBytes[heapIndex] = 0; m_AllocationBytes[heapIndex] = 0; #if VMA_MEMORY_BUDGET m_VulkanUsage[heapIndex] = 0; m_VulkanBudget[heapIndex] = 0; m_BlockBytesAtBudgetFetch[heapIndex] = 0; #endif } #if VMA_MEMORY_BUDGET m_OperationsSinceBudgetFetch = 0; #endif } 
void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) { m_AllocationBytes[heapIndex] += allocationSize; ++m_AllocationCount[heapIndex]; #if VMA_MEMORY_BUDGET ++m_OperationsSinceBudgetFetch; #endif } void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) { VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); m_AllocationBytes[heapIndex] -= allocationSize; VMA_ASSERT(m_AllocationCount[heapIndex] > 0); --m_AllocationCount[heapIndex]; #if VMA_MEMORY_BUDGET ++m_OperationsSinceBudgetFetch; #endif } #endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS #endif // _VMA_CURRENT_BUDGET_DATA #ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR /* Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. */ class VmaAllocationObjectAllocator { VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator) public: VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) : m_Allocator(pAllocationCallbacks, 1024) {} template VmaAllocation Allocate(Types&&... args); void Free(VmaAllocation hAlloc); private: VMA_MUTEX m_Mutex; VmaPoolAllocator m_Allocator; }; template VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... 
args) { VmaMutexLock mutexLock(m_Mutex); return m_Allocator.Alloc(std::forward(args)...); } void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) { VmaMutexLock mutexLock(m_Mutex); m_Allocator.Free(hAlloc); } #endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR #ifndef _VMA_VIRTUAL_BLOCK_T struct VmaVirtualBlock_T { VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T) public: const bool m_AllocationCallbacksSpecified; const VkAllocationCallbacks m_AllocationCallbacks; VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo); ~VmaVirtualBlock_T(); VkResult Init() { return VK_SUCCESS; } bool IsEmpty() const { return m_Metadata->IsEmpty(); } void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); } void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); } void Clear() { m_Metadata->Clear(); } const VkAllocationCallbacks* GetAllocationCallbacks() const; void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, VkDeviceSize* outOffset); void GetStatistics(VmaStatistics& outStats) const; void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const; #if VMA_STATS_STRING_ENABLED void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; #endif private: VmaBlockMetadata* m_Metadata; }; #ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo) : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL), m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? 
*createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks) { const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK; switch (algorithm) { case 0: m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); break; case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); break; default: VMA_ASSERT(0); m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); } m_Metadata->Init(createInfo.size); } VmaVirtualBlock_T::~VmaVirtualBlock_T() { // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT // to receive the list of the unfreed allocations. if (!m_Metadata->IsEmpty()) m_Metadata->DebugLogAllAllocations(); // This is the most important assert in the entire library. // Hitting it means you have some memory leak - unreleased virtual allocations. VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); vma_delete(GetAllocationCallbacks(), m_Metadata); } const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const { return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; } void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) { m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); } VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, VkDeviceSize* outOffset) { VmaAllocationRequest request = {}; if (m_Metadata->CreateAllocationRequest( createInfo.size, // allocSize VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy &request)) { m_Metadata->Alloc(request, VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant createInfo.pUserData); outAllocation = (VmaVirtualAllocation)request.allocHandle; if(outOffset) *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); return VK_SUCCESS; } outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; if (outOffset) *outOffset = UINT64_MAX; return VK_ERROR_OUT_OF_DEVICE_MEMORY; } void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const { VmaClearStatistics(outStats); m_Metadata->AddStatistics(outStats); } void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const { VmaClearDetailedStatistics(outStats); m_Metadata->AddDetailedStatistics(outStats); } #if VMA_STATS_STRING_ENABLED void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const { VmaJsonWriter json(GetAllocationCallbacks(), sb); json.BeginObject(); VmaDetailedStatistics stats; CalculateDetailedStatistics(stats); json.WriteString("Stats"); VmaPrintDetailedStatistics(json, stats); if (detailedMap) { json.WriteString("Details"); json.BeginObject(); m_Metadata->PrintDetailedMap(json); json.EndObject(); } json.EndObject(); } #endif // VMA_STATS_STRING_ENABLED 
#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS #endif // _VMA_VIRTUAL_BLOCK_T // Main allocator object. struct VmaAllocator_T { VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T) public: const bool m_UseMutex; const uint32_t m_VulkanApiVersion; bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). bool m_UseExtMemoryBudget; bool m_UseAmdDeviceCoherentMemory; bool m_UseKhrBufferDeviceAddress; bool m_UseExtMemoryPriority; bool m_UseKhrMaintenance4; bool m_UseKhrMaintenance5; bool m_UseKhrExternalMemoryWin32; const VkDevice m_hDevice; const VkInstance m_hInstance; const bool m_AllocationCallbacksSpecified; const VkAllocationCallbacks m_AllocationCallbacks; VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; VmaAllocationObjectAllocator m_AllocationObjectAllocator; // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. uint32_t m_HeapSizeLimitMask; VkPhysicalDeviceProperties m_PhysicalDeviceProperties; VkPhysicalDeviceMemoryProperties m_MemProps; // Default pools. VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; VmaCurrentBudgetData m_Budget; VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); ~VmaAllocator_T(); const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; } const VmaVulkanFunctions& GetVulkanFunctions() const { return m_VulkanFunctions; } VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } VkDeviceSize GetBufferImageGranularity() const { return VMA_MAX( static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), m_PhysicalDeviceProperties.limits.bufferImageGranularity); } uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const { VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); return m_MemProps.memoryTypes[memTypeIndex].heapIndex; } // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const { return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } // Minimum alignment for all allocations in specific memory type. VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const { return IsMemoryTypeNonCoherent(memTypeIndex) ? 
VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : (VkDeviceSize)VMA_MIN_ALIGNMENT; } bool IsIntegratedGpu() const { return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; } uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } void GetBufferMemoryRequirements( VkBuffer hBuffer, VkMemoryRequirements& memReq, bool& requiresDedicatedAllocation, bool& prefersDedicatedAllocation) const; void GetImageMemoryRequirements( VkImage hImage, VkMemoryRequirements& memReq, bool& requiresDedicatedAllocation, bool& prefersDedicatedAllocation) const; VkResult FindMemoryTypeIndex( uint32_t memoryTypeBits, const VmaAllocationCreateInfo* pAllocationCreateInfo, VmaBufferImageUsage bufImgUsage, uint32_t* pMemoryTypeIndex) const; // Main allocation function. VkResult AllocateMemory( const VkMemoryRequirements& vkMemReq, bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VmaBufferImageUsage dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, VmaAllocation* pAllocations); // Main deallocation function. 
void FreeMemory( size_t allocationCount, const VmaAllocation* pAllocations); void CalculateStatistics(VmaTotalStatistics* pStats); void GetHeapBudgets( VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json); #endif void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo); VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); void DestroyPool(VmaPool pool); void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); void SetCurrentFrameIndex(uint32_t frameIndex); uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } VkResult CheckPoolCorruption(VmaPool hPool); VkResult CheckCorruption(uint32_t memoryTypeBits); // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. VkResult BindVulkanBuffer( VkDeviceMemory memory, VkDeviceSize memoryOffset, VkBuffer buffer, const void* pNext); // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. 
VkResult BindVulkanImage( VkDeviceMemory memory, VkDeviceSize memoryOffset, VkImage image, const void* pNext); VkResult Map(VmaAllocation hAllocation, void** ppData); void Unmap(VmaAllocation hAllocation); VkResult BindBufferMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, const void* pNext); VkResult BindImageMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, const void* pNext); VkResult FlushOrInvalidateAllocation( VmaAllocation hAllocation, VkDeviceSize offset, VkDeviceSize size, VMA_CACHE_OPERATION op); VkResult FlushOrInvalidateAllocations( uint32_t allocationCount, const VmaAllocation* allocations, const VkDeviceSize* offsets, const VkDeviceSize* sizes, VMA_CACHE_OPERATION op); VkResult CopyMemoryToAllocation( const void* pSrcHostPointer, VmaAllocation dstAllocation, VkDeviceSize dstAllocationLocalOffset, VkDeviceSize size); VkResult CopyAllocationToMemory( VmaAllocation srcAllocation, VkDeviceSize srcAllocationLocalOffset, void* pDstHostPointer, VkDeviceSize size); void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); /* Returns bit mask of memory types that can support defragmentation on GPU as they support creation of required buffer for copy operations. */ uint32_t GetGpuDefragmentationMemoryTypeBits(); #if VMA_EXTERNAL_MEMORY VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const { return m_TypeExternalMemoryHandleTypes[memTypeIndex]; } #endif // #if VMA_EXTERNAL_MEMORY private: VkDeviceSize m_PreferredLargeHeapBlockSize; VkPhysicalDevice m_PhysicalDevice; VMA_ATOMIC_UINT32 m_CurrentFrameIndex; VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. #if VMA_EXTERNAL_MEMORY VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; #endif // #if VMA_EXTERNAL_MEMORY VMA_RW_MUTEX m_PoolsMutex; typedef VmaIntrusiveLinkedList PoolList; // Protected by m_PoolsMutex. 
PoolList m_Pools; uint32_t m_NextPoolId; VmaVulkanFunctions m_VulkanFunctions; // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. uint32_t m_GlobalMemoryTypeBits; void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); #if VMA_STATIC_VULKAN_FUNCTIONS == 1 void ImportVulkanFunctions_Static(); #endif void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 void ImportVulkanFunctions_Dynamic(); #endif void ValidateVulkanFunctions(); VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); VkResult AllocateMemoryOfType( VmaPool pool, VkDeviceSize size, VkDeviceSize alignment, bool dedicatedPreferred, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VmaBufferImageUsage dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, VmaDedicatedAllocationList& dedicatedAllocations, VmaBlockVector& blockVector, size_t allocationCount, VmaAllocation* pAllocations); // Helper function only to be used inside AllocateDedicatedMemory. VkResult AllocateDedicatedMemoryPage( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, uint32_t memTypeIndex, const VkMemoryAllocateInfo& allocInfo, bool map, bool isUserDataString, bool isMappingAllowed, void* pUserData, VmaAllocation* pAllocation); // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. 
VkResult AllocateDedicatedMemory( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, VmaDedicatedAllocationList& dedicatedAllocations, uint32_t memTypeIndex, bool map, bool isUserDataString, bool isMappingAllowed, bool canAliasMemory, void* pUserData, float priority, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VmaBufferImageUsage dedicatedBufferImageUsage, size_t allocationCount, VmaAllocation* pAllocations, const void* pNextChain = VMA_NULL); void FreeDedicatedMemory(const VmaAllocation allocation); VkResult CalcMemTypeParams( VmaAllocationCreateInfo& outCreateInfo, uint32_t memTypeIndex, VkDeviceSize size, size_t allocationCount); VkResult CalcAllocationParams( VmaAllocationCreateInfo& outCreateInfo, bool dedicatedRequired, bool dedicatedPreferred); /* Calculates and returns bit mask of memory types that can support defragmentation on GPU as they support creation of required buffer for copy operations. */ uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; uint32_t CalculateGlobalMemoryTypeBits() const; bool GetFlushOrInvalidateRange( VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size, VkMappedMemoryRange& outRange) const; #if VMA_MEMORY_BUDGET void UpdateVulkanBudget(); #endif // #if VMA_MEMORY_BUDGET }; #ifndef _VMA_MEMORY_FUNCTIONS static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) { return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); } static void VmaFree(VmaAllocator hAllocator, void* ptr) { VmaFree(&hAllocator->m_AllocationCallbacks, ptr); } template static T* VmaAllocate(VmaAllocator hAllocator) { return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); } template static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) { return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); } template static void vma_delete(VmaAllocator hAllocator, T* ptr) { if(ptr != VMA_NULL) { ptr->~T(); VmaFree(hAllocator, ptr); } } template static void 
vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy elements in reverse order, then release the memory through
        // the allocator's allocation callbacks.
        for(size_t i = count; i--; )
            ptr[i].~T();
        VmaFree(hAllocator, ptr);
    }
}
#endif // _VMA_MEMORY_FUNCTIONS

#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
// Constructs an empty block; the actual VkDeviceMemory and metadata are
// attached later in Init(). The allocator parameter is unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
    : m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL){}

VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
{
    // Destroy() must have been called before destruction: it unmaps and
    // releases m_hMemory.
    VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE);
}

// Attaches this block to an already-allocated VkDeviceMemory and creates
// suballocation metadata: TLSF by default, linear when the pool requests it.
void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm,
    VkDeviceSize bufferImageGranularity)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hParentPool = hParentPool;
    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch (algorithm)
    {
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
        break;
    default:
        // Unknown algorithm bit: fall back to TLSF after asserting.
        VMA_ASSERT(0);
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
            bufferImageGranularity, false); // isVirtual
    }
    m_pMetadata->Init(newSize);
}

// Frees the underlying VkDeviceMemory and the metadata object.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
    // to receive the list of the unfreed allocations.
    if (!m_pMetadata->IsEmpty())
        m_pMetadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}

// Notifies the mapping hysteresis that an allocation happened in this block.
void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    m_MappingHysteresis.PostAlloc();
}

// Notifies the hysteresis about a free; if it decides to drop its extra
// mapping and no user mappings remain, the memory is unmapped.
void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
{
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if(m_MappingHysteresis.PostFree())
    {
        VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
        if (m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
}

bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}

// Maps the block and asks the metadata to verify its debug margins.
VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = VMA_NULL;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}

// Reference-counted map: only the first mapping calls vkMapMemory; later
// calls just bump m_MapCount and return the cached pointer.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if (count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    // The hysteresis "extra mapping" counts toward the total, so the memory
    // may already be mapped even when m_MapCount == 0.
    const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
    if (oldTotalMapCount != 0)
    {
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        m_MappingHysteresis.PostMap();
        m_MapCount += count;
        if (ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if (result == VK_SUCCESS)
        {
            VMA_ASSERT(m_pMappedData != VMA_NULL);
            m_MappingHysteresis.PostMap();
            m_MapCount = count;
            if (ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
        }
        return result;
    }
}

// Reference-counted unmap; vkUnmapMemory is issued only when both the user
// count and the hysteresis extra mapping drop to zero.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if (count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if (m_MapCount >= count)
    {
        m_MapCount -= count;
        const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
        if (totalMapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
        m_MappingHysteresis.PostUnmap();
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}

// Writes the corruption-detection magic number just past an allocation.
// Only meaningful when VMA_DEBUG_MARGIN / VMA_DEBUG_DETECT_CORRUPTION are on.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}

// Verifies the magic number after a freed allocation; asserts on mismatch.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}

// Binds a buffer to this block's memory at the allocation's offset plus
// allocationLocalOffset (relative to the allocation, not the block).
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}

// Image counterpart of BindBufferMemory; same offset semantics and locking.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}

#if VMA_EXTERNAL_MEMORY_WIN32
// Exports (and caches) a Win32 handle for this block's VkDeviceMemory.
VkResult VmaDeviceMemoryBlock::CreateWin32Handle(const VmaAllocator hAllocator, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, HANDLE hTargetProcess, HANDLE* pHandle) noexcept
{
    VMA_ASSERT(pHandle);
    return m_Handle.GetHandle(hAllocator->m_hDevice, m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle);
}
#endif // VMA_EXTERNAL_MEMORY_WIN32
#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS

#ifndef _VMA_ALLOCATION_T_FUNCTIONS
// Creates an allocation object in the "none" state; it becomes a block or
// dedicated allocation via InitBlockAllocation()/InitDedicatedAllocation().
VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
    : m_Alignment{ 1 },
    m_Size{ 0 },
    m_pUserData{ VMA_NULL },
    m_pName{ VMA_NULL },
    m_MemoryTypeIndex{ 0 },
    m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
    m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
    m_MapCount{ 0 },
    m_Flags{ 0 }
{
    if(mappingAllowed)
        m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
}

VmaAllocation_T::~VmaAllocation_T()
{
    VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction.");

    // Check if owned string was freed.
    VMA_ASSERT(m_pName == VMA_NULL);
}

// Turns this object into a block suballocation with the given parameters.
void VmaAllocation_T::InitBlockAllocation(
    VmaDeviceMemoryBlock* block,
    VmaAllocHandle allocHandle,
    VkDeviceSize alignment,
    VkDeviceSize size,
    uint32_t memoryTypeIndex,
    VmaSuballocationType suballocationType,
    bool mapped)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    VMA_ASSERT(block != VMA_NULL);
    m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    m_Alignment = alignment;
    m_Size = size;
    m_MemoryTypeIndex = memoryTypeIndex;
    if(mapped)
    {
        VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; } m_SuballocationType = (uint8_t)suballocationType; m_BlockAllocation.m_Block = block; m_BlockAllocation.m_AllocHandle = allocHandle; } void VmaAllocation_T::InitDedicatedAllocation( VmaAllocator allocator, VmaPool hParentPool, uint32_t memoryTypeIndex, VkDeviceMemory hMemory, VmaSuballocationType suballocationType, void* pMappedData, VkDeviceSize size) { VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); VMA_ASSERT(hMemory != VK_NULL_HANDLE); m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; m_Alignment = 0; m_Size = size; m_MemoryTypeIndex = memoryTypeIndex; m_SuballocationType = (uint8_t)suballocationType; m_DedicatedAllocation.m_ExtraData = VMA_NULL; m_DedicatedAllocation.m_hParentPool = hParentPool; m_DedicatedAllocation.m_hMemory = hMemory; m_DedicatedAllocation.m_Prev = VMA_NULL; m_DedicatedAllocation.m_Next = VMA_NULL; if (pMappedData != VMA_NULL) { VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; EnsureExtraData(allocator); m_DedicatedAllocation.m_ExtraData->m_pMappedData = pMappedData; } } void VmaAllocation_T::Destroy(VmaAllocator allocator) { FreeName(allocator); if (GetType() == ALLOCATION_TYPE_DEDICATED) { vma_delete(allocator, m_DedicatedAllocation.m_ExtraData); } } void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) { VMA_ASSERT(pName == VMA_NULL || pName != m_pName); FreeName(hAllocator); if (pName != VMA_NULL) m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); } uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) { VMA_ASSERT(allocation != VMA_NULL); VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); if (m_MapCount != 0) m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); std::swap(m_BlockAllocation, allocation->m_BlockAllocation); m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); #if VMA_STATS_STRING_ENABLED std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage); #endif return m_MapCount; } VmaAllocHandle VmaAllocation_T::GetAllocHandle() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: return m_BlockAllocation.m_AllocHandle; case ALLOCATION_TYPE_DEDICATED: return VK_NULL_HANDLE; default: VMA_ASSERT(0); return VK_NULL_HANDLE; } } VkDeviceSize VmaAllocation_T::GetOffset() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); case ALLOCATION_TYPE_DEDICATED: return 0; default: VMA_ASSERT(0); return 0; } } VmaPool VmaAllocation_T::GetParentPool() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: return 
m_BlockAllocation.m_Block->GetParentPool(); case ALLOCATION_TYPE_DEDICATED: return m_DedicatedAllocation.m_hParentPool; default: VMA_ASSERT(0); return VK_NULL_HANDLE; } } VkDeviceMemory VmaAllocation_T::GetMemory() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: return m_BlockAllocation.m_Block->GetDeviceMemory(); case ALLOCATION_TYPE_DEDICATED: return m_DedicatedAllocation.m_hMemory; default: VMA_ASSERT(0); return VK_NULL_HANDLE; } } void* VmaAllocation_T::GetMappedData() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: if (m_MapCount != 0 || IsPersistentMap()) { void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); VMA_ASSERT(pBlockData != VMA_NULL); return (char*)pBlockData + GetOffset(); } else { return VMA_NULL; } break; case ALLOCATION_TYPE_DEDICATED: VMA_ASSERT((m_DedicatedAllocation.m_ExtraData != VMA_NULL && m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); return m_DedicatedAllocation.m_ExtraData != VMA_NULL ? m_DedicatedAllocation.m_ExtraData->m_pMappedData : VMA_NULL; default: VMA_ASSERT(0); return VMA_NULL; } } void VmaAllocation_T::BlockAllocMap() { VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); if (m_MapCount < 0xFF) { ++m_MapCount; } else { VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); } } void VmaAllocation_T::BlockAllocUnmap() { VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); if (m_MapCount > 0) { --m_MapCount; } else { VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); } } VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) { VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
    // NOTE(review): the line above is the tail of a string literal begun on the
    // previous line; the extraction appears to have split the literal mid-token.
    EnsureExtraData(hAllocator);
    if (m_MapCount != 0 || IsPersistentMap())
    {
        // Memory is already mapped (explicitly or persistently): reuse the
        // cached pointer and bump the map reference count.
        if (m_MapCount < 0xFF)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_ExtraData->m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            // The map count is an 8-bit counter (0xFF cap); refuse to overflow it.
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping of this dedicated VkDeviceMemory: map the whole range.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if (result == VK_SUCCESS)
        {
            // Cache the pointer so subsequent maps only increment the count.
            m_DedicatedAllocation.m_ExtraData->m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

// Decrements the map reference count of a dedicated allocation and calls
// vkUnmapMemory once the count reaches zero, unless the allocation is
// persistently mapped (then the memory stays mapped for its lifetime).
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if (m_MapCount > 0)
    {
        --m_MapCount;
        if (m_MapCount == 0 && !IsPersistentMap())
        {
            VMA_ASSERT(m_DedicatedAllocation.m_ExtraData != VMA_NULL);
            m_DedicatedAllocation.m_ExtraData->m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}

#if VMA_STATS_STRING_ENABLED
// Writes this allocation's basic parameters as key/value pairs into the JSON
// statistics dump (the function continues past this chunk boundary).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);
    json.WriteString("Usage");
    json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t.
if (m_pUserData != VMA_NULL)
    {
        json.WriteString("CustomData");
        json.BeginString();
        json.ContinueString_Pointer(m_pUserData);
        json.EndString();
    }
    if (m_pName != VMA_NULL)
    {
        json.WriteString("Name");
        json.WriteString(m_pName);
    }
}

#if VMA_EXTERNAL_MEMORY_WIN32
// Exports the allocation's underlying VkDeviceMemory as a Win32 handle for
// hTargetProcess via vkGetMemoryWin32HandleKHR.
VkResult VmaAllocation_T::GetWin32Handle(VmaAllocator hAllocator, HANDLE hTargetProcess, HANDLE* pHandle) noexcept
{
    auto pvkGetMemoryWin32HandleKHR = hAllocator->GetVulkanFunctions().vkGetMemoryWin32HandleKHR;
    switch (m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        // Block suballocations share one exported handle per memory block.
        return m_BlockAllocation.m_Block->CreateWin32Handle(hAllocator, pvkGetMemoryWin32HandleKHR, hTargetProcess, pHandle);
    case ALLOCATION_TYPE_DEDICATED:
        // The handle cache lives in the lazily created extra-data struct.
        EnsureExtraData(hAllocator);
        return m_DedicatedAllocation.m_ExtraData->m_Handle.GetHandle(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory, pvkGetMemoryWin32HandleKHR, hTargetProcess, hAllocator->m_UseMutex, pHandle);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }
}
#endif // VMA_EXTERNAL_MEMORY_WIN32
#endif // VMA_STATS_STRING_ENABLED

// Lazily allocates the dedicated allocation's extra-data struct (mapped-pointer
// cache, etc.) on first use.
void VmaAllocation_T::EnsureExtraData(VmaAllocator hAllocator)
{
    if (m_DedicatedAllocation.m_ExtraData == VMA_NULL)
    {
        m_DedicatedAllocation.m_ExtraData = vma_new(hAllocator, VmaAllocationExtraData)();
    }
}

// Releases the copied name string, if one was set via SetName().
void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
{
    if(m_pName)
    {
        VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
        m_pName = VMA_NULL;
    }
}
#endif // _VMA_ALLOCATION_T_FUNCTIONS

#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    bool explicitBlockSize,
    uint32_t algorithm,
    float priority,
    VkDeviceSize minAllocationAlignment,
    void* pMemoryAllocateNext)
    : m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Priority(priority),
    m_MinAllocationAlignment(minAllocationAlignment),
    m_pMemoryAllocateNext(pMemoryAllocateNext),
    // NOTE(review): a template argument list appears to have been lost here
    // during extraction (upstream VMA uses VmaStlAllocator<VmaDeviceMemoryBlock*>)
    // -- confirm against the upstream header.
    m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0) {}

// Destroys and frees every remaining memory block, in reverse creation order.
VmaBlockVector::~VmaBlockVector()
{
    for (size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

// Pre-creates m_MinBlockCount blocks of the preferred size; stops at and
// returns the error of the first failed creation.
VkResult VmaBlockVector::CreateMinBlocks()
{
    for (size_t i = 0; i < m_MinBlockCount; ++i)
    {
        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

// Accumulates coarse statistics of all blocks into inoutStats (read lock held).
void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddStatistics(inoutStats);
    }
}

// Accumulates detailed statistics of all blocks into inoutStats (read lock held).
void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();
    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
    }
}

// True when the vector currently owns no memory blocks at all.
bool VmaBlockVector::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    return m_Blocks.empty();
}

// Corruption detection requires the debug margin + detect-corruption build
// flags, the default or linear allocation algorithm, and (checked on the
// continuation line past this chunk boundary) host-visible + host-coherent
// memory so the magic margins can be read back.
bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
        VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
(m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; } VkResult VmaBlockVector::Allocate( VkDeviceSize size, VkDeviceSize alignment, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, VmaAllocation* pAllocations) { size_t allocIndex; VkResult res = VK_SUCCESS; alignment = VMA_MAX(alignment, m_MinAllocationAlignment); if (IsCorruptionDetectionEnabled()) { size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); } { VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { res = AllocatePage( size, alignment, createInfo, suballocType, pAllocations + allocIndex); if (res != VK_SUCCESS) { break; } } } if (res != VK_SUCCESS) { // Free all already created allocations. while (allocIndex--) Free(pAllocations[allocIndex]); memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); } return res; } VkResult VmaBlockVector::AllocatePage( VkDeviceSize size, VkDeviceSize alignment, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, VmaAllocation* pAllocation) { const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; VkDeviceSize freeMemory; { const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); VmaBudget heapBudget = {}; m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); freeMemory = (heapBudget.usage < heapBudget.budget) ? 
(heapBudget.budget - heapBudget.usage) : 0; } const bool canFallbackToDedicated = !HasExplicitBlockSize() && (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; const bool canCreateNewBlock = ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && (m_Blocks.size() < m_MaxBlockCount) && (freeMemory >= size || !canFallbackToDedicated); uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; // Upper address can only be used with linear allocator and within single memory block. if (isUpperAddress && (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) { return VK_ERROR_FEATURE_NOT_PRESENT; } // Early reject: requested allocation size is larger that maximum block size for this block vector. if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) { return VK_ERROR_OUT_OF_DEVICE_MEMORY; } // 1. Search existing allocations. Try to allocate. if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) { // Use only last block. if (!m_Blocks.empty()) { VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG_FORMAT(" Returned from last block #%" PRIu32, pCurrBlock->GetId()); IncrementallySortBlocks(); return VK_SUCCESS; } } } else { if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default { const bool isHostVisible = (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; if(isHostVisible) { const bool isMappingAllowed = (createInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; /* For non-mappable allocations, check blocks that are not mapped first. For mappable allocations, check blocks that are already mapped first. 
This way, having many blocks, we will separate mappable and non-mappable allocations, hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. */ for(size_t mappingI = 0; mappingI < 2; ++mappingI) { // Forward order in m_Blocks - prefer blocks with smallest amount of free space. for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) { VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); IncrementallySortBlocks(); return VK_SUCCESS; } } } } } else { // Forward order in m_Blocks - prefer blocks with smallest amount of free space. for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); IncrementallySortBlocks(); return VK_SUCCESS; } } } } else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT { // Backward order in m_Blocks - prefer blocks with largest amount of free space. 
for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) { VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); IncrementallySortBlocks(); return VK_SUCCESS; } } } } // 2. Try to create new block. if (canCreateNewBlock) { // Calculate optimal size for new block. VkDeviceSize newBlockSize = m_PreferredBlockSize; uint32_t newBlockSizeShift = 0; const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; if (!m_ExplicitBlockSize) { // Allocate 1/8, 1/4, 1/2 as first blocks. const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) { const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) { newBlockSize = smallerNewBlockSize; ++newBlockSizeShift; } else { break; } } } size_t newBlockIndex = 0; VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. if (!m_ExplicitBlockSize) { while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) { const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; if (smallerNewBlockSize >= size) { newBlockSize = smallerNewBlockSize; ++newBlockSizeShift; res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; } else { break; } } } if (res == VK_SUCCESS) { VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); res = AllocateFromBlock( pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG_FORMAT(" Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize); IncrementallySortBlocks(); return VK_SUCCESS; } else { // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. return VK_ERROR_OUT_OF_DEVICE_MEMORY; } } } return VK_ERROR_OUT_OF_DEVICE_MEMORY; } void VmaBlockVector::Free(const VmaAllocation hAllocation) { VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; bool budgetExceeded = false; { const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); VmaBudget heapBudget = {}; m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); budgetExceeded = heapBudget.usage >= heapBudget.budget; } // Scope for lock. { VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); if (IsCorruptionDetectionEnabled()) { VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); } if (hAllocation->IsPersistentMap()) { pBlock->Unmap(m_hAllocator, 1); } const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); pBlock->PostFree(m_hAllocator); VMA_HEAVY_ASSERT(pBlock->Validate()); VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex); const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; // pBlock became empty after this deallocation. if (pBlock->m_pMetadata->IsEmpty()) { // Already had empty block. 
We don't want to have two, so delete this one. if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) { pBlockToDelete = pBlock; Remove(pBlock); } // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. } // pBlock didn't become empty, but we have another empty block - find and free that one. // (This is optional, heuristics.) else if (hadEmptyBlockBeforeFree && canDeleteBlock) { VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); if (pLastBlock->m_pMetadata->IsEmpty()) { pBlockToDelete = pLastBlock; m_Blocks.pop_back(); } } IncrementallySortBlocks(); m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); hAllocation->Destroy(m_hAllocator); m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); } // Destruction of a free block. Deferred until this point, outside of mutex // lock, for performance reason. if (pBlockToDelete != VMA_NULL) { VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%" PRIu32, pBlockToDelete->GetId()); pBlockToDelete->Destroy(m_hAllocator); vma_delete(m_hAllocator, pBlockToDelete); } } VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const { VkDeviceSize result = 0; for (size_t i = m_Blocks.size(); i--; ) { result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); if (result >= m_PreferredBlockSize) { break; } } return result; } void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) { for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { if (m_Blocks[blockIndex] == pBlock) { VmaVectorRemove(m_Blocks, blockIndex); return; } } VMA_ASSERT(0); } void VmaBlockVector::IncrementallySortBlocks() { if (!m_IncrementalSort) return; if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) { // Bubble sort only until first swap. 
for (size_t i = 1; i < m_Blocks.size(); ++i) { if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) { std::swap(m_Blocks[i - 1], m_Blocks[i]); return; } } } } void VmaBlockVector::SortByFreeSize() { VMA_SORT(m_Blocks.begin(), m_Blocks.end(), [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool { return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); }); } VkResult VmaBlockVector::AllocateFromBlock( VmaDeviceMemoryBlock* pBlock, VkDeviceSize size, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, void* pUserData, VmaSuballocationType suballocType, uint32_t strategy, VmaAllocation* pAllocation) { const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; VmaAllocationRequest currRequest = {}; if (pBlock->m_pMetadata->CreateAllocationRequest( size, alignment, isUpperAddress, suballocType, strategy, &currRequest)) { return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); } return VK_ERROR_OUT_OF_DEVICE_MEMORY; } VkResult VmaBlockVector::CommitAllocationRequest( VmaAllocationRequest& allocRequest, VmaDeviceMemoryBlock* pBlock, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, void* pUserData, VmaSuballocationType suballocType, VmaAllocation* pAllocation) { const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; const bool isMappingAllowed = (allocFlags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; pBlock->PostAlloc(m_hAllocator); // Allocate from pCurrBlock. 
if (mapped) { VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); if (res != VK_SUCCESS) { return res; } } *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); (*pAllocation)->InitBlockAllocation( pBlock, allocRequest.allocHandle, alignment, allocRequest.size, // Not size, as actual allocation size may be larger than requested! m_MemoryTypeIndex, suballocType, mapped); VMA_HEAVY_ASSERT(pBlock->Validate()); if (isUserDataString) (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); else (*pAllocation)->SetUserData(m_hAllocator, pUserData); m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } if (IsCorruptionDetectionEnabled()) { VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); } return VK_SUCCESS; } VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) { VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; allocInfo.pNext = m_pMemoryAllocateNext; allocInfo.memoryTypeIndex = m_MemoryTypeIndex; allocInfo.allocationSize = blockSize; #if VMA_BUFFER_DEVICE_ADDRESS // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. 
VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; if (m_hAllocator->m_UseKhrBufferDeviceAddress) { allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); } #endif // VMA_BUFFER_DEVICE_ADDRESS #if VMA_MEMORY_PRIORITY VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; if (m_hAllocator->m_UseExtMemoryPriority) { VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); priorityInfo.priority = m_Priority; VmaPnextChainPushFront(&allocInfo, &priorityInfo); } #endif // VMA_MEMORY_PRIORITY #if VMA_EXTERNAL_MEMORY // Attach VkExportMemoryAllocateInfoKHR if necessary. VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); if (exportMemoryAllocInfo.handleTypes != 0) { VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); } #endif // VMA_EXTERNAL_MEMORY VkDeviceMemory mem = VK_NULL_HANDLE; VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); if (res < 0) { return res; } // New VkDeviceMemory successfully created. // Create new Allocation for it. 
VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    // Wrap the freshly allocated VkDeviceMemory in a block object and register it.
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm,
        m_BufferImageGranularity);

    m_Blocks.push_back(pBlock);
    if (pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}

// Returns true if any block in the vector currently holds no allocations.
bool VmaBlockVector::HasEmptyBlock()
{
    for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
        if (pBlock->m_pMetadata->IsEmpty())
        {
            return true;
        }
    }
    return false;
}

#if VMA_STATS_STRING_ENABLED
// Dumps every block (keyed by block id) with its map ref count and detailed
// metadata into the JSON statistics output (read lock held).
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();
    for (size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        json.BeginObject();
        json.WriteString("MapRefCount");
        json.WriteNumber(m_Blocks[i]->GetMapRefCount());

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
        json.EndObject();
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED

// Verifies the magic-value margins of every block; returns
// VK_ERROR_FEATURE_NOT_PRESENT when corruption detection is not enabled for
// this vector, otherwise the first per-block failure or VK_SUCCESS.
VkResult VmaBlockVector::CheckCorruption()
{
    if (!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if (res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}
#endif // _VMA_BLOCK_VECTOR_FUNCTIONS

#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    const VmaDefragmentationInfo& info)
    // A value of 0 in the info struct means "no limit" for the pass budgets.
    : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
    m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ?
UINT32_MAX : info.maxAllocationsPerPass), m_BreakCallback(info.pfnBreakCallback), m_BreakCallbackUserData(info.pBreakCallbackUserData), m_MoveAllocator(hAllocator->GetAllocationCallbacks()), m_Moves(m_MoveAllocator) { m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK; if (info.pool != VMA_NULL) { m_BlockVectorCount = 1; m_PoolBlockVector = &info.pool->m_BlockVector; m_pBlockVectors = &m_PoolBlockVector; m_PoolBlockVector->SetIncrementalSort(false); m_PoolBlockVector->SortByFreeSize(); } else { m_BlockVectorCount = hAllocator->GetMemoryTypeCount(); m_PoolBlockVector = VMA_NULL; m_pBlockVectors = hAllocator->m_pBlockVectors; for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { VmaBlockVector* vector = m_pBlockVectors[i]; if (vector != VMA_NULL) { vector->SetIncrementalSort(false); vector->SortByFreeSize(); } } } switch (m_Algorithm) { case 0: // Default algorithm m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); break; case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); break; case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: if (hAllocator->GetBufferImageGranularity() > 1) { m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount); } break; } } VmaDefragmentationContext_T::~VmaDefragmentationContext_T() { if (m_PoolBlockVector != VMA_NULL) { m_PoolBlockVector->SetIncrementalSort(true); } else { for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { VmaBlockVector* vector = m_pBlockVectors[i]; if (vector != VMA_NULL) vector->SetIncrementalSort(true); } } if (m_AlgorithmState) { switch (m_Algorithm) { case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); break; case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: 
vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); break; default: VMA_ASSERT(0); } } } VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) { if (m_PoolBlockVector != VMA_NULL) { VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex); if (m_PoolBlockVector->GetBlockCount() > 1) ComputeDefragmentation(*m_PoolBlockVector, 0); else if (m_PoolBlockVector->GetBlockCount() == 1) ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0)); } else { for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { if (m_pBlockVectors[i] != VMA_NULL) { VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex); if (m_pBlockVectors[i]->GetBlockCount() > 1) { if (ComputeDefragmentation(*m_pBlockVectors[i], i)) break; } else if (m_pBlockVectors[i]->GetBlockCount() == 1) { if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0))) break; } } } } moveInfo.moveCount = static_cast(m_Moves.size()); if (moveInfo.moveCount > 0) { moveInfo.pMoves = m_Moves.data(); return VK_INCOMPLETE; } moveInfo.pMoves = VMA_NULL; return VK_SUCCESS; } VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) { VMA_ASSERT(moveInfo.moveCount > 0 ? 
moveInfo.pMoves != VMA_NULL : true); VkResult result = VK_SUCCESS; VmaStlAllocator blockAllocator(m_MoveAllocator.m_pCallbacks); VmaVector> immovableBlocks(blockAllocator); VmaVector> mappedBlocks(blockAllocator); VmaAllocator allocator = VMA_NULL; for (uint32_t i = 0; i < moveInfo.moveCount; ++i) { VmaDefragmentationMove& move = moveInfo.pMoves[i]; size_t prevCount = 0, currentCount = 0; VkDeviceSize freedBlockSize = 0; uint32_t vectorIndex; VmaBlockVector* vector; if (m_PoolBlockVector != VMA_NULL) { vectorIndex = 0; vector = m_PoolBlockVector; } else { vectorIndex = move.srcAllocation->GetMemoryTypeIndex(); vector = m_pBlockVectors[vectorIndex]; VMA_ASSERT(vector != VMA_NULL); } switch (move.operation) { case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: { uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation); if (mapCount > 0) { allocator = vector->m_hAllocator; VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); bool notPresent = true; for (FragmentedBlock& block : mappedBlocks) { if (block.block == newMapBlock) { notPresent = false; block.data += mapCount; break; } } if (notPresent) mappedBlocks.push_back({ mapCount, newMapBlock }); } // Scope for locks, Free have it's own lock { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); prevCount = vector->GetBlockCount(); freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); } vector->Free(move.dstTmpAllocation); { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); currentCount = vector->GetBlockCount(); } result = VK_INCOMPLETE; break; } case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: { m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); --m_PassStats.allocationsMoved; vector->Free(move.dstTmpAllocation); VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); bool notPresent = true; for (const FragmentedBlock& block : immovableBlocks) { if (block.block == 
newBlock) { notPresent = false; break; } } if (notPresent) immovableBlocks.push_back({ vectorIndex, newBlock }); break; } case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: { m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); --m_PassStats.allocationsMoved; // Scope for locks, Free have it's own lock { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); prevCount = vector->GetBlockCount(); freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); } vector->Free(move.srcAllocation); { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); currentCount = vector->GetBlockCount(); } freedBlockSize *= prevCount - currentCount; VkDeviceSize dstBlockSize; { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); } vector->Free(move.dstTmpAllocation); { VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); currentCount = vector->GetBlockCount(); } result = VK_INCOMPLETE; break; } default: VMA_ASSERT(0); } if (prevCount > currentCount) { size_t freedBlocks = prevCount - currentCount; m_PassStats.deviceMemoryBlocksFreed += static_cast(freedBlocks); m_PassStats.bytesFreed += freedBlockSize; } if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT && m_AlgorithmState != VMA_NULL) { // Avoid unnecessary tries to allocate when new free block is available StateExtensive& state = reinterpret_cast(m_AlgorithmState)[vectorIndex]; if (state.firstFreeBlock != SIZE_MAX) { const size_t diff = prevCount - currentCount; if (state.firstFreeBlock >= diff) { state.firstFreeBlock -= diff; if (state.firstFreeBlock != 0) state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); } else state.firstFreeBlock = 0; } } } moveInfo.moveCount = 0; moveInfo.pMoves = VMA_NULL; m_Moves.clear(); // 
Update stats m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved; m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; m_PassStats = { 0 }; // Move blocks with immovable allocations according to algorithm if (immovableBlocks.size() > 0) { do { if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT) { if (m_AlgorithmState != VMA_NULL) { bool swapped = false; // Move to the start of free blocks range for (const FragmentedBlock& block : immovableBlocks) { StateExtensive& state = reinterpret_cast(m_AlgorithmState)[block.data]; if (state.operation != StateExtensive::Operation::Cleanup) { VmaBlockVector* vector = m_pBlockVectors[block.data]; VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) { if (vector->GetBlock(i) == block.block) { std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); if (state.firstFreeBlock != SIZE_MAX) { if (i + 1 < state.firstFreeBlock) { if (state.firstFreeBlock > 1) std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); else --state.firstFreeBlock; } } swapped = true; break; } } } } if (swapped) result = VK_INCOMPLETE; break; } } // Move to the beginning for (const FragmentedBlock& block : immovableBlocks) { VmaBlockVector* vector = m_pBlockVectors[block.data]; VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) { if (vector->GetBlock(i) == block.block) { std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]); break; } } } } while (false); } // Bulk-map destination blocks for (const FragmentedBlock& block : mappedBlocks) { VkResult res = block.block->Map(allocator, block.data, VMA_NULL); 
VMA_ASSERT(res == VK_SUCCESS); } return result; } bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) { switch (m_Algorithm) { case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: return ComputeDefragmentation_Fast(vector); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: return ComputeDefragmentation_Balanced(vector, index, true); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: return ComputeDefragmentation_Full(vector); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: return ComputeDefragmentation_Extensive(vector, index); default: VMA_ASSERT(0); return ComputeDefragmentation_Balanced(vector, index, true); } } VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( VmaAllocHandle handle, VmaBlockMetadata* metadata) { MoveAllocationData moveData; moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); moveData.size = moveData.move.srcAllocation->GetSize(); moveData.alignment = moveData.move.srcAllocation->GetAlignment(); moveData.type = moveData.move.srcAllocation->GetSuballocationType(); moveData.flags = 0; if (moveData.move.srcAllocation->IsPersistentMap()) moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; if (moveData.move.srcAllocation->IsMappingAllowed()) moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; return moveData; } VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) { // Check custom criteria if exists if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData)) return CounterStatus::End; // Ignore allocation if will exceed max size for copy if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) { if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) return CounterStatus::Ignore; else return CounterStatus::End; } else m_IgnoredAllocs = 0; return CounterStatus::Pass; } bool 
VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) { m_PassStats.bytesMoved += bytes; // Early return when max found if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) { VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); return true; } return false; } bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) { VmaBlockMetadata* metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { VmaAllocationRequest request = {}; if (metadata->CreateAllocationRequest( moveData.size, moveData.alignment, false, moveData.type, VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, &request)) { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( request, block, moveData.alignment, moveData.flags, this, moveData.type, &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) return true; } } } } } return false; } bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) { for (; start < end; ++start) { VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); if 
(dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) { if (vector.AllocateFromBlock(dstBlock, data.size, data.alignment, data.flags, this, data.type, 0, &data.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(data.move); if (IncrementCounters(data.size)) return true; break; } } } return false; } bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) { // Move only between blocks // Go through allocations in last blocks and try to fit them inside first ones for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } // Check all previous blocks for free space if (AllocInOtherBlock(0, i, moveData, vector)) return true; } } return false; } bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) { // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0), // but only if there are noticeable gaps between them (some heuristic, ex. 
average size of allocation in block) VMA_ASSERT(m_AlgorithmState != VMA_NULL); StateBalanced& vectorState = reinterpret_cast(m_AlgorithmState)[index]; if (update && vectorState.avgAllocSize == UINT64_MAX) UpdateVectorStatistics(vector, vectorState); const size_t startMoveCount = m_Moves.size(); VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { VmaDeviceMemoryBlock* block = vector.GetBlock(i); VmaBlockMetadata* metadata = block->m_pMetadata; VkDeviceSize prevFreeRegionSize = 0; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } // Check all previous blocks for free space const size_t prevMoveCount = m_Moves.size(); if (AllocInOtherBlock(0, i, moveData, vector)) return true; VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); // If no room found then realloc within block for lower offset VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { // Check if realloc will make sense if (prevFreeRegionSize >= minimalFreeRegion || nextFreeRegionSize >= minimalFreeRegion || moveData.size <= vectorState.avgFreeSize || moveData.size <= vectorState.avgAllocSize) { VmaAllocationRequest request = {}; if (metadata->CreateAllocationRequest( moveData.size, moveData.alignment, false, moveData.type, VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, &request)) { if 
(metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( request, block, moveData.alignment, moveData.flags, this, moveData.type, &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) return true; } } } } } prevFreeRegionSize = nextFreeRegionSize; } } // No moves performed, update statistics to current vector state if (startMoveCount == m_Moves.size() && !update) { vectorState.avgAllocSize = UINT64_MAX; return ComputeDefragmentation_Balanced(vector, index, false); } return false; } bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) { // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0) for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { VmaDeviceMemoryBlock* block = vector.GetBlock(i); VmaBlockMetadata* metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } // Check all previous blocks for free space const size_t prevMoveCount = m_Moves.size(); if (AllocInOtherBlock(0, i, moveData, vector)) return true; // If no room found then realloc within block for lower offset VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { VmaAllocationRequest request = {}; if 
(metadata->CreateAllocationRequest( moveData.size, moveData.alignment, false, moveData.type, VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, &request)) { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( request, block, moveData.alignment, moveData.flags, this, moveData.type, &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) return true; } } } } } } return false; } bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) { // First free single block, then populate it to the brim, then free another block, and so on // Fallback to previous algorithm since without granularity conflicts it can achieve max packing if (vector.m_BufferImageGranularity == 1) return ComputeDefragmentation_Full(vector); VMA_ASSERT(m_AlgorithmState != VMA_NULL); StateExtensive& vectorState = reinterpret_cast(m_AlgorithmState)[index]; bool texturePresent = false, bufferPresent = false, otherPresent = false; switch (vectorState.operation) { case StateExtensive::Operation::Done: // Vector defragmented return false; case StateExtensive::Operation::FindFreeBlockBuffer: case StateExtensive::Operation::FindFreeBlockTexture: case StateExtensive::Operation::FindFreeBlockAll: { // No more blocks to free, just perform fast realloc and move to cleanup if (vectorState.firstFreeBlock == 0) { vectorState.operation = StateExtensive::Operation::Cleanup; return ComputeDefragmentation_Fast(vector); } // No free blocks, have to clear last one size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? 
vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; const size_t prevMoveCount = m_Moves.size(); for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = freeMetadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, freeMetadata); switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } // Check all previous blocks for free space if (AllocInOtherBlock(0, last, moveData, vector)) { // Full clear performed already if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) vectorState.firstFreeBlock = last; return true; } } if (prevMoveCount == m_Moves.size()) { // Cannot perform full clear, have to move data in other blocks around if (last != 0) { for (size_t i = last - 1; i; --i) { if (ReallocWithinBlock(vector, vector.GetBlock(i))) return true; } } if (prevMoveCount == m_Moves.size()) { // No possible reallocs within blocks, try to move them around fast return ComputeDefragmentation_Fast(vector); } } else { switch (vectorState.operation) { case StateExtensive::Operation::FindFreeBlockBuffer: vectorState.operation = StateExtensive::Operation::MoveBuffers; break; case StateExtensive::Operation::FindFreeBlockTexture: vectorState.operation = StateExtensive::Operation::MoveTextures; break; case StateExtensive::Operation::FindFreeBlockAll: vectorState.operation = StateExtensive::Operation::MoveAll; break; default: VMA_ASSERT(0); vectorState.operation = StateExtensive::Operation::MoveTextures; } vectorState.firstFreeBlock = last; // Nothing done, block found without reallocations, can perform another reallocs in same pass return ComputeDefragmentation_Extensive(vector, index); } break; } case StateExtensive::Operation::MoveTextures: { if 
(MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (texturePresent) { vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; return ComputeDefragmentation_Extensive(vector, index); } if (!bufferPresent && !otherPresent) { vectorState.operation = StateExtensive::Operation::Cleanup; break; } // No more textures to move, check buffers vectorState.operation = StateExtensive::Operation::MoveBuffers; bufferPresent = false; otherPresent = false; } else break; VMA_FALLTHROUGH; // Fallthrough } case StateExtensive::Operation::MoveBuffers: { if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (bufferPresent) { vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; return ComputeDefragmentation_Extensive(vector, index); } if (!otherPresent) { vectorState.operation = StateExtensive::Operation::Cleanup; break; } // No more buffers to move, check all others vectorState.operation = StateExtensive::Operation::MoveAll; otherPresent = false; } else break; VMA_FALLTHROUGH; // Fallthrough } case StateExtensive::Operation::MoveAll: { if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (otherPresent) { vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; return ComputeDefragmentation_Extensive(vector, index); } // Everything moved vectorState.operation = StateExtensive::Operation::Cleanup; } break; } case StateExtensive::Operation::Cleanup: // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). 
break; } if (vectorState.operation == StateExtensive::Operation::Cleanup) { // All other work done, pack data in blocks even tighter if possible const size_t prevMoveCount = m_Moves.size(); for (size_t i = 0; i < vector.GetBlockCount(); ++i) { if (ReallocWithinBlock(vector, vector.GetBlock(i))) return true; } if (prevMoveCount == m_Moves.size()) vectorState.operation = StateExtensive::Operation::Done; } return false; } void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) { size_t allocCount = 0; size_t freeCount = 0; state.avgFreeSize = 0; state.avgAllocSize = 0; for (size_t i = 0; i < vector.GetBlockCount(); ++i) { VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; allocCount += metadata->GetAllocationCount(); freeCount += metadata->GetFreeRegionsCount(); state.avgFreeSize += metadata->GetSumFreeSize(); state.avgAllocSize += metadata->GetSize(); } state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; state.avgFreeSize /= freeCount; } bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, VmaBlockVector& vector, size_t firstFreeBlock, bool& texturePresent, bool& bufferPresent, bool& otherPresent) { const size_t prevMoveCount = m_Moves.size(); for (size_t i = firstFreeBlock ; i;) { VmaDeviceMemoryBlock* block = vector.GetBlock(--i); VmaBlockMetadata* metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; switch (CheckCounters(moveData.move.srcAllocation->GetSize())) { case CounterStatus::Ignore: continue; case CounterStatus::End: return true; case CounterStatus::Pass: break; default: VMA_ASSERT(0); } // Move only single type of resources at once if 
(!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) { // Try to fit allocation into free blocks if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) return false; } if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) texturePresent = true; else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) bufferPresent = true; else otherPresent = true; } } return prevMoveCount == m_Moves.size(); } #endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS #ifndef _VMA_POOL_T_FUNCTIONS VmaPool_T::VmaPool_T( VmaAllocator hAllocator, const VmaPoolCreateInfo& createInfo, VkDeviceSize preferredBlockSize) : m_BlockVector( hAllocator, this, // hParentPool createInfo.memoryTypeIndex, createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, createInfo.minBlockCount, createInfo.maxBlockCount, (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), createInfo.blockSize != 0, // explicitBlockSize createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm createInfo.priority, VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), createInfo.pMemoryAllocateNext), m_Id(0), m_Name(VMA_NULL) {} VmaPool_T::~VmaPool_T() { VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); VmaFreeString(allocs, m_Name); } void VmaPool_T::SetName(const char* pName) { const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); VmaFreeString(allocs, m_Name); if (pName != VMA_NULL) { m_Name = VmaCreateStringCopy(allocs, pName); } else { m_Name = VMA_NULL; } } #endif // _VMA_POOL_T_FUNCTIONS #ifndef _VMA_ALLOCATOR_T_FUNCTIONS VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : 
m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0), m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0), m_UseKhrExternalMemoryWin32((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT) != 0), m_hDevice(pCreateInfo->device), m_hInstance(pCreateInfo->instance), m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), m_AllocationObjectAllocator(&m_AllocationCallbacks), m_HeapSizeLimitMask(0), m_DeviceMemoryCount(0), m_PreferredLargeHeapBlockSize(0), m_PhysicalDevice(pCreateInfo->physicalDevice), m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), m_NextPoolId(0), m_GlobalMemoryTypeBits(UINT32_MAX) { if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_UseKhrDedicatedAllocation = false; m_UseKhrBindMemory2 = false; } if(VMA_DEBUG_DETECT_CORRUPTION) { // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); } VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) { #if !(VMA_DEDICATED_ALLOCATION) if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); } #endif #if !(VMA_BIND_MEMORY2) if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); } #endif } #if !(VMA_MEMORY_BUDGET) if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); } #endif #if !(VMA_BUFFER_DEVICE_ADDRESS) if(m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if VMA_VULKAN_VERSION < 1004000 VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 4, 0) && "vulkanApiVersion >= VK_API_VERSION_1_4 but required Vulkan version is disabled by preprocessor macros."); #endif #if VMA_VULKAN_VERSION < 1003000 VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 3, 0) && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); #endif #if VMA_VULKAN_VERSION < 1002000 VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 2, 0) && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); #endif #if VMA_VULKAN_VERSION < 1001000 VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0) && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan 
version is disabled by preprocessor macros."); #endif #if !(VMA_MEMORY_PRIORITY) if(m_UseExtMemoryPriority) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if !(VMA_KHR_MAINTENANCE4) if(m_UseKhrMaintenance4) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if !(VMA_KHR_MAINTENANCE5) if(m_UseKhrMaintenance5) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if !(VMA_KHR_MAINTENANCE5) if(m_UseKhrMaintenance5) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if !(VMA_EXTERNAL_MEMORY_WIN32) if(m_UseKhrExternalMemoryWin32) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); memset(&m_MemProps, 0, sizeof(m_MemProps)); memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); #if VMA_EXTERNAL_MEMORY memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); #endif // #if VMA_EXTERNAL_MEMORY if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) { m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; m_DeviceMemoryCallbacks.pfnAllocate 
= pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; } ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); #if VMA_EXTERNAL_MEMORY if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) { memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); } #endif // #if VMA_EXTERNAL_MEMORY if(pCreateInfo->pHeapSizeLimit != VMA_NULL) { for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) { const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; if(limit != VK_WHOLE_SIZE) { m_HeapSizeLimitMask |= 1u << heapIndex; if(limit < m_MemProps.memoryHeaps[heapIndex].size) { m_MemProps.memoryHeaps[heapIndex].size = limit; } } } } for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { // Create only supported types if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) { const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( this, VK_NULL_HANDLE, // hParentPool memTypeIndex, preferredBlockSize, 0, SIZE_MAX, GetBufferImageGranularity(), false, // explicitBlockSize 0, // algorithm 0.5f, // 
priority (0.5 is the default per Vulkan spec) GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment VMA_NULL); // // pMemoryAllocateNext // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, // because minBlockCount is 0. } } } VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) { VkResult res = VK_SUCCESS; #if VMA_MEMORY_BUDGET if(m_UseExtMemoryBudget) { UpdateVulkanBudget(); } #endif // #if VMA_MEMORY_BUDGET return res; } VmaAllocator_T::~VmaAllocator_T() { VMA_ASSERT(m_Pools.IsEmpty()); for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) { vma_delete(this, m_pBlockVectors[memTypeIndex]); } } void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) { #if VMA_STATIC_VULKAN_FUNCTIONS == 1 ImportVulkanFunctions_Static(); #endif if(pVulkanFunctions != VMA_NULL) { ImportVulkanFunctions_Custom(pVulkanFunctions); } #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 ImportVulkanFunctions_Dynamic(); #endif ValidateVulkanFunctions(); } #if VMA_STATIC_VULKAN_FUNCTIONS == 1 void VmaAllocator_T::ImportVulkanFunctions_Static() { // Vulkan 1.0 m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; 
m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; // Vulkan 1.1 #if VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; } #endif #if VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; } #endif #if VMA_VULKAN_VERSION >= 1003000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; } #endif } 
#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies user-supplied function pointers from pVulkanFunctions into
// m_VulkanFunctions. Only non-null entries are copied, so this layers on top
// of pointers imported statically and can itself be overridden by the dynamic
// fetch step that may follow (see ImportVulkanFunctions).
void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
    VMA_ASSERT(pVulkanFunctions != VMA_NULL);

    // Copy a single member only when the caller actually provided it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    VMA_COPY_IF_NOT_NULL(vkMapMemory);
    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    VMA_COPY_IF_NOT_NULL(vkCreateImage);
    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);

    // Extension / promoted-to-core members, compiled in only when the
    // corresponding feature is enabled at build time.
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif
#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
    VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
#endif
#if VMA_EXTERNAL_MEMORY_WIN32
    VMA_COPY_IF_NOT_NULL(vkGetMemoryWin32HandleKHR);
#endif
#undef VMA_COPY_IF_NOT_NULL
}

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1

// Fetches any still-missing function pointers through vkGetInstanceProcAddr /
// vkGetDeviceProcAddr supplied by the user.
void VmaAllocator_T::ImportVulkanFunctions_Dynamic
{ VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " "Other members can be null."); #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ if(m_VulkanFunctions.memberName == VMA_NULL) \ m_VulkanFunctions.memberName = \ (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ if(m_VulkanFunctions.memberName == VMA_NULL) \ m_VulkanFunctions.memberName = \ (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, 
PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); #if VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); } #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); // Try to fetch the pointer from the other name, based on suspected driver bug - see issue #410. VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); } else if(m_UseExtMemoryBudget) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); // Try to fetch the pointer from the other name, based on suspected driver bug - see issue #410. 
VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); } #endif #if VMA_DEDICATED_ALLOCATION if(m_UseKhrDedicatedAllocation) { VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); } #endif #if VMA_BIND_MEMORY2 if(m_UseKhrBindMemory2) { VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); } #endif // #if VMA_BIND_MEMORY2 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); } else if(m_UseExtMemoryBudget) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); } #endif // #if VMA_MEMORY_BUDGET #if VMA_VULKAN_VERSION >= 1003000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); } #endif #if VMA_KHR_MAINTENANCE4 if(m_UseKhrMaintenance4) { VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR"); VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR"); } #endif #if 
VMA_EXTERNAL_MEMORY_WIN32 if (m_UseKhrExternalMemoryWin32) { VMA_FETCH_DEVICE_FUNC(vkGetMemoryWin32HandleKHR, PFN_vkGetMemoryWin32HandleKHR, "vkGetMemoryWin32HandleKHR"); } #endif #undef VMA_FETCH_DEVICE_FUNC #undef VMA_FETCH_INSTANCE_FUNC } #endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 void VmaAllocator_T::ValidateVulkanFunctions() { VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) { VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); } #endif #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) { VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); } #endif #if 
VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); } #endif #if VMA_EXTERNAL_MEMORY_WIN32 if (m_UseKhrExternalMemoryWin32) { VMA_ASSERT(m_VulkanFunctions.vkGetMemoryWin32HandleKHR != VMA_NULL); } #endif // Not validating these due to suspected driver bugs with these function // pointers being null despite correct extension or Vulkan version is enabled. // See issue #397. Their usage in VMA is optional anyway. // // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); } VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) { const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; return VmaAlignUp(isSmallHeap ? 
(heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
}

// Allocates allocationCount allocations of the given size/alignment from one
// specific Vulkan memory type: either as dedicated VkDeviceMemory or by
// suballocating from blockVector, depending on the create-info flags and the
// heuristics that follow.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VmaPool pool,
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedPreferred,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaBufferImageUsage dedicatedBufferImageUsage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaDedicatedAllocationList& dedicatedAllocations,
    VmaBlockVector& blockVector,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size);

    // Work on a copy: CalcMemTypeParams may clear flags (e.g. MAPPED on a
    // non-HOST_VISIBLE memory type).
    VmaAllocationCreateInfo finalCreateInfo = createInfo;
    VkResult res = CalcMemTypeParams(
        finalCreateInfo,
        memTypeIndex,
        size,
        allocationCount);
    if(res != VK_SUCCESS)
        return res;

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        // Caller explicitly demanded a dedicated allocation.
        return AllocateDedicatedMemory(
            pool,
            size,
            suballocType,
            dedicatedAllocations,
            memTypeIndex,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
            (finalCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
            finalCreateInfo.pUserData,
            finalCreateInfo.priority,
            dedicatedBuffer,
            dedicatedImage,
            dedicatedBufferImageUsage,
            allocationCount,
            pAllocations,
            blockVector.GetAllocationNextPtr());
    }
    else
    {
        // Dedicated memory remains a fallback unless NEVER_ALLOCATE forbids
        // it, or the custom pool has a fixed block size (which implies no
        // dedicated allocations for that pool).
        const bool canAllocateDedicated =
            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
            (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());

        if(canAllocateDedicated)
        {
            // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
if(size > blockVector.GetPreferredBlockSize() / 2)
            {
                dedicatedPreferred = true;
            }
            // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
            // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
            // 3/4 of the maximum allocation count.
            if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
                m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
            {
                dedicatedPreferred = false;
            }

            if(dedicatedPreferred)
            {
                // Preferred path: try a dedicated VkDeviceMemory first; fall
                // through to block suballocation below on failure.
                res = AllocateDedicatedMemory(
                    pool,
                    size,
                    suballocType,
                    dedicatedAllocations,
                    memTypeIndex,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                    (finalCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
                    finalCreateInfo.pUserData,
                    finalCreateInfo.priority,
                    dedicatedBuffer,
                    dedicatedImage,
                    dedicatedBufferImageUsage,
                    allocationCount,
                    pAllocations,
                    blockVector.GetAllocationNextPtr());
                if(res == VK_SUCCESS)
                {
                    // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
                    VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
                    return VK_SUCCESS;
                }
            }
        }

        // Suballocate from (or grow) the block vector of this memory type.
        res = blockVector.Allocate(
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            allocationCount,
            pAllocations);
        if(res == VK_SUCCESS)
            return VK_SUCCESS;

        // Try dedicated memory.
if(canAllocateDedicated && !dedicatedPreferred) { res = AllocateDedicatedMemory( pool, size, suballocType, dedicatedAllocations, memTypeIndex, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, (finalCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, dedicatedBuffer, dedicatedImage, dedicatedBufferImageUsage, allocationCount, pAllocations, blockVector.GetAllocationNextPtr()); if(res == VK_SUCCESS) { // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); return VK_SUCCESS; } } // Everything failed: Return error code. VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); return res; } } VkResult VmaAllocator_T::AllocateDedicatedMemory( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, VmaDedicatedAllocationList& dedicatedAllocations, uint32_t memTypeIndex, bool map, bool isUserDataString, bool isMappingAllowed, bool canAliasMemory, void* pUserData, float priority, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VmaBufferImageUsage dedicatedBufferImageUsage, size_t allocationCount, VmaAllocation* pAllocations, const void* pNextChain) { VMA_ASSERT(allocationCount > 0 && pAllocations); VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; allocInfo.memoryTypeIndex = memTypeIndex; allocInfo.allocationSize = size; allocInfo.pNext = pNextChain; #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; if(!canAliasMemory) { if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { if(dedicatedBuffer != VK_NULL_HANDLE) { 
VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); dedicatedAllocInfo.buffer = dedicatedBuffer; VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); } else if(dedicatedImage != VK_NULL_HANDLE) { dedicatedAllocInfo.image = dedicatedImage; VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); } } } #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 #if VMA_BUFFER_DEVICE_ADDRESS VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; if(m_UseKhrBufferDeviceAddress) { bool canContainBufferWithDeviceAddress = true; if(dedicatedBuffer != VK_NULL_HANDLE) { canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN || dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT); } else if(dedicatedImage != VK_NULL_HANDLE) { canContainBufferWithDeviceAddress = false; } if(canContainBufferWithDeviceAddress) { allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); } } #endif // #if VMA_BUFFER_DEVICE_ADDRESS #if VMA_MEMORY_PRIORITY VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; if(m_UseExtMemoryPriority) { VMA_ASSERT(priority >= 0.f && priority <= 1.f); priorityInfo.priority = priority; VmaPnextChainPushFront(&allocInfo, &priorityInfo); } #endif // #if VMA_MEMORY_PRIORITY #if VMA_EXTERNAL_MEMORY // Attach VkExportMemoryAllocateInfoKHR if necessary. 
VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); if(exportMemoryAllocInfo.handleTypes != 0) { VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); } #endif // #if VMA_EXTERNAL_MEMORY size_t allocIndex; VkResult res = VK_SUCCESS; for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { res = AllocateDedicatedMemoryPage( pool, size, suballocType, memTypeIndex, allocInfo, map, isUserDataString, isMappingAllowed, pUserData, pAllocations + allocIndex); if(res != VK_SUCCESS) { break; } } if(res == VK_SUCCESS) { for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { dedicatedAllocations.Register(pAllocations[allocIndex]); } VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%" PRIu32, allocationCount, memTypeIndex); } else { // Free all already created allocations. while(allocIndex--) { VmaAllocation currAlloc = pAllocations[allocIndex]; VkDeviceMemory hMemory = currAlloc->GetMemory(); /* There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory before vkFreeMemory. 
if(currAlloc->GetMappedData() != VMA_NULL) { (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); } */ FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); m_AllocationObjectAllocator.Free(currAlloc); } memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); } return res; } VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, uint32_t memTypeIndex, const VkMemoryAllocateInfo& allocInfo, bool map, bool isUserDataString, bool isMappingAllowed, void* pUserData, VmaAllocation* pAllocation) { VkDeviceMemory hMemory = VK_NULL_HANDLE; VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); if(res < 0) { VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); return res; } void* pMappedData = VMA_NULL; if(map) { res = (*m_VulkanFunctions.vkMapMemory)( m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData); if(res < 0) { VMA_DEBUG_LOG(" vkMapMemory FAILED"); FreeVulkanMemory(memTypeIndex, size, hMemory); return res; } } *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); (*pAllocation)->InitDedicatedAllocation(this, pool, memTypeIndex, hMemory, suballocType, pMappedData, size); if (isUserDataString) (*pAllocation)->SetName(this, (const char*)pUserData); else (*pAllocation)->SetUserData(this, pUserData); m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) { FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } return VK_SUCCESS; } void VmaAllocator_T::GetBufferMemoryRequirements( VkBuffer hBuffer, VkMemoryRequirements& memReq, bool& requiresDedicatedAllocation, bool& prefersDedicatedAllocation) const { #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VkBufferMemoryRequirementsInfo2KHR memReqInfo = { 
VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; memReqInfo.buffer = hBuffer; VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; VmaPnextChainPushFront(&memReq2, &memDedicatedReq); (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); } else #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); requiresDedicatedAllocation = false; prefersDedicatedAllocation = false; } } void VmaAllocator_T::GetImageMemoryRequirements( VkImage hImage, VkMemoryRequirements& memReq, bool& requiresDedicatedAllocation, bool& prefersDedicatedAllocation) const { #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; memReqInfo.image = hImage; VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; VmaPnextChainPushFront(&memReq2, &memDedicatedReq); (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); } else #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { 
(*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}

// Picks the cheapest memory type compatible with memoryTypeBits and the
// caller's create info. requiredFlags must all be present; cost is the number
// of preferred flags missing plus not-preferred flags present. Returns
// VK_ERROR_FEATURE_NOT_PRESENT when no acceptable type exists.
VkResult VmaAllocator_T::FindMemoryTypeIndex(
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VmaBufferImageUsage bufImgUsage,
    uint32_t* pMemoryTypeIndex) const
{
    // Restrict candidates by allocator-global and caller-specified masks.
    memoryTypeBits &= GetGlobalMemoryTypeBits();

    if(pAllocationCreateInfo->memoryTypeBits != 0)
    {
        memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    }

    VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
    if(!FindMemoryPreferences(
        IsIntegratedGpu(),
        *pAllocationCreateInfo,
        bufImgUsage,
        requiredFlags, preferredFlags, notPreferredFlags))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    *pMemoryTypeIndex = UINT32_MAX;
    uint32_t minCost = UINT32_MAX;
    for(uint32_t memTypeIndex = 0, memTypeBit = 1;
        memTypeIndex < GetMemoryTypeCount();
        ++memTypeIndex, memTypeBit <<= 1)
    {
        // This memory type is acceptable according to memoryTypeBits bitmask.
        if((memTypeBit & memoryTypeBits) != 0)
        {
            const VkMemoryPropertyFlags currFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
            // This memory type contains requiredFlags.
            if((requiredFlags & ~currFlags) == 0)
            {
                // Calculate cost as number of bits from preferredFlags not present in this memory type.
                uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
                    VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
                // Remember memory type with lowest cost.
                if(currCost < minCost)
                {
                    *pMemoryTypeIndex = memTypeIndex;
                    if(currCost == 0)
                    {
                        // Perfect match: no cheaper candidate can exist.
                        return VK_SUCCESS;
                    }
                    minCost = currCost;
                }
            }
        }
    }
    return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
}

// Adjusts create-info flags for a concrete memory type and enforces the
// WITHIN_BUDGET limit for dedicated allocations.
VkResult VmaAllocator_T::CalcMemTypeParams(
    VmaAllocationCreateInfo& inoutCreateInfo,
    uint32_t memTypeIndex,
    VkDeviceSize size,
    size_t allocationCount)
{
    // If memory type is not HOST_VISIBLE, disable MAPPED.
if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; } if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) { const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); VmaBudget heapBudget = {}; GetHeapBudgets(&heapBudget, heapIndex, 1); if(heapBudget.usage + size * allocationCount > heapBudget.budget) { return VK_ERROR_OUT_OF_DEVICE_MEMORY; } } return VK_SUCCESS; } VkResult VmaAllocator_T::CalcAllocationParams( VmaAllocationCreateInfo& inoutCreateInfo, bool dedicatedRequired, bool dedicatedPreferred) { VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) { if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) { VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | 
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); } } // If memory is lazily allocated, it should be always dedicated. if(dedicatedRequired || inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } if(inoutCreateInfo.pool != VK_NULL_HANDLE) { if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); return VK_ERROR_FEATURE_NOT_PRESENT; } inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); } if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); return VK_ERROR_FEATURE_NOT_PRESENT; } if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } // Non-auto USAGE values imply HOST_ACCESS flags. // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. // Otherwise they just protect from assert on mapping. 
if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) { if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; } } return VK_SUCCESS; } VkResult VmaAllocator_T::AllocateMemory( const VkMemoryRequirements& vkMemReq, bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VmaBufferImageUsage dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, VmaAllocation* pAllocations) { memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); if(vkMemReq.size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } VmaAllocationCreateInfo createInfoFinal = createInfo; VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); if(res != VK_SUCCESS) return res; if(createInfoFinal.pool != VK_NULL_HANDLE) { VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; return AllocateMemoryOfType( createInfoFinal.pool, vkMemReq.size, vkMemReq.alignment, prefersDedicatedAllocation, dedicatedBuffer, dedicatedImage, dedicatedBufferImageUsage, createInfoFinal, blockVector.GetMemoryTypeIndex(), suballocType, createInfoFinal.pool->m_DedicatedAllocations, blockVector, allocationCount, pAllocations); } else { // Bit mask of memory Vulkan types acceptable for this allocation. uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; uint32_t memTypeIndex = UINT32_MAX; res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. 
if(res != VK_SUCCESS)
        return res;

    do
    {
        VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
        res = AllocateMemoryOfType(
            VK_NULL_HANDLE,
            vkMemReq.size,
            vkMemReq.alignment,
            requiresDedicatedAllocation || prefersDedicatedAllocation,
            dedicatedBuffer,
            dedicatedImage,
            dedicatedBufferImageUsage,
            createInfoFinal,
            memTypeIndex,
            suballocType,
            m_DedicatedAllocations[memTypeIndex],
            *blockVector,
            allocationCount,
            pAllocations);
        // Allocation succeeded
        if(res == VK_SUCCESS)
            return VK_SUCCESS;

        // Remove old memTypeIndex from list of possibilities.
        memoryTypeBits &= ~(1u << memTypeIndex);
        // Find alternative memTypeIndex.
        res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
    } while(res == VK_SUCCESS);

    // No other matching memory type index could be found.
    // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
}

// Frees allocationCount allocations. Null handles in pAllocations are
// skipped; the array is walked in reverse order. Block allocations are
// returned to their owning (custom-pool or default) block vector; dedicated
// allocations release their VkDeviceMemory via FreeDedicatedMemory.
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];
        if(allocation != VK_NULL_HANDLE)
        {
            if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
            {
                // Overwrite freed memory with a debug pattern to surface
                // use-after-free bugs.
                FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
            }

            switch(allocation->GetType())
            {
            case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                {
                    VmaBlockVector* pBlockVector = VMA_NULL;
                    VmaPool hPool = allocation->GetParentPool();
                    if(hPool != VK_NULL_HANDLE)
                    {
                        // Allocation came from a custom pool.
                        pBlockVector = &hPool->m_BlockVector;
                    }
                    else
                    {
                        // Allocation came from a default per-memory-type pool.
                        const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                        pBlockVector = m_pBlockVectors[memTypeIndex];
                        VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
                    }
                    pBlockVector->Free(allocation);
                }
                break;
            case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                FreeDedicatedMemory(allocation);
                break;
            default:
                VMA_ASSERT(0);
            }
        }
    }
}

void
// Gathers detailed statistics over all memory: default per-type block
// vectors, custom pools (under the pools mutex), and dedicated allocations,
// then folds per-type stats into per-heap stats and per-heap into the total.
// (Return type `void` appears immediately before this line in the file.)
VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
{
    // Initialize.
    VmaClearDetailedStatistics(pStats->total);
    for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        VmaClearDetailedStatistics(pStats->memoryType[i]);
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        VmaClearDetailedStatistics(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        if (pBlockVector != VMA_NULL)
            pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
        {
            VmaBlockVector& blockVector = pool->m_BlockVector;
            const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
            blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
            pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
    }

    // Sum from memory types to memory heaps.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
        VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
    }

    // Sum from memory heaps to total.
    for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
        VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);

    VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
        pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
    VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
        pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
}

// Fills `heapCount` budget entries starting at `firstHeap`. With
// VK_EXT_memory_budget active, serves cached driver budget values (refreshed
// via UpdateVulkanBudget once 30+ operations have occurred since the last
// fetch, then recursing once); otherwise falls back to internally tracked
// block bytes and an 80%-of-heap-size budget heuristic.
void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
            {
                const uint32_t heapIndex = firstHeap + i;

                outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
                outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
                outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Extrapolate current usage from the usage reported at the last
                // budget fetch plus blocks allocated/freed since then.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudgets->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudgets->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
            outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
            outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudgets->usage = outBudgets->statistics.blockBytes;
            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}

// Copies the allocation's basic properties into the public info struct.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    pAllocationInfo->offset = hAllocation->GetOffset();
    pAllocationInfo->size = hAllocation->GetSize();
    pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    pAllocationInfo->pUserData = hAllocation->GetUserData();
    pAllocationInfo->pName = hAllocation->GetName();
}

// Extended variant: additionally reports the owning block's size and whether
// the allocation is dedicated (for dedicated allocations the "block size"
// equals the allocation size).
void VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo)
{
    GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo);

    switch (hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
        pAllocationInfo->dedicatedMemory = VK_FALSE;
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size;
        pAllocationInfo->dedicatedMemory = VK_TRUE;
        break;
    default:
        VMA_ASSERT(0);
    }
}

// Creates a custom pool: validates the create info (block count ordering,
// memory type index in range and allowed by m_GlobalMemoryTypeBits, alignment
// a power of two), pre-creates the minimum number of blocks, and registers
// the pool in m_Pools under the write lock.
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    VMA_DEBUG_LOG_FORMAT("  CreatePool: MemoryTypeIndex=%" PRIu32 ", flags=%" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;

    // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash.
    if(pCreateInfo->pMemoryAllocateNext)
    {
        VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
    }

    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Memory type index out of range or forbidden.
    if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
        ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }
    if(newCreateInfo.minAllocationAlignment > 0)
    {
        VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools.
    {
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        m_Pools.PushBack(*pPool);
    }

    return VK_SUCCESS;
}

// Unregisters the pool from m_Pools (under the write lock) and destroys it.
void VmaAllocator_T::DestroyPool(VmaPool pool)
{
    // Remove from m_Pools.
{ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); m_Pools.Remove(pool); } vma_delete(this, pool); } void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) { VmaClearStatistics(*pPoolStats); pool->m_BlockVector.AddStatistics(*pPoolStats); pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); } void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) { VmaClearDetailedStatistics(*pPoolStats); pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); } void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) { m_CurrentFrameIndex.store(frameIndex); #if VMA_MEMORY_BUDGET if(m_UseExtMemoryBudget) { UpdateVulkanBudget(); } #endif // #if VMA_MEMORY_BUDGET } VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) { return hPool->m_BlockVector.CheckCorruption(); } VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) { VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; // Process default pools. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; if(pBlockVector != VMA_NULL) { VkResult localRes = pBlockVector->CheckCorruption(); switch(localRes) { case VK_ERROR_FEATURE_NOT_PRESENT: break; case VK_SUCCESS: finalRes = VK_SUCCESS; break; default: return localRes; } } } // Process custom pools. 
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
        {
            if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
            {
                VkResult localRes = pool->m_BlockVector.CheckCorruption();
                switch(localRes)
                {
                case VK_ERROR_FEATURE_NOT_PRESENT:
                    break;
                case VK_SUCCESS:
                    finalRes = VK_SUCCESS;
                    break;
                default:
                    return localRes;
                }
            }
        }
    }

    return finalRes;
}

// Thin wrapper over vkAllocateMemory that maintains the allocator's internal
// budget bookkeeping. Enforces maxMemoryAllocationCount (when the debug check
// is enabled) and any explicit per-heap size limit (via a compare-exchange
// loop on the atomic byte counter, so concurrent allocators cannot jointly
// exceed the limit). On failure the counters are rolled back; on success the
// informative callback fires and the transactional device-memory count is
// committed.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    // NOTE(review): a template argument list appears to be missing here
    // (likely lost in text extraction) — TODO confirm against upstream.
    AtomicTransactionalIncrement deviceMemoryCountIncrement;
    const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
    {
        return VK_ERROR_TOO_MANY_OBJECTS;
    }
#endif

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    // HeapSizeLimit is in effect for this heap.
    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
    {
        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
        for(;;)
        {
            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
            if(blockBytesAfterAllocation > heapSize)
            {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
            {
                break;
            }
        }
    }
    else
    {
        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
    }
    ++m_Budget.m_BlockCount[heapIndex];

    // VULKAN CALL vkAllocateMemory.
    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);

    if(res == VK_SUCCESS)
    {
#if VMA_MEMORY_BUDGET
        ++m_Budget.m_OperationsSinceBudgetFetch;
#endif

        // Informative callback.
        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
        {
            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
        }

        deviceMemoryCountIncrement.Commit();
    }
    else
    {
        // Roll back the budget counters updated optimistically above.
        --m_Budget.m_BlockCount[heapIndex];
        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
    }

    return res;
}

// Thin wrapper over vkFreeMemory: fires the informative callback, frees the
// memory, then decrements the budget counters and the device-memory count.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Informative callback.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
    }

    // VULKAN CALL vkFreeMemory.
    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    --m_Budget.m_BlockCount[heapIndex];
    m_Budget.m_BlockBytes[heapIndex] -= size;

    --m_DeviceMemoryCount;
}

// Binds a buffer to device memory. When a pNext chain is supplied, requires
// vkBindBufferMemory2 (core 1.1 or VK_KHR_bind_memory2); otherwise falls back
// to plain vkBindBufferMemory and returns VK_ERROR_EXTENSION_NOT_PRESENT if
// the extended entry point is unavailable.
VkResult VmaAllocator_T::BindVulkanBuffer(
    VkDeviceMemory memory,
    VkDeviceSize memoryOffset,
    VkBuffer buffer,
    const void* pNext)
{
    if(pNext != VMA_NULL)
    {
#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
        {
            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
            bindBufferMemoryInfo.pNext = pNext;
            bindBufferMemoryInfo.buffer = buffer;
            bindBufferMemoryInfo.memory = memory;
            bindBufferMemoryInfo.memoryOffset = memoryOffset;
            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
        }
        else
#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        {
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }
    else
    {
        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
    }
}

// Image counterpart of BindVulkanBuffer: same pNext / vkBindImageMemory2
// dispatch logic.
VkResult VmaAllocator_T::BindVulkanImage(
    VkDeviceMemory memory,
    VkDeviceSize memoryOffset,
    VkImage image,
    const void* pNext)
{
    if(pNext != VMA_NULL)
{ #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) { VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; bindBufferMemoryInfo.pNext = pNext; bindBufferMemoryInfo.image = image; bindBufferMemoryInfo.memory = memory; bindBufferMemoryInfo.memoryOffset = memoryOffset; return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); } else #endif // #if VMA_BIND_MEMORY2 { return VK_ERROR_EXTENSION_NOT_PRESENT; } } else { return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); } } VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) { switch(hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); char *pBytes = VMA_NULL; VkResult res = pBlock->Map(this, 1, (void**)&pBytes); if(res == VK_SUCCESS) { *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); hAllocation->BlockAllocMap(); } return res; } case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: return hAllocation->DedicatedAllocMap(this, ppData); default: VMA_ASSERT(0); return VK_ERROR_MEMORY_MAP_FAILED; } } void VmaAllocator_T::Unmap(VmaAllocation hAllocation) { switch(hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); hAllocation->BlockAllocUnmap(); pBlock->Unmap(this, 1); } break; case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: hAllocation->DedicatedAllocUnmap(this); break; default: VMA_ASSERT(0); } } VkResult VmaAllocator_T::BindBufferMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, const void* pNext) { VkResult res = VK_ERROR_UNKNOWN_COPY; switch(hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: res = 
BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); break; case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); break; } default: VMA_ASSERT(0); } return res; } VkResult VmaAllocator_T::BindImageMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, const void* pNext) { VkResult res = VK_ERROR_UNKNOWN_COPY; switch(hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); break; case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); break; } default: VMA_ASSERT(0); } return res; } VkResult VmaAllocator_T::FlushOrInvalidateAllocation( VmaAllocation hAllocation, VkDeviceSize offset, VkDeviceSize size, VMA_CACHE_OPERATION op) { VkResult res = VK_SUCCESS; VkMappedMemoryRange memRange = {}; if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) { switch(op) { case VMA_CACHE_FLUSH: res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); break; case VMA_CACHE_INVALIDATE: res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); break; default: VMA_ASSERT(0); } } // else: Just ignore this call. 
return res; } VkResult VmaAllocator_T::FlushOrInvalidateAllocations( uint32_t allocationCount, const VmaAllocation* allocations, const VkDeviceSize* offsets, const VkDeviceSize* sizes, VMA_CACHE_OPERATION op) { typedef VmaStlAllocator RangeAllocator; typedef VmaSmallVector RangeVector; RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks())); for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { const VmaAllocation alloc = allocations[allocIndex]; const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0; const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE; VkMappedMemoryRange newRange; if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) { ranges.push_back(newRange); } } VkResult res = VK_SUCCESS; if(!ranges.empty()) { switch(op) { case VMA_CACHE_FLUSH: res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); break; case VMA_CACHE_INVALIDATE: res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); break; default: VMA_ASSERT(0); } } // else: Just ignore this call. 
    return res;
}

// Host-to-allocation copy: maps, memcpy's the bytes at the given local
// offset, unmaps, then flushes the written range (flush is a no-op for
// coherent memory).
VkResult VmaAllocator_T::CopyMemoryToAllocation(
    const void* pSrcHostPointer,
    VmaAllocation dstAllocation,
    VkDeviceSize dstAllocationLocalOffset,
    VkDeviceSize size)
{
    void* dstMappedData = VMA_NULL;
    VkResult res = Map(dstAllocation, &dstMappedData);
    if(res == VK_SUCCESS)
    {
        memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size);
        Unmap(dstAllocation);
        res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH);
    }
    return res;
}

// Allocation-to-host copy: maps, invalidates the source range first so the
// host sees up-to-date data, then memcpy's out and unmaps.
VkResult VmaAllocator_T::CopyAllocationToMemory(
    VmaAllocation srcAllocation,
    VkDeviceSize srcAllocationLocalOffset,
    void* pDstHostPointer,
    VkDeviceSize size)
{
    void* srcMappedData = VMA_NULL;
    VkResult res = Map(srcAllocation, &srcMappedData);
    if(res == VK_SUCCESS)
    {
        res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE);
        if(res == VK_SUCCESS)
        {
            memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size);
            Unmap(srcAllocation);
        }
    }
    return res;
}

// Destroys a dedicated allocation: unregisters it from the default or custom
// pool's dedicated list, frees its VkDeviceMemory, updates the budget, and
// returns the allocation object to the object allocator.
void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    VmaPool parentPool = allocation->GetParentPool();
    if(parentPool == VK_NULL_HANDLE)
    {
        // Default pool
        m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
    }
    else
    {
        // Custom pool
        parentPool->m_DedicatedAllocations.Unregister(allocation);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
    allocation->Destroy(this);
    m_AllocationObjectAllocator.Free(allocation);

    VMA_DEBUG_LOG_FORMAT("    Freed DedicatedMemory MemoryTypeIndex=%" PRIu32, memTypeIndex);
}

// Determines which memory types can back the dummy buffer used for GPU
// defragmentation by creating a throw-away buffer and querying its memory
// requirements. Returns 0 if the buffer could not be created.
uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
{
    VkBufferCreateInfo dummyBufCreateInfo;
    VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);

    uint32_t memoryTypeBits = 0;

    // Create buffer.
    VkBuffer buf = VK_NULL_HANDLE;
    VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
        m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    if(res == VK_SUCCESS)
    {
        // Query for supported memory types.
        VkMemoryRequirements memReq;
        (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
        memoryTypeBits = memReq.memoryTypeBits;

        // Destroy buffer.
        (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    }

    return memoryTypeBits;
}

// Computes the mask of memory types the allocator is allowed to use at all.
// Unless AMD device-coherent memory was explicitly enabled, types carrying
// VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD are excluded.
uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
{
    // Make sure memory information is already fetched.
    VMA_ASSERT(GetMemoryTypeCount() > 0);

    uint32_t memoryTypeBits = UINT32_MAX;

    if(!m_UseAmdDeviceCoherentMemory)
    {
        // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
            {
                memoryTypeBits &= ~(1u << memTypeIndex);
            }
        }
    }

    return memoryTypeBits;
}

// Builds the VkMappedMemoryRange for an explicit flush/invalidate of
// (offset, size) within `allocation`. Returns false when no Vulkan call is
// needed (size 0 or host-coherent memory type). Both offset and size are
// aligned to nonCoherentAtomSize as the spec requires, and clamped so the
// range never exceeds the allocation (dedicated) or the owning block (block
// allocation, after shifting by the allocation's offset within the block).
bool VmaAllocator_T::GetFlushOrInvalidateRange(
    VmaAllocation allocation,
    VkDeviceSize offset, VkDeviceSize size,
    VkMappedMemoryRange& outRange) const
{
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        const VkDeviceSize allocationSize = allocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        outRange.pNext = VMA_NULL;
        outRange.memory = allocation->GetMemory();

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                outRange.size = allocationSize - outRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                outRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
                    allocationSize - outRange.offset);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = allocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
            outRange.offset += allocationOffset;
            outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);

            break;
        }
        default:
            VMA_ASSERT(0);
        }
        return true;
    }
    return false;
}

#if VMA_MEMORY_BUDGET
// Fetches fresh heap usage/budget numbers from the driver via
// VK_EXT_memory_budget and caches them (under the budget write lock) together
// with the current internally tracked block bytes, so GetHeapBudgets can
// later extrapolate. Sanitizes obviously bogus driver values.
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);

    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };

    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();

            // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
} else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) { m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; } if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) { m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; } } m_Budget.m_OperationsSinceBudgetFetch = 0; } } #endif // VMA_MEMORY_BUDGET void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) { if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && hAllocation->IsMappingAllowed() && (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) { void* pData = VMA_NULL; VkResult res = Map(hAllocation, &pData); if(res == VK_SUCCESS) { memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); Unmap(hAllocation); } else { VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); } } } uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() { uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); if(memoryTypeBits == UINT32_MAX) { memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); } return memoryTypeBits; } #if VMA_STATS_STRING_ENABLED void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) { json.WriteString("DefaultPools"); json.BeginObject(); { for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; if (pBlockVector != VMA_NULL) { json.BeginString("Type "); json.ContinueString(memTypeIndex); json.EndString(); json.BeginObject(); { json.WriteString("PreferredBlockSize"); 
                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());

                    json.WriteString("Blocks");
                    pBlockVector->PrintDetailedMap(json);

                    json.WriteString("DedicatedAllocations");
                    dedicatedAllocList.BuildStatsString(json);
                }
                json.EndObject();
            }
        }
    }
    json.EndObject();

    json.WriteString("CustomPools");
    json.BeginObject();
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        if (!m_Pools.IsEmpty())
        {
            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
            {
                bool displayType = true;
                size_t index = 0;
                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
                {
                    VmaBlockVector& blockVector = pool->m_BlockVector;
                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
                    {
                        // Open the per-type array lazily, only when the first
                        // pool of this memory type is found.
                        if (displayType)
                        {
                            json.BeginString("Type ");
                            json.ContinueString(memTypeIndex);
                            json.EndString();
                            json.BeginArray();
                            displayType = false;
                        }

                        json.BeginObject();
                        {
                            json.WriteString("Name");
                            json.BeginString();
                            json.ContinueString((uint64_t)index++);
                            if (pool->GetName())
                            {
                                json.ContinueString(" - ");
                                json.ContinueString(pool->GetName());
                            }
                            json.EndString();

                            json.WriteString("PreferredBlockSize");
                            json.WriteNumber(blockVector.GetPreferredBlockSize());

                            json.WriteString("Blocks");
                            blockVector.PrintDetailedMap(json);

                            json.WriteString("DedicatedAllocations");
                            pool->m_DedicatedAllocations.BuildStatsString(json);
                        }
                        json.EndObject();
                    }
                }

                if (!displayType)
                    json.EndArray();
            }
        }
    }
    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
#endif // _VMA_ALLOCATOR_T_FUNCTIONS

#ifndef _VMA_PUBLIC_INTERFACE
// Public entry point: allocates and initializes a VmaAllocator_T. On Init
// failure the half-constructed object is destroyed and *pAllocator reset to
// VK_NULL_HANDLE.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 4));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    VkResult result = (*pAllocator)->Init(pCreateInfo);
    if(result < 0)
    {
        vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
        *pAllocator = VK_NULL_HANDLE;
    }
    return result;
}

// Public entry point: destroys the allocator. The allocation callbacks are
// copied first because they live inside the object being deleted.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
        vma_delete(&allocationCallbacks, allocator);
    }
}

// Returns the instance/physical device/device the allocator was created with.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
{
    VMA_ASSERT(allocator && pAllocatorInfo);
    pAllocatorInfo->instance = allocator->m_hInstance;
    pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
    pAllocatorInfo->device = allocator->m_hDevice;
}

// Exposes the cached VkPhysicalDeviceProperties by pointer (no copy).
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}

// Exposes the cached VkPhysicalDeviceMemoryProperties by pointer (no copy).
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}

// Returns the property flags of one memory type.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}

// Forwards the frame index to the allocator (may trigger a budget refresh).
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}

// Public wrapper over VmaAllocator_T::CalculateStatistics.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
    VmaAllocator allocator,
    VmaTotalStatistics* pStats)
{
VMA_ASSERT(allocator && pStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->CalculateStatistics(pStats); } VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( VmaAllocator allocator, VmaBudget* pBudgets) { VMA_ASSERT(allocator && pBudgets); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); } #if VMA_STATS_STRING_ENABLED VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( VmaAllocator allocator, char** ppStatsString, VkBool32 detailedMap) { VMA_ASSERT(allocator && ppStatsString); VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaStringBuilder sb(allocator->GetAllocationCallbacks()); { VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); VmaTotalStatistics stats; allocator->CalculateStatistics(&stats); VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); json.BeginObject(); { json.WriteString("General"); json.BeginObject(); { const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; json.WriteString("API"); json.WriteString("Vulkan"); json.WriteString("apiVersion"); json.BeginString(); json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion)); json.ContinueString("."); json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion)); json.ContinueString("."); json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion)); json.EndString(); json.WriteString("GPU"); json.WriteString(deviceProperties.deviceName); json.WriteString("deviceType"); json.WriteNumber(static_cast(deviceProperties.deviceType)); json.WriteString("maxMemoryAllocationCount"); json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); json.WriteString("bufferImageGranularity"); json.WriteNumber(deviceProperties.limits.bufferImageGranularity); json.WriteString("nonCoherentAtomSize"); json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); 
                json.WriteString("memoryHeapCount");
                json.WriteNumber(memoryProperties.memoryHeapCount);
                json.WriteString("memoryTypeCount");
                json.WriteNumber(memoryProperties.memoryTypeCount);
            }
            json.EndObject();
        }
        {
            json.WriteString("Total");
            VmaPrintDetailedStatistics(json, stats.total);
        }
        {
            json.WriteString("MemoryInfo");
            json.BeginObject();
            {
                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
                {
                    json.BeginString("Heap ");
                    json.ContinueString(heapIndex);
                    json.EndString();
                    json.BeginObject();
                    {
                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
                        json.WriteString("Flags");
                        json.BeginArray(true);
                        {
                            // Known flags are written by name; any remainder is
                            // written numerically below.
                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
                                json.WriteString("DEVICE_LOCAL");
#if VMA_VULKAN_VERSION >= 1001000
                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
                                json.WriteString("MULTI_INSTANCE");
#endif

                            VkMemoryHeapFlags flags = heapInfo.flags &
                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
#if VMA_VULKAN_VERSION >= 1001000
                                | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
#endif
                                );
                            if (flags != 0)
                                json.WriteNumber(flags);
                        }
                        json.EndArray();

                        json.WriteString("Size");
                        json.WriteNumber(heapInfo.size);

                        json.WriteString("Budget");
                        json.BeginObject();
                        {
                            json.WriteString("BudgetBytes");
                            json.WriteNumber(budgets[heapIndex].budget);
                            json.WriteString("UsageBytes");
                            json.WriteNumber(budgets[heapIndex].usage);
                        }
                        json.EndObject();

                        json.WriteString("Stats");
                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);

                        json.WriteString("MemoryPools");
                        json.BeginObject();
                        {
                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
                            {
                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                                {
                                    json.BeginString("Type ");
                                    json.ContinueString(typeIndex);
                                    json.EndString();
                                    json.BeginObject();
                                    {
                                        json.WriteString("Flags");
                                        json.BeginArray(true);
                                        {
                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
                                                json.WriteString("DEVICE_LOCAL");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                                                json.WriteString("HOST_VISIBLE");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
                                                json.WriteString("HOST_COHERENT");
                                            if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
                                                json.WriteString("HOST_CACHED");
                                            if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
                                                json.WriteString("LAZILY_ALLOCATED");
#if VMA_VULKAN_VERSION >= 1001000
                                            if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
                                                json.WriteString("PROTECTED");
#endif
#if VK_AMD_device_coherent_memory
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_COHERENT_AMD");
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_UNCACHED_AMD");
#endif

                                            // NOTE(review): the 1001000-guarded
                                            // member of this mask is
                                            // LAZILY_ALLOCATED rather than
                                            // PROTECTED (printed by name above)
                                            // — TODO confirm the intended
                                            // grouping against upstream.
                                            flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
#if VMA_VULKAN_VERSION >= 1001000
                                                | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
#endif
#if VK_AMD_device_coherent_memory
                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
#endif
                                                | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                                                | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                                                | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
                                            if (flags != 0)
                                                json.WriteNumber(flags);
                                        }
                                        json.EndArray();

                                        json.WriteString("Stats");
                                        VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
                                    }
                                    json.EndObject();
                                }
                            }
                        }
                        json.EndObject();
                    }
                    json.EndObject();
                }
            }
            json.EndObject();
        }

        if (detailedMap == VK_TRUE)
            allocator->PrintDetailedMap(json);

        json.EndObject();
    }

    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
}

// Releases a string previously returned by vmaBuildStatsString.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString)
{
    if(pStatsString != VMA_NULL)
    {
        VMA_ASSERT(allocator);
        VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
    }
}
#endif // VMA_STATS_STRING_ENABLED

/*
This function is not protected by any mutex because it just reads immutable data.
*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo* pAllocationCreateInfo, uint32_t* pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VmaAllocator allocator, const VkBufferCreateInfo* pBufferCreateInfo, const VmaAllocationCreateInfo* pAllocationCreateInfo, uint32_t* pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pBufferCreateInfo != VMA_NULL); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); VkResult res; #if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 if(funcs->vkGetDeviceBufferMemoryRequirements) { // Can query straight from VkBufferCreateInfo :) VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR}; devBufMemReq.pCreateInfo = pBufferCreateInfo; VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); res = allocator->FindMemoryTypeIndex( memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); } else #endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 { // Must create a dummy buffer to query :( VkBuffer hBuffer = VK_NULL_HANDLE; res = funcs->vkCreateBuffer( hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); if(res == VK_SUCCESS) { VkMemoryRequirements memReq = {}; funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, 
&memReq); res = allocator->FindMemoryTypeIndex( memReq.memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); funcs->vkDestroyBuffer( hDev, hBuffer, allocator->GetAllocationCallbacks()); } } return res; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VmaAllocator allocator, const VkImageCreateInfo* pImageCreateInfo, const VmaAllocationCreateInfo* pAllocationCreateInfo, uint32_t* pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pImageCreateInfo != VMA_NULL); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); VkResult res; #if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 if(funcs->vkGetDeviceImageMemoryRequirements) { // Can query straight from VkImageCreateInfo :) VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR}; devImgMemReq.pCreateInfo = pImageCreateInfo; VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); res = allocator->FindMemoryTypeIndex( memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); } else #endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 { // Must create a dummy image to query :( VkImage hImage = VK_NULL_HANDLE; res = funcs->vkCreateImage( hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); if(res == 
VK_SUCCESS) { VkMemoryRequirements memReq = {}; funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); res = allocator->FindMemoryTypeIndex( memReq.memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); funcs->vkDestroyImage( hDev, hImage, allocator->GetAllocationCallbacks()); } } return res; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( VmaAllocator allocator, const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) { VMA_ASSERT(allocator && pCreateInfo && pPool); VMA_DEBUG_LOG("vmaCreatePool"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->CreatePool(pCreateInfo, pPool); } VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( VmaAllocator allocator, VmaPool pool) { VMA_ASSERT(allocator); if(pool == VK_NULL_HANDLE) { return; } VMA_DEBUG_LOG("vmaDestroyPool"); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->DestroyPool(pool); } VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( VmaAllocator allocator, VmaPool pool, VmaStatistics* pPoolStats) { VMA_ASSERT(allocator && pool && pPoolStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->GetPoolStatistics(pool, pPoolStats); } VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( VmaAllocator allocator, VmaPool pool, VmaDetailedStatistics* pPoolStats) { VMA_ASSERT(allocator && pool && pPoolStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->CalculatePoolStatistics(pool, pPoolStats); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) { VMA_ASSERT(allocator && pool); VMA_DEBUG_GLOBAL_MUTEX_LOCK VMA_DEBUG_LOG("vmaCheckPoolCorruption"); return allocator->CheckPoolCorruption(pool); } VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( VmaAllocator allocator, VmaPool pool, const char** ppName) { VMA_ASSERT(allocator && pool && ppName); VMA_DEBUG_LOG("vmaGetPoolName"); VMA_DEBUG_GLOBAL_MUTEX_LOCK *ppName = pool->GetName(); } VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( VmaAllocator allocator, VmaPool pool, const char* pName) { VMA_ASSERT(allocator 
&& pool); VMA_DEBUG_LOG("vmaSetPoolName"); VMA_DEBUG_GLOBAL_MUTEX_LOCK pool->SetName(pName); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( VmaAllocator allocator, const VkMemoryRequirements* pVkMemoryRequirements, const VmaAllocationCreateInfo* pCreateInfo, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK VkResult result = allocator->AllocateMemory( *pVkMemoryRequirements, false, // requiresDedicatedAllocation false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, 1, // allocationCount pAllocation); if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VmaAllocator allocator, const VkMemoryRequirements* pVkMemoryRequirements, const VmaAllocationCreateInfo* pCreateInfo, size_t allocationCount, VmaAllocation* pAllocations, VmaAllocationInfo* pAllocationInfo) { if(allocationCount == 0) { return VK_SUCCESS; } VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); VMA_DEBUG_LOG("vmaAllocateMemoryPages"); VMA_DEBUG_GLOBAL_MUTEX_LOCK VkResult result = allocator->AllocateMemory( *pVkMemoryRequirements, false, // requiresDedicatedAllocation false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, allocationCount, pAllocations); if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { for(size_t i = 0; i < allocationCount; ++i) { allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); } } return result; } VMA_CALL_PRE VkResult 
VMA_CALL_POST vmaAllocateMemoryForBuffer( VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo* pCreateInfo, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); VMA_DEBUG_GLOBAL_MUTEX_LOCK VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(buffer, vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, buffer, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); if(pAllocationInfo && result == VK_SUCCESS) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo* pCreateInfo, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); VMA_DEBUG_GLOBAL_MUTEX_LOCK VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(image, vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, VK_NULL_HANDLE, // dedicatedBuffer image, // dedicatedImage VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, 1, // allocationCount pAllocation); if(pAllocationInfo && result == VK_SUCCESS) { 
allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( VmaAllocator allocator, VmaAllocation allocation) { VMA_ASSERT(allocator); if(allocation == VK_NULL_HANDLE) { return; } VMA_DEBUG_LOG("vmaFreeMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->FreeMemory( 1, // allocationCount &allocation); } VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( VmaAllocator allocator, size_t allocationCount, const VmaAllocation* pAllocations) { if(allocationCount == 0) { return; } VMA_ASSERT(allocator); VMA_DEBUG_LOG("vmaFreeMemoryPages"); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->FreeMemory(allocationCount, pAllocations); } VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && allocation && pAllocationInfo); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->GetAllocationInfo(allocation, pAllocationInfo); } VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo2* pAllocationInfo) { VMA_ASSERT(allocator && allocation && pAllocationInfo); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->GetAllocationInfo2(allocation, pAllocationInfo); } VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( VmaAllocator allocator, VmaAllocation allocation, void* pUserData) { VMA_ASSERT(allocator && allocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocation->SetUserData(allocator, pUserData); } VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const char* VMA_NULLABLE pName) { allocation->SetName(allocator, pName); } VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) { VMA_ASSERT(allocator && allocation && pFlags); const uint32_t memTypeIndex = 
allocation->GetMemoryTypeIndex(); *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( VmaAllocator allocator, VmaAllocation allocation, void** ppData) { VMA_ASSERT(allocator && allocation && ppData); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->Map(allocation, ppData); } VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( VmaAllocator allocator, VmaAllocation allocation) { VMA_ASSERT(allocator && allocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->Unmap(allocation); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { VMA_ASSERT(allocator && allocation); VMA_DEBUG_LOG("vmaFlushAllocation"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { VMA_ASSERT(allocator && allocation); VMA_DEBUG_LOG("vmaInvalidateAllocation"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation* allocations, const VkDeviceSize* offsets, const VkDeviceSize* sizes) { VMA_ASSERT(allocator); if(allocationCount == 0) { return VK_SUCCESS; } VMA_ASSERT(allocations); VMA_DEBUG_LOG("vmaFlushAllocations"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation* allocations, const VkDeviceSize* offsets, const VkDeviceSize* sizes) { VMA_ASSERT(allocator); if(allocationCount == 0) { return VK_SUCCESS; } 
VMA_ASSERT(allocations); VMA_DEBUG_LOG("vmaInvalidateAllocations"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation( VmaAllocator allocator, const void* pSrcHostPointer, VmaAllocation dstAllocation, VkDeviceSize dstAllocationLocalOffset, VkDeviceSize size) { VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation); if(size == 0) { return VK_SUCCESS; } VMA_DEBUG_LOG("vmaCopyMemoryToAllocation"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( VmaAllocator allocator, VmaAllocation srcAllocation, VkDeviceSize srcAllocationLocalOffset, void* pDstHostPointer, VkDeviceSize size) { VMA_ASSERT(allocator && srcAllocation && pDstHostPointer); if(size == 0) { return VK_SUCCESS; } VMA_DEBUG_LOG("vmaCopyAllocationToMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( VmaAllocator allocator, uint32_t memoryTypeBits) { VMA_ASSERT(allocator); VMA_DEBUG_LOG("vmaCheckCorruption"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->CheckCorruption(memoryTypeBits); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VmaAllocator allocator, const VmaDefragmentationInfo* pInfo, VmaDefragmentationContext* pContext) { VMA_ASSERT(allocator && pInfo && pContext); VMA_DEBUG_LOG("vmaBeginDefragmentation"); if (pInfo->pool != VMA_NULL) { // Check if run on supported algorithms if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) return VK_ERROR_FEATURE_NOT_PRESENT; } VMA_DEBUG_GLOBAL_MUTEX_LOCK *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); return VK_SUCCESS; } 
VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationStats* pStats) { VMA_ASSERT(allocator && context); VMA_DEBUG_LOG("vmaEndDefragmentation"); VMA_DEBUG_GLOBAL_MUTEX_LOCK if (pStats) context->GetStats(*pStats); vma_delete(allocator, context); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) { VMA_ASSERT(context && pPassInfo); VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return context->DefragmentPassBegin(*pPassInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) { VMA_ASSERT(context && pPassInfo); VMA_DEBUG_LOG("vmaEndDefragmentationPass"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return context->DefragmentPassEnd(*pPassInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer) { VMA_ASSERT(allocator && allocation && buffer); VMA_DEBUG_LOG("vmaBindBufferMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void* pNext) { VMA_ASSERT(allocator && allocation && buffer); VMA_DEBUG_LOG("vmaBindBufferMemory2"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( VmaAllocator allocator, VmaAllocation allocation, VkImage image) { VMA_ASSERT(allocator && allocation && image); VMA_DEBUG_LOG("vmaBindImageMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return 
allocator->BindImageMemory(allocation, 0, image, VMA_NULL); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void* pNext) { VMA_ASSERT(allocator && allocation && image); VMA_DEBUG_LOG("vmaBindImageMemory2"); VMA_DEBUG_GLOBAL_MUTEX_LOCK return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( VmaAllocator allocator, const VkBufferCreateInfo* pBufferCreateInfo, const VmaAllocationCreateInfo* pAllocationCreateInfo, VkBuffer* pBuffer, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); if(pBufferCreateInfo->size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && !allocator->m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); return VK_ERROR_INITIALIZATION_FAILED; } VMA_DEBUG_LOG("vmaCreateBuffer"); VMA_DEBUG_GLOBAL_MUTEX_LOCK *pBuffer = VK_NULL_HANDLE; *pAllocation = VK_NULL_HANDLE; // 1. Create VkBuffer. VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( allocator->m_hDevice, pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer); if(res >= 0) { // 2. vkGetBufferMemoryRequirements. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation); // 3. Allocate memory using allocator. 
res = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, *pBuffer, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); if(res >= 0) { // 3. Bind buffer with memory. if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); } if(res >= 0) { // All steps succeeded. #if VMA_STATS_STRING_ENABLED (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); #endif if(pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return VK_SUCCESS; } allocator->FreeMemory( 1, // allocationCount pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; return res; } (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; return res; } return res; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( VmaAllocator allocator, const VkBufferCreateInfo* pBufferCreateInfo, const VmaAllocationCreateInfo* pAllocationCreateInfo, VkDeviceSize minAlignment, VkBuffer* pBuffer, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); if(pBufferCreateInfo->size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && !allocator->m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if 
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); return VK_ERROR_INITIALIZATION_FAILED; } VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); VMA_DEBUG_GLOBAL_MUTEX_LOCK *pBuffer = VK_NULL_HANDLE; *pAllocation = VK_NULL_HANDLE; // 1. Create VkBuffer. VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( allocator->m_hDevice, pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer); if(res >= 0) { // 2. vkGetBufferMemoryRequirements. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation); // 2a. Include minAlignment vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); // 3. Allocate memory using allocator. res = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, *pBuffer, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); if(res >= 0) { // 3. Bind buffer with memory. if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); } if(res >= 0) { // All steps succeeded. 
#if VMA_STATS_STRING_ENABLED (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); #endif if(pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return VK_SUCCESS; } allocator->FreeMemory( 1, // allocationCount pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; return res; } (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; return res; } return res; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) { return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) { VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); VMA_DEBUG_LOG("vmaCreateAliasingBuffer2"); *pBuffer = VK_NULL_HANDLE; if (pBufferCreateInfo->size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && !allocator->m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); return VK_ERROR_INITIALIZATION_FAILED; } VMA_DEBUG_GLOBAL_MUTEX_LOCK // 1. Create VkBuffer. 
VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( allocator->m_hDevice, pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer); if (res >= 0) { // 2. Bind buffer with memory. res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL); if (res >= 0) { return VK_SUCCESS; } (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); } return res; } VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation) { VMA_ASSERT(allocator); if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { return; } VMA_DEBUG_LOG("vmaDestroyBuffer"); VMA_DEBUG_GLOBAL_MUTEX_LOCK if(buffer != VK_NULL_HANDLE) { (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); } if(allocation != VK_NULL_HANDLE) { allocator->FreeMemory( 1, // allocationCount &allocation); } } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( VmaAllocator allocator, const VkImageCreateInfo* pImageCreateInfo, const VmaAllocationCreateInfo* pAllocationCreateInfo, VkImage* pImage, VmaAllocation* pAllocation, VmaAllocationInfo* pAllocationInfo) { VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); if(pImageCreateInfo->extent.width == 0 || pImageCreateInfo->extent.height == 0 || pImageCreateInfo->extent.depth == 0 || pImageCreateInfo->mipLevels == 0 || pImageCreateInfo->arrayLayers == 0) { return VK_ERROR_INITIALIZATION_FAILED; } VMA_DEBUG_LOG("vmaCreateImage"); VMA_DEBUG_GLOBAL_MUTEX_LOCK *pImage = VK_NULL_HANDLE; *pAllocation = VK_NULL_HANDLE; // 1. Create VkImage. VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( allocator->m_hDevice, pImageCreateInfo, allocator->GetAllocationCallbacks(), pImage); if(res == VK_SUCCESS) { VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? 
VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; // 2. Allocate memory using allocator. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(*pImage, vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation); res = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, VK_NULL_HANDLE, // dedicatedBuffer *pImage, // dedicatedImage VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage *pAllocationCreateInfo, suballocType, 1, // allocationCount pAllocation); if(res == VK_SUCCESS) { // 3. Bind image with memory. if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); } if(res == VK_SUCCESS) { // All steps succeeded. #if VMA_STATS_STRING_ENABLED (*pAllocation)->InitImageUsage(*pImageCreateInfo); #endif if(pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return VK_SUCCESS; } allocator->FreeMemory( 1, // allocationCount pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); *pImage = VK_NULL_HANDLE; return res; } (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); *pImage = VK_NULL_HANDLE; return res; } return res; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) { return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL 
allocation, VkDeviceSize allocationLocalOffset, const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) { VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); *pImage = VK_NULL_HANDLE; VMA_DEBUG_LOG("vmaCreateImage2"); if (pImageCreateInfo->extent.width == 0 || pImageCreateInfo->extent.height == 0 || pImageCreateInfo->extent.depth == 0 || pImageCreateInfo->mipLevels == 0 || pImageCreateInfo->arrayLayers == 0) { return VK_ERROR_INITIALIZATION_FAILED; } VMA_DEBUG_GLOBAL_MUTEX_LOCK // 1. Create VkImage. VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( allocator->m_hDevice, pImageCreateInfo, allocator->GetAllocationCallbacks(), pImage); if (res >= 0) { // 2. Bind image with memory. res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL); if (res >= 0) { return VK_SUCCESS; } (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); } return res; } VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NULLABLE_NON_DISPATCHABLE image, VmaAllocation VMA_NULLABLE allocation) { VMA_ASSERT(allocator); if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { return; } VMA_DEBUG_LOG("vmaDestroyImage"); VMA_DEBUG_GLOBAL_MUTEX_LOCK if(image != VK_NULL_HANDLE) { (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); } if(allocation != VK_NULL_HANDLE) { allocator->FreeMemory( 1, // allocationCount &allocation); } } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) { VMA_ASSERT(pCreateInfo && pVirtualBlock); VMA_ASSERT(pCreateInfo->size > 0); VMA_DEBUG_LOG("vmaCreateVirtualBlock"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, 
VmaVirtualBlock_T)(*pCreateInfo); VkResult res = (*pVirtualBlock)->Init(); if(res < 0) { vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); *pVirtualBlock = VK_NULL_HANDLE; } return res; } VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) { if(virtualBlock != VK_NULL_HANDLE) { VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. vma_delete(&allocationCallbacks, virtualBlock); } } VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE; } VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, VkDeviceSize* VMA_NULLABLE pOffset) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); VMA_DEBUG_LOG("vmaVirtualAllocate"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); } VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) { if(allocation != 
VK_NULL_HANDLE) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaVirtualFree"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->Free(allocation); } } VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaClearVirtualBlock"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->Clear(); } VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->SetAllocationUserData(allocation, pUserData); } VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaStatistics* VMA_NOT_NULL pStats) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->GetStatistics(*pStats); } VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaDetailedStatistics* VMA_NOT_NULL pStats) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; virtualBlock->CalculateDetailedStatistics(*pStats); } #if VMA_STATS_STRING_ENABLED VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); VMA_DEBUG_GLOBAL_MUTEX_LOCK; const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); VmaStringBuilder sb(allocationCallbacks); virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); *ppStatsString = 
VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); } VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, char* VMA_NULLABLE pStatsString) { if(pStatsString != VMA_NULL) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_GLOBAL_MUTEX_LOCK; VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString); } } #if VMA_EXTERNAL_MEMORY_WIN32 VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle) { VMA_ASSERT(allocator && allocation && pHandle); VMA_DEBUG_GLOBAL_MUTEX_LOCK; return allocation->GetWin32Handle(allocator, hTargetProcess, pHandle); } #endif // VMA_EXTERNAL_MEMORY_WIN32 #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_PUBLIC_INTERFACE #endif // VMA_IMPLEMENTATION /** \page quick_start Quick start \section quick_start_project_setup Project setup Vulkan Memory Allocator comes in form of a "stb-style" single header file. While you can pull the entire repository e.g. as Git module, there is also Cmake script provided, you don't need to build it as a separate library project. You can add file "vk_mem_alloc.h" directly to your project and submit it to code repository next to your other source files. "Single header" doesn't mean that everything is contained in C/C++ declarations, like it tends to be in case of inline functions or C++ templates. It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro. If you don't do it properly, it will result in linker errors. To do it properly: -# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library. This includes declarations of all members of the library. -# In exactly one CPP file define following macro before this include. It enables also internal definitions. 
\code #define VMA_IMPLEMENTATION #include "vk_mem_alloc.h" \endcode It may be a good idea to create dedicated CPP file just for this purpose, e.g. "VmaUsage.cpp". This library includes header `<vulkan/vulkan.h>`, which in turn includes `<windows.h>` on Windows. If you need some specific macros defined before including these headers (like `WIN32_LEAN_AND_MEAN` or `WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define them before every `#include` of this library. It may be a good idea to create a dedicated header file for this purpose, e.g. "VmaUsage.h", that will be included in other source files instead of VMA header directly. This library is written in C++, but has C-compatible interface. Thus, you can include and use "vk_mem_alloc.h" in C or C++ code, but full implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C. Some features of C++14 are used and required. Features of C++20 are used optionally when available. Some headers of standard C and C++ library are used, but STL containers, RTTI, or C++ exceptions are not used. \section quick_start_initialization Initialization VMA offers library interface in a style similar to Vulkan, with object handles like #VmaAllocation, structures describing parameters of objects to be created like #VmaAllocationCreateInfo, and error codes returned from functions using `VkResult` type. The first and the main object that needs to be created is #VmaAllocator. It represents the initialization of the entire library. Only one such object should be created per `VkDevice`. You should create it at program startup, after `VkDevice` was created, and before any device memory allocation needs to be made. It must be destroyed before `VkDevice` is destroyed. At program startup: -# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` object. -# Fill VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create #VmaAllocator object.
Only members `physicalDevice`, `device`, `instance` are required. However, you should inform the library which Vulkan version you use by setting VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled by setting VmaAllocatorCreateInfo::flags. Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions. See below for details. \subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version VMA supports Vulkan version down to 1.0, for backward compatibility. If you want to use higher version, you need to inform the library about it. This is a two-step process. Step 1: Compile time. By default, VMA compiles with code supporting the highest Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library. If this is OK, you don't need to do anything. However, if you want to compile VMA as if only some lower Vulkan version was available, define macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`. It should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version. For example, to compile against Vulkan 1.2: \code #define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2 #include "vk_mem_alloc.h" \endcode Step 2: Runtime. Even when compiled with higher Vulkan version available, VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object. By default, only Vulkan 1.0 is used. To initialize the allocator with support for higher Vulkan version, you need to set member VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`. See code sample below. \subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions You may need to configure importing Vulkan functions. There are 3 ways to do this: -# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows): - You don't need to do anything.
- VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default. -# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`, `vkGetDeviceProcAddr` (this is the option presented in the example below): - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1. - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr, VmaVulkanFunctions::vkGetDeviceProcAddr. - The library will fetch pointers to all other functions it needs internally. -# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like [Volk](https://github.com/zeux/volk): - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. - Pass these pointers via structure #VmaVulkanFunctions. \subsection quick_start_initialization_enabling_extensions Enabling extensions VMA can automatically use following Vulkan extensions. If you found them available on the selected physical device and you enabled them while creating `VkInstance` / `VkDevice` object, inform VMA about their availability by setting appropriate flags in VmaAllocatorCreateInfo::flags. 
Vulkan extension | VMA flag ------------------------------|----------------------------------------------------- VK_KHR_dedicated_allocation | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT VK_KHR_bind_memory2 | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT VK_KHR_maintenance4 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT VK_KHR_maintenance5 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT VK_EXT_memory_budget | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT VK_KHR_buffer_device_address | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT VK_EXT_memory_priority | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT VK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT VK_KHR_external_memory_win32 | #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT Example with fetching pointers to Vulkan functions dynamically: \code #define VMA_STATIC_VULKAN_FUNCTIONS 0 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 #include "vk_mem_alloc.h" ... VmaVulkanFunctions vulkanFunctions = {}; vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; VmaAllocatorCreateInfo allocatorCreateInfo = {}; allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT; allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; allocatorCreateInfo.physicalDevice = physicalDevice; allocatorCreateInfo.device = device; allocatorCreateInfo.instance = instance; allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; VmaAllocator allocator; vmaCreateAllocator(&allocatorCreateInfo, &allocator); // Entire program... // At the end, don't forget to: vmaDestroyAllocator(allocator); \endcode \subsection quick_start_initialization_other_config Other configuration options There are additional configuration options available through preprocessor macros that you can define before including VMA header and through parameters passed in #VmaAllocatorCreateInfo. 
They include a possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`), callbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`), or your custom `VMA_ASSERT` macro, among others. For more information, see: @ref configuration. \section quick_start_resource_allocation Resource allocation When you want to create a buffer or image: -# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. -# Fill VmaAllocationCreateInfo structure. -# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory. \code VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufferInfo.size = 65536; bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocInfo = {}; allocInfo.usage = VMA_MEMORY_USAGE_AUTO; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); \endcode Don't forget to destroy your buffer and allocation objects when no longer needed: \code vmaDestroyBuffer(allocator, buffer, allocation); \endcode If you need to map the buffer, you must set flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. There are many additional parameters that can control the choice of memory type to be used for the allocation and other features. For more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping. \page choosing_memory_type Choosing memory type Physical devices in Vulkan support various combinations of memory heaps and types. Help with choosing correct and optimal memory type for your specific resource is one of the key features of this library. 
You can use it by filling appropriate members of VmaAllocationCreateInfo structure, as described below. You can also combine multiple methods. -# If you just want to find memory type index that meets your requirements, you can use function: vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). -# If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed. vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, which may be useful for sparse binding. -# If you already have a buffer or an image created, you want to allocate memory for it and then you will bind it yourself, you can use function vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). -# If you want to create a buffer or an image, allocate memory for it, and bind them together, all in one call, you can use function vmaCreateBuffer(), vmaCreateImage(). This is the easiest and recommended way to use this library! When using 3. or 4., the library internally queries Vulkan for memory types supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) and uses only one of these types. If no memory type can be found that meets all the requirements, these functions return `VK_ERROR_FEATURE_NOT_PRESENT`. You can leave VmaAllocationCreateInfo structure completely filled with zeros. It means no requirements are specified for memory type. It is valid, although not very useful. \section choosing_memory_type_usage Usage The easiest way to specify memory requirements is to fill member VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage. It defines high level, common usage types. 
Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically. For example, if you want to create a uniform buffer that will be filled using transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can do it using following code. The buffer will most likely end up in a memory type with `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device. \code VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufferInfo.size = 65536; bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocInfo = {}; allocInfo.usage = VMA_MEMORY_USAGE_AUTO; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); \endcode If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory on systems with discrete graphics card that have the memories separate, you can use #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST. When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory, you also need to specify one of the host access flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` so you can map it. For example, a staging buffer that will be filled via mapped pointer and then used as a source of transfer to the buffer described previously can be created like this. It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT` but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM). 
\code VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; stagingBufferInfo.size = 65536; stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo stagingAllocInfo = {}; stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO; stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; VkBuffer stagingBuffer; VmaAllocation stagingAllocation; vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr); \endcode For more examples of creating different kinds of resources, see chapter \ref usage_patterns. See also: @ref memory_mapping. Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed, so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc. If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting memory type, as described below. \note Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`, `VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`) are still available and work same way as in previous versions of the library for backward compatibility, but they are deprecated. \section choosing_memory_type_required_preferred_flags Required and preferred flags You can specify more detailed requirements by filling members VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags with a combination of bits from enum `VkMemoryPropertyFlags`. 
For example, if you want to create a buffer that will be persistently mapped on host (so it must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, use following code: \code VmaAllocationCreateInfo allocInfo = {}; allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); \endcode A memory type is chosen that has all the required flags and as many preferred flags set as possible. Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags, plus some extra "magic" (heuristics). \section choosing_memory_type_explicit_memory_types Explicit memory types If you inspected memory types available on the physical device and you have a preference for memory types that you want to use, you can fill member VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set means that a memory type with that index is allowed to be used for the allocation. Special value 0, just like `UINT32_MAX`, means there are no restrictions to memory type index. Please note that this member is NOT just a memory type index. Still you can use it to choose just one, specific memory type. For example, if you already determined that your buffer should be created in memory type 2, use following code: \code uint32_t memoryTypeIndex = 2; VmaAllocationCreateInfo allocInfo = {}; allocInfo.memoryTypeBits = 1u << memoryTypeIndex; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); \endcode You can also use this parameter to exclude some memory types. 
If you inspect memory heaps and types available on the current physical device and you determine that for some reason you don't want to use a specific memory type for the allocation, you can enable automatic memory type selection but exclude certain memory type or types by setting all bits of `memoryTypeBits` to 1 except the ones you choose. \code // ... uint32_t excludedMemoryTypeIndex = 2; VmaAllocationCreateInfo allocInfo = {}; allocInfo.usage = VMA_MEMORY_USAGE_AUTO; allocInfo.memoryTypeBits = ~(1u << excludedMemoryTypeIndex); // ... \endcode \section choosing_memory_type_custom_memory_pools Custom memory pools If you allocate from custom memory pool, all the ways of specifying memory requirements described above are not applicable and the aforementioned members of VmaAllocationCreateInfo structure are ignored. Memory type is selected explicitly when creating the pool and then used to make all the allocations from that pool. For further details, see \ref custom_memory_pools. \section choosing_memory_type_dedicated_allocations Dedicated allocations Memory for allocations is reserved out of larger block of `VkDeviceMemory` allocated from Vulkan internally. That is the main feature of this whole library. You can still request a separate memory block to be created for an allocation, just like you would do in a trivial solution without using any allocator. In that case, a buffer or image is always bound to that memory at offset 0. This is called a "dedicated allocation". You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. The library can also internally decide to use dedicated allocation in some cases, e.g.: - When the size of the allocation is large. - When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled and it reports that dedicated allocation is required or recommended for the resource. 
- When allocation of next big memory block fails due to not enough device memory, but allocation with the exact requested size succeeds. \page memory_mapping Memory mapping To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, to be able to read from it or write to it in CPU code. Mapping is possible only for memory allocated from a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. You can use them directly with memory allocated by this library, but it is not recommended because of the following issue: Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. It is also not thread-safe. Because of this, Vulkan Memory Allocator provides the following facilities: \note If you want to be able to map an allocation, you need to specify one of the flags #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable, but these flags can still be used for consistency. \section memory_mapping_copy_functions Copy functions The easiest way to copy data from a host pointer to an allocation is to use convenience function vmaCopyMemoryToAllocation(). It automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`, and calls `vkFlushMappedMemoryRanges` (if required - if memory type is not `HOST_COHERENT`). It is also the safest one, because using `memcpy` avoids a risk of accidentally introducing memory reads (e.g.
by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`. \code struct ConstantBuffer { ... }; ConstantBuffer constantBufferData = ... VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = sizeof(ConstantBuffer); bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; VkBuffer buf; VmaAllocation alloc; vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); vmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer)); \endcode Copy in the other direction - from an allocation to a host pointer can be performed the same way using function vmaCopyAllocationToMemory(). \section memory_mapping_mapping_functions Mapping functions The library provides following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory(). They are safer and more convenient to use than standard Vulkan functions. You can map an allocation multiple times simultaneously - mapping is reference-counted internally. You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. The way it is implemented is that the library always maps entire memory block, not just region of the allocation. For further details, see description of vmaMapMemory() function. Example: \code // Having these objects initialized: struct ConstantBuffer { ... }; ConstantBuffer constantBufferData = ... VmaAllocator allocator = ... VkBuffer constantBuffer = ... VmaAllocation constantBufferAllocation = ... 
// You can map and fill your buffer using following code: void* mappedData; vmaMapMemory(allocator, constantBufferAllocation, &mappedData); memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); vmaUnmapMemory(allocator, constantBufferAllocation); \endcode When mapping, you may see a warning from Vulkan validation layer similar to this one: Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used. It happens because the library maps entire `VkDeviceMemory` block, where different types of images and buffers may end up together, especially on GPUs with unified memory like Intel. You can safely ignore it if you are sure you access only memory of the intended object that you wanted to map. \section memory_mapping_persistently_mapped_memory Persistently mapped memory Keeping your memory persistently mapped is generally OK in Vulkan. You don't need to unmap it before using its data on the GPU. The library provides a special feature designed for that: Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in VmaAllocationCreateInfo::flags stay mapped all the time, so you can just access CPU pointer to it any time without a need to call any "map" or "unmap" function. Example: \code VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = sizeof(ConstantBuffer); bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); // Buffer is already mapped. You can access its memory. 
memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); \endcode \note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up in a mappable memory type. For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. #VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation. For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading. \section memory_mapping_cache_control Cache flush and invalidate Memory in Vulkan doesn't need to be unmapped before using it on GPU, but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set, you need to manually **invalidate** cache before reading of mapped pointer and **flush** cache after writing to mapped pointer. Map/unmap operations don't do that automatically. Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`, `vkInvalidateMappedMemoryRanges()`, but this library provides more convenient functions that refer to given allocation object: vmaFlushAllocation(), vmaInvalidateAllocation(), or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations(). Regions of memory specified for flush/invalidate must be aligned to `VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library. In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations within blocks are aligned to this value, so their offsets are always a multiple of `nonCoherentAtomSize` and two different allocations never share same "line" of this size. Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) currently provide `HOST_COHERENT` flag on all memory types that are `HOST_VISIBLE`, so on PC you may not need to bother. 
\page staying_within_budget Staying within budget When developing a graphics-intensive game or program, it is important to avoid allocating more GPU memory than is physically available. When the memory is over-committed, various bad things can happen, depending on the specific GPU, graphics driver, and operating system: - It may just work without any problems. - The application may slow down because some memory blocks are moved to system RAM and the GPU has to access them through PCI Express bus. - A new allocation may take very long time to complete, even few seconds, and possibly freeze entire system. - The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST` returned somewhere later. \section staying_within_budget_querying_for_budget Querying for budget To query for current memory usage and available budget, use function vmaGetHeapBudgets(). Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap. Please note that this function returns different information and works faster than vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every allocation, while vmaCalculateStatistics() is intended to be used rarely, only to obtain statistical information, e.g. for debugging purposes. It is recommended to use VK_EXT_memory_budget device extension to obtain information about the budget from Vulkan device. VMA is able to use this extension automatically. When not enabled, the allocator behaves same way, but then it estimates current usage and available budget based on its internal information and Vulkan memory heap sizes, which may be less precise. In order to use this extension: 1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2 required by it are available and enable them. Please note that the first is a device extension and the second is instance extension! 2. 
Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object. 3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from Vulkan inside of it to avoid overhead of querying it with every allocation. \section staying_within_budget_controlling_memory_usage Controlling memory usage There are many ways in which you can try to stay within the budget. First, when making new allocation requires allocating a new memory block, the library tries not to exceed the budget automatically. If a block with default recommended size (e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even dedicated memory for just this resource. If the size of the requested resource plus current memory usage is more than the budget, by default the library still tries to create it, leaving it to the Vulkan implementation whether the allocation succeeds or fails. You can change this behavior by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is not made if it would exceed the budget or if the budget is already exceeded. VMA then tries to make the allocation from the next eligible Vulkan memory type. If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag when creating resources that are not essential for the application (e.g. the texture of a specific object) and not to pass it when creating critically important resources (e.g. render targets). On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior that allows to control the behavior of the Vulkan implementation in out-of-memory cases - whether it should fail with an error code or still allow the allocation. Usage of this extension involves only passing extra structure on Vulkan device creation, so it is out of scope of this library. 
Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure a new allocation is created only when it fits inside one of the existing memory blocks. If it would require to allocate a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. This also ensures that the function call is very fast because it never goes to Vulkan to obtain a new block. \note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount set to more than 0 will currently try to allocate memory blocks without checking whether they fit within budget. \page resource_aliasing Resource aliasing (overlap) New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory management, give an opportunity to alias (overlap) multiple resources in the same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL). It can be useful to save video memory, but it must be used with caution. For example, if you know the flow of your whole render frame in advance, you are going to use some intermediate textures or buffers only during a small range of render passes, and you know these ranges don't overlap in time, you can bind these resources to the same place in memory, even if they have completely different parameters (width, height, format etc.). ![Resource aliasing (overlap)](../gfx/Aliasing.png) Such scenario is possible using VMA, but you need to create your images manually. Then you need to calculate parameters of an allocation to be made using formula: - allocation size = max(size of each image) - allocation alignment = max(alignment of each image) - allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image) Following example shows two different images bound to the same place in memory, allocated to fit largest of them. \code // A 512x512 texture to be sampled. 
VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; img1CreateInfo.imageType = VK_IMAGE_TYPE_2D; img1CreateInfo.extent.width = 512; img1CreateInfo.extent.height = 512; img1CreateInfo.extent.depth = 1; img1CreateInfo.mipLevels = 10; img1CreateInfo.arrayLayers = 1; img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB; img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; // A full screen texture to be used as color attachment. VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; img2CreateInfo.imageType = VK_IMAGE_TYPE_2D; img2CreateInfo.extent.width = 1920; img2CreateInfo.extent.height = 1080; img2CreateInfo.extent.depth = 1; img2CreateInfo.mipLevels = 1; img2CreateInfo.arrayLayers = 1; img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; VkImage img1; res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1); VkImage img2; res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2); VkMemoryRequirements img1MemReq; vkGetImageMemoryRequirements(device, img1, &img1MemReq); VkMemoryRequirements img2MemReq; vkGetImageMemoryRequirements(device, img2, &img2MemReq); VkMemoryRequirements finalMemReq = {}; finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size); finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment); finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits; // Validate if(finalMemReq.memoryTypeBits != 0) VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; VmaAllocation alloc; 
res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr); res = vmaBindImageMemory(allocator, alloc, img1); res = vmaBindImageMemory(allocator, alloc, img2); // You can use img1, img2 here, but not at the same time! vmaFreeMemory(allocator, alloc); vkDestroyImage(device, img2, nullptr); vkDestroyImage(device, img1, nullptr); \endcode VMA also provides convenience functions that create a buffer or image and bind it to memory represented by an existing #VmaAllocation: vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(), vmaCreateAliasingImage(), vmaCreateAliasingImage2(). Versions with "2" offer additional parameter `allocationLocalOffset`. Remember that using resources that alias in memory requires proper synchronization. You need to issue a memory barrier to make sure commands that use `img1` and `img2` don't overlap on GPU timeline. You also need to treat a resource after aliasing as uninitialized - containing garbage data. For example, if you use `img1` and then want to use `img2`, you need to issue an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`. Additional considerations: - Vulkan also allows interpreting contents of memory between aliasing resources consistently in some cases. See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag. - You can create more complex layout where different images and buffers are bound at different offsets inside one large allocation. For example, one can imagine a big texture used in some render passes, aliasing with a set of many small buffers used between in some further passes. To bind a resource at non-zero offset in an allocation, use vmaBindBufferMemory2() / vmaBindImageMemory2(). - Before allocating memory for the resources you want to alias, check `memoryTypeBits` returned in memory requirements of each resource to make sure the bits overlap. Some GPUs may expose multiple memory types suitable e.g. 
only for buffers or images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your resources may be disjoint. Aliasing them is not possible in that case. \page custom_memory_pools Custom memory pools A memory pool contains a number of `VkDeviceMemory` blocks. The library automatically creates and manages default pool for each memory type available on the device. Default memory pool automatically grows in size. Size of allocated blocks is also variable and managed automatically. You are using default pools whenever you leave VmaAllocationCreateInfo::pool = null. You can create custom pool and allocate memory out of it. It can be useful if you want to: - Keep certain kind of allocations separate from others. - Enforce particular, fixed size of Vulkan memory blocks. - Limit maximum amount of Vulkan memory allocated for that pool. - Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. - Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. - Perform defragmentation on a specific subset of your allocations. To use custom memory pools: -# Fill VmaPoolCreateInfo structure. -# Call vmaCreatePool() to obtain #VmaPool handle. -# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. You don't need to specify any other parameters of this structure, like `usage`. Example: \code // Find memoryTypeIndex for the pool. VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; sampleBufCreateInfo.size = 0x10000; // Doesn't matter. 
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo sampleAllocCreateInfo = {}; sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; uint32_t memTypeIndex; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); // Check res... // Create a pool that can have at most 2 blocks, 128 MiB each. VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.memoryTypeIndex = memTypeIndex; poolCreateInfo.blockSize = 128ull * 1024 * 1024; poolCreateInfo.maxBlockCount = 2; VmaPool pool; res = vmaCreatePool(allocator, &poolCreateInfo, &pool); // Check res... // Allocate a buffer out of it. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = 1024; bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.pool = pool; VkBuffer buf; VmaAllocation alloc; res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); // Check res... \endcode You have to free all allocations made from this pool before destroying it. \code vmaDestroyBuffer(allocator, buf, alloc); vmaDestroyPool(allocator, pool); \endcode New versions of this library support creating dedicated allocations in custom pools. It is supported only when VmaPoolCreateInfo::blockSize = 0. To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. \section custom_memory_pools_MemTypeIndex Choosing memory type index When creating a pool, you must explicitly specify memory type index. To find the one suitable for your buffers or images, you can use helper functions vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). 
You need to provide structures with example parameters of buffers or images that you are going to create in that pool. \code VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; exampleBufCreateInfo.size = 1024; // Doesn't matter exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; uint32_t memTypeIndex; vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.memoryTypeIndex = memTypeIndex; // ... \endcode When creating buffers/images allocated in that pool, provide following parameters: - `VkBufferCreateInfo`: Prefer to pass same parameters as above. Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers or the other way around. - VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. Other members are ignored anyway. \section custom_memory_pools_when_not_use When not to use custom pools Custom pools are commonly overused by VMA users. While it may feel natural to keep some logical groups of resources separate in memory, in most cases it does more harm than good. Using custom pool shouldn't be your first choice. Instead, please make all allocations from default pools first and only use custom pools if you can prove and measure that it is beneficial in some way, e.g. it results in lower memory usage, better performance, etc. Using custom pools has disadvantages: - Each pool has its own collection of `VkDeviceMemory` blocks. Some of them may be partially or even completely empty. 
Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory. - You must manually choose specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex). When using default pools, best memory type for each of your allocations can be selected automatically using a carefully designed algorithm that works across all kinds of GPUs. - If an allocation from a custom pool at specific memory type fails, entire allocation operation returns failure. When using default pools, VMA tries another compatible memory type. - If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size, while default pools start from small blocks and only allocate next blocks larger and larger up to the preferred block size. Many of the common concerns can be addressed in a different way than using custom pools: - If you want to keep your allocations of certain size (small versus large) or certain lifetime (transient versus long lived) separate, you likely don't need to. VMA uses a high quality allocation algorithm that manages memory well in various cases. Please measure and check if using custom pools provides a benefit. - If you want to keep your images and buffers separate, you don't need to. VMA respects `bufferImageGranularity` limit automatically. - If you want to keep your mapped and not mapped allocations separate, you don't need to. VMA respects `nonCoherentAtomSize` limit automatically. It also maps only those `VkDeviceMemory` blocks that need to map any allocation. It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory. - If you want to choose a custom size for the default memory block, you can set it globally instead using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize. - If you want to select specific memory type for your allocation, you can set VmaAllocationCreateInfo::memoryTypeBits to `(1u << myMemoryTypeIndex)` instead. 
- If you need to create a buffer with certain minimum alignment, you can still do it using default pools with dedicated function vmaCreateBufferWithAlignment(). \section linear_algorithm Linear allocation algorithm Each Vulkan memory block managed by this library has accompanying metadata that keeps track of used and unused regions. By default, the metadata structure and algorithm tries to find best place for new allocations among free regions to optimize memory usage. This way you can allocate and free objects in any order. ![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) Sometimes there is a need to use simpler, linear allocation algorithm. You can create custom pool that uses such algorithm by adding flag #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating #VmaPool object. Then an alternative metadata management is used. It always creates new allocations after last one and doesn't reuse free regions after allocations freed in the middle. It results in better allocation performance and less memory consumed by metadata. ![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) With this one flag, you can create a custom pool that can be used in many ways: free-at-once, stack, double stack, and ring buffer. See below for details. You don't need to specify explicitly which of these options you are going to use - it is detected automatically. \subsection linear_algorithm_free_at_once Free-at-once In a pool that uses linear algorithm, you still need to free all the allocations individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free them in any order. New allocations are always made after last one - free space in the middle is not reused. However, when you release all the allocation and the pool becomes empty, allocation starts from the beginning again. This way you can use linear algorithm to speed up creation of allocations that you are going to release all at once. 
![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png) This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount value that allows multiple memory blocks. \subsection linear_algorithm_stack Stack When you free an allocation that was created last, its space can be reused. Thanks to this, if you always release allocations in the order opposite to their creation (LIFO - Last In First Out), you can achieve behavior of a stack. ![Stack](../gfx/Linear_allocator_4_stack.png) This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount value that allows multiple memory blocks. \subsection linear_algorithm_double_stack Double stack The space reserved by a custom pool with linear algorithm may be used by two stacks: - First, default one, growing up from offset 0. - Second, "upper" one, growing down from the end towards lower offsets. To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT to VmaAllocationCreateInfo::flags. ![Double stack](../gfx/Linear_allocator_7_double_stack.png) Double stack is available only in pools with one memory block - VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. When the two stacks' ends meet so there is not enough space between them for a new allocation, such allocation fails with usual `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. \subsection linear_algorithm_ring_buffer Ring buffer When you free some allocations from the beginning and there is not enough free space for a new one at the end of a pool, allocator's "cursor" wraps around to the beginning and starts allocation there. Thanks to this, if you always release allocations in the same order as you created them (FIFO - First In First Out), you can achieve behavior of a ring buffer / queue. ![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) Ring buffer is available only in pools with one memory block - VmaPoolCreateInfo::maxBlockCount must be 1. 
Otherwise behavior is undefined. \note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. \page defragmentation Defragmentation Interleaved allocations and deallocations of many objects of varying size can cause fragmentation over time, which can lead to a situation where the library is unable to find a continuous range of free memory for a new allocation even though there is enough free space, just scattered across many small free ranges between existing allocations. To mitigate this problem, you can use defragmentation feature. It doesn't happen automatically though and needs your cooperation, because VMA is a low level library that only allocates memory. It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. It cannot copy their contents as it doesn't record any commands to a command buffer. Example: \code VmaDefragmentationInfo defragInfo = {}; defragInfo.pool = myPool; defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; VmaDefragmentationContext defragCtx; VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); // Check res... for(;;) { VmaDefragmentationPassMoveInfo pass; res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); if(res == VK_SUCCESS) break; else if(res != VK_INCOMPLETE) // Handle error... for(uint32_t i = 0; i < pass.moveCount; ++i) { // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. VmaAllocationInfo allocInfo; vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo); MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData; // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. VkImageCreateInfo imgCreateInfo = ... VkImage newImg; res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg); // Check res... 
res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg); // Check res... // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...); } // Make sure the copy commands finished executing. vkWaitForFences(...); // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation. for(uint32_t i = 0; i < pass.moveCount; ++i) { // ... vkDestroyImage(device, resData->img, nullptr); } // Update appropriate descriptors to point to the new places... res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); if(res == VK_SUCCESS) break; else if(res != VK_INCOMPLETE) // Handle error... } vmaEndDefragmentation(allocator, defragCtx, nullptr); \endcode Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage() create/destroy an allocation and a buffer/image at once, these are just a shortcut for creating the resource, allocating memory, and binding them together. Defragmentation works on memory allocations only. You must handle the rest manually. Defragmentation is an iterative process that should repeat "passes" as long as related functions return `VK_INCOMPLETE` not `VK_SUCCESS`. In each pass: 1. vmaBeginDefragmentationPass() function call: - Calculates and returns the list of allocations to be moved in this pass. Note this can be a time-consuming process. - Reserves destination memory for them by creating temporary destination allocations that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo(). 2. Inside the pass, **you should**: - Inspect the returned list of allocations to be moved. - Create new buffers/images and bind them at the returned destination temporary allocations. - Copy data from source to destination resources if necessary. - Destroy the source buffers/images, but NOT their allocations. 3. 
vmaEndDefragmentationPass() function call: - Frees the source memory reserved for the allocations that are moved. - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory. - Frees `VkDeviceMemory` blocks that became empty. Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter. Defragmentation algorithm tries to move all suitable allocations. You can, however, refuse to move some of them inside a defragmentation pass, by setting `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. This is not recommended and may result in suboptimal packing of the allocations after defragmentation. If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool. Inside a pass, for each allocation that should be moved: - You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`. - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass(). - If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared, filled, and used temporarily in each rendering frame, you can just recreate this image without copying its data. - If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU using `memcpy()`. - If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. This will cancel the move. - vmaEndDefragmentationPass() will then free the destination memory not the source memory of the allocation, leaving it unchanged. - If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. 
it wasn't used for long time), you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object. You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool (like in the example above) or all the default pools by setting this member to null. Defragmentation is always performed in each pool separately. Allocations are never moved between different Vulkan memory types. The size of the destination memory reserved for a moved allocation is the same as the original one. Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation. Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones. You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass. It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA usage, possibly from multiple threads, with the exception that allocations returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. Mapping is preserved on allocations that are moved during defragmentation. Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried using VmaAllocationInfo::pMappedData. \note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. 
\page statistics Statistics This library contains several functions that return information about its internal state, especially the amount of memory allocated from Vulkan. \section statistics_numeric_statistics Numeric statistics If you need to obtain basic statistics about memory usage per heap, together with current budget, you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget. This is useful to keep track of memory usage and stay within budget (see also \ref staying_within_budget). Example: \code uint32_t heapIndex = ... VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; vmaGetHeapBudgets(allocator, budgets); printf("My heap currently has %u allocations taking %llu B,\n", budgets[heapIndex].statistics.allocationCount, budgets[heapIndex].statistics.allocationBytes); printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n", budgets[heapIndex].statistics.blockCount, budgets[heapIndex].statistics.blockBytes); printf("Vulkan reports total usage %llu B with budget %llu B.\n", budgets[heapIndex].usage, budgets[heapIndex].budget); \endcode You can query for more detailed statistics per memory heap, type, and totals, including minimum and maximum allocation size and unused range size, by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics. This function is slower though, as it has to traverse all the internal data structures, so it should be used only for debugging purposes. You can query for statistics of a custom pool using function vmaGetPoolStatistics() or vmaCalculatePoolStatistics(). You can query for information about a specific allocation using function vmaGetAllocationInfo(). It fills structure #VmaAllocationInfo. \section statistics_json_dump JSON dump You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString(). The result is guaranteed to be correct JSON. It uses ANSI encoding. 
Any strings provided by user (see [Allocation names](@ref allocation_names)) are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding, this JSON string can be treated as using this encoding. It must be freed using function vmaFreeStatsString(). The format of this JSON string is not part of official documentation of the library, but it will not change in backward-incompatible way without increasing library major version number and appropriate mention in changelog. The JSON string contains all the data that can be obtained using vmaCalculateStatistics(). It can also contain detailed map of allocated memory blocks and their regions - free and occupied by allocations. This allows e.g. to visualize the memory or assess fragmentation. \page allocation_annotation Allocation names and user data \section allocation_user_data Allocation user data You can annotate allocations with your own information, e.g. for debugging purposes. To do that, fill VmaAllocationCreateInfo::pUserData field when creating an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, some handle, index, key, ordinal number or any other value that would associate the allocation with your custom metadata. It is useful to identify appropriate data structures in your engine given #VmaAllocation, e.g. when doing \ref defragmentation. \code VkBufferCreateInfo bufCreateInfo = ... 
MyBufferMetadata* pMetadata = CreateBufferMetadata(); VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.pUserData = pMetadata; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr); \endcode The pointer may be later retrieved as VmaAllocationInfo::pUserData: \code VmaAllocationInfo allocInfo; vmaGetAllocationInfo(allocator, allocation, &allocInfo); MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; \endcode It can also be changed using function vmaSetAllocationUserData(). Values of (non-zero) allocations' `pUserData` are printed in JSON report created by vmaBuildStatsString() in hexadecimal form. \section allocation_names Allocation names An allocation can also carry a null-terminated string, giving a name to the allocation. To set it, call vmaSetAllocationName(). The library creates internal copy of the string, so the pointer you pass doesn't need to be valid for whole lifetime of the allocation. You can free it after the call. \code std::string imageName = "Texture: "; imageName += fileName; vmaSetAllocationName(allocator, allocation, imageName.c_str()); \endcode The string can be later retrieved by inspecting VmaAllocationInfo::pName. It is also printed in JSON report created by vmaBuildStatsString(). \note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. \page virtual_allocator Virtual allocator As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator". It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block". You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan. 
A common use case is sub-allocation of pieces of one large GPU buffer. \section virtual_allocator_creating_virtual_block Creating virtual block To use this functionality, there is no main "allocator" object. You don't need to have #VmaAllocator object created. All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator: -# Fill in #VmaVirtualBlockCreateInfo structure. -# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object. Example: \code VmaVirtualBlockCreateInfo blockCreateInfo = {}; blockCreateInfo.size = 1048576; // 1 MB VmaVirtualBlock block; VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block); \endcode \section virtual_allocator_making_virtual_allocations Making virtual allocations #VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions using the same code as the main Vulkan memory allocator. Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type that represents an opaque handle to an allocation within the virtual block. In order to make such allocation: -# Fill in #VmaVirtualAllocationCreateInfo structure. -# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation. You can also receive `VkDeviceSize offset` that was assigned to the allocation. Example: \code VmaVirtualAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.size = 4096; // 4 KB VmaVirtualAllocation alloc; VkDeviceSize offset; res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset); if(res == VK_SUCCESS) { // Use the 4 KB of your memory starting at offset. } else { // Allocation failed - no space for it could be found. Handle this error! } \endcode \section virtual_allocator_deallocation Deallocation When no longer needed, an allocation can be freed by calling vmaVirtualFree(). 
You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate() called for the same #VmaVirtualBlock. When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock(). All allocations must be freed before the block is destroyed, which is checked internally by an assert. However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once - a feature not available in normal Vulkan memory allocator. Example: \code vmaVirtualFree(block, alloc); vmaDestroyVirtualBlock(block); \endcode \section virtual_allocator_allocation_parameters Allocation parameters You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData(). Its default value is null. It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some larger data structure containing more information. Example: \code struct CustomAllocData { std::string m_AllocName; }; CustomAllocData* allocData = new CustomAllocData(); allocData->m_AllocName = "My allocation 1"; vmaSetVirtualAllocationUserData(block, alloc, allocData); \endcode The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo. If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation! Example: \code VmaVirtualAllocationInfo allocInfo; vmaGetVirtualAllocationInfo(block, alloc, &allocInfo); delete (CustomAllocData*)allocInfo.pUserData; vmaVirtualFree(block, alloc); \endcode \section virtual_allocator_alignment_and_units Alignment and units It feels natural to express sizes and offsets in bytes. If an offset of an allocation needs to be aligned to a multiply of some number (e.g. 
4 bytes), you can fill optional member VmaVirtualAllocationCreateInfo::alignment to request it. Example: \code VmaVirtualAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.size = 4096; // 4 KB allocCreateInfo.alignment = 4; // Returned offset must be a multiply of 4 B VmaVirtualAllocation alloc; res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr); \endcode Alignments of different allocations made from one block may vary. However, if all alignments and sizes are always multiply of some size e.g. 4 B or `sizeof(MyDataStruct)`, you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes. It might be more convenient, but you need to make sure to use this new unit consistently in all the places: - VmaVirtualBlockCreateInfo::size - VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment - Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset \section virtual_allocator_statistics Statistics You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics() (to get brief statistics that are fast to calculate) or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate). The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator. Example: \code VmaStatistics stats; vmaGetVirtualBlockStatistics(block, &stats); printf("My virtual block has %llu bytes used by %u virtual allocations\n", stats.allocationBytes, stats.allocationCount); \endcode You can also request a full list of allocations and free regions as a string in JSON format by calling vmaBuildVirtualBlockStatsString(). Returned string must be later freed using vmaFreeVirtualBlockStatsString(). The format of this string differs from the one returned by the main Vulkan allocator, but it is similar. 
\section virtual_allocator_additional_considerations Additional considerations The "virtual allocator" functionality is implemented on a level of individual memory blocks. Keeping track of a whole collection of blocks, allocating new ones when out of free space, deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user. Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory. See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT). You can find their description in chapter \ref custom_memory_pools. Allocation strategies are also supported. See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT). Following features are supported only by the allocator of the real GPU memory and not by virtual allocations: buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`. \page debugging_memory_usage Debugging incorrect memory usage If you suspect a bug with memory usage, like usage of uninitialized memory or memory being overwritten out of bounds of an allocation, you can use debug features of this library to verify this. \section debugging_memory_usage_initialization Memory initialization If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, you can enable automatic memory initialization to verify this. To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. \code #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 #include "vk_mem_alloc.h" \endcode It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. Memory is automatically mapped and unmapped if necessary. 
If you find these values while debugging your program, good chances are that you incorrectly read Vulkan memory that is allocated but not initialized, or already freed, respectively. Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. It works also with dedicated allocations. \section debugging_memory_usage_margins Margins By default, allocations are laid out in memory blocks next to each other if possible (considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`). ![Allocations without margin](../gfx/Margins_1.png) Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified number of bytes as a margin after every allocation. \code #define VMA_DEBUG_MARGIN 16 #include "vk_mem_alloc.h" \endcode ![Allocations with margin](../gfx/Margins_2.png) If your bug goes away after enabling margins, it means it may be caused by memory being overwritten outside of allocation boundaries. It is not 100% certain though. Change in application behavior may also be caused by different order and distribution of allocations across memory blocks after margins are applied. Margins work with all types of memory. Margin is applied only to allocations made out of memory blocks and not to dedicated allocations, which have their own memory block of specific size. It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag or those automatically decided to put into dedicated allocations, e.g. due to its large size or recommended by VK_KHR_dedicated_allocation extension. Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space. Note that enabling margins increases memory usage and fragmentation. Margins do not apply to \ref virtual_allocator. 
\section debugging_memory_usage_corruption_detection Corruption detection You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation of contents of the margins. \code #define VMA_DEBUG_MARGIN 16 #define VMA_DEBUG_DETECT_CORRUPTION 1 #include "vk_mem_alloc.h" \endcode When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN` (it must be a multiple of 4) after every allocation is filled with a magic number. This idea is also known as "canary". Memory is automatically mapped and unmapped if necessary. This number is validated automatically when the allocation is destroyed. If it is not equal to the expected value, `VMA_ASSERT()` is executed. It clearly means that either the CPU or GPU overwrote the memory outside of boundaries of the allocation, which indicates a serious bug. You can also explicitly request checking margins of all allocations in all memory blocks that belong to specified memory types by using function vmaCheckCorruption(), or in memory blocks that belong to specified custom pool, by using function vmaCheckPoolCorruption(). Margin validation (corruption detection) works only for memory types that are `HOST_VISIBLE` and `HOST_COHERENT`. \section debugging_memory_usage_leak_detection Leak detection features At allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using `VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug builds, and doing nothing in Release builds. You can provide your own definition of `VMA_ASSERT_LEAK()` to change this behavior. At memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()` macro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op. 
If you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't quite know \em why -, overriding this macro to print out the leaking blocks, combined with assigning individual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them. \page other_api_interop Interop with other graphics APIs VMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL. \section opengl_interop_exporting_memory Exporting memory If you want to attach `VkExportMemoryAllocateInfoKHR` or other structure to `pNext` chain of memory allocations made by the library: You can create \ref custom_memory_pools for such allocations. Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext while creating the custom pool. Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool, not only while creating it, as no copy of the structure is made, but its original pointer is used for each allocation instead. If you want to export all memory allocated by VMA from certain memory types, also dedicated allocations or other allocations made from default pools, an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes. It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type. Please note that new versions of the library also support dedicated allocations created in custom pools. You should not mix these two methods in a way that allows applying both to the same memory type. Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`. 
\section opengl_interop_custom_alignment Custom alignment Buffers or images exported to a different API like OpenGL may require a different alignment, higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`. To impose such alignment: You can create \ref custom_memory_pools for such allocations. Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation to be made out of this pool. The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically. If you want to create a buffer with a specific minimum alignment out of default pools, use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`. Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block. You can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation. \section opengl_interop_extended_allocation_information Extended allocation information If you want to rely on VMA to allocate your buffers and images inside larger memory blocks, but you need to know the size of the entire block and whether the allocation was made with its own dedicated memory, use function vmaGetAllocationInfo2() to retrieve extended allocation information in structure #VmaAllocationInfo2. \page usage_patterns Recommended usage patterns Vulkan gives great flexibility in memory allocation. This chapter shows the most common patterns. See also slides from talk: [Sawicki, Adam. 
Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) \section usage_patterns_gpu_only GPU-only resource When: Any resources that you frequently write and read on GPU, e.g. images used as color attachments (aka "render targets"), depth-stencil attachments, images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). What to do: Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. \code VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; imgCreateInfo.extent.width = 3840; imgCreateInfo.extent.height = 2160; imgCreateInfo.extent.depth = 1; imgCreateInfo.mipLevels = 1; imgCreateInfo.arrayLayers = 1; imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; allocCreateInfo.priority = 1.0f; VkImage img; VmaAllocation alloc; vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); \endcode Also consider: Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, especially if they are large or if you plan to destroy and recreate them with different sizes e.g. when display resolution changes. Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. 
When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation to decrease chances to be evicted to system memory by the operating system. \section usage_patterns_staging_copy_upload Staging copy for upload When: A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer to some GPU resource. What to do: Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT. Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`. \code VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = 65536; bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); ... memcpy(allocInfo.pMappedData, myData, myDataSize); \endcode Also consider: You can map the allocation using vmaMapMemory() or you can create it as persistently mapped using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above. \section usage_patterns_readback Readback When: Buffers for data written by or transferred from the GPU that you want to read back on the CPU, e.g. results of some computations. What to do: Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. 
\code VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = 65536; bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); ... const float* downloadedData = (const float*)allocInfo.pMappedData; \endcode \section usage_patterns_advanced_data_uploading Advanced data uploading For resources that you frequently write on CPU via mapped pointer and frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible: -# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory, even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card, and make the device reach out to that resource directly. - Reads performed by the device will then go through PCI Express bus. The performance of this access may be limited, but it may be fine depending on the size of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity of access. -# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips), a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL` (fast to access from the GPU). Then, it is likely the best choice for such type of resource. -# Systems with a discrete graphics card and separate video memory may or may not expose a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR). If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS) that is available to CPU for mapping. 
- Writes performed by the host to that memory go through PCI Express bus. The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0, as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads. -# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory, a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them. Thankfully, VMA offers an aid to create and use such resources in the way optimal for the current Vulkan device. To help the library make the best choice, use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT. It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR), but if no such memory type is available or allocation from it fails (PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS), it will fall back to `DEVICE_LOCAL` memory for fast GPU access. It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`, so you need to create another "staging" allocation and perform explicit transfers. \code VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = 65536; bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; VkResult result = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); // Check result... 
VkMemoryPropertyFlags memPropFlags; vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags); if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { // Allocation ended up in a mappable memory and is already mapped - write to it directly. // [Executed in runtime]: memcpy(allocInfo.pMappedData, myData, myDataSize); result = vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // Check result... VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; bufMemBarrier.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier.buffer = buf; bufMemBarrier.offset = 0; bufMemBarrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); } else { // Allocation ended up in a non-mappable memory - a transfer using a staging buffer is required. VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; stagingBufCreateInfo.size = 65536; stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo stagingAllocCreateInfo = {}; stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer stagingBuf; VmaAllocation stagingAlloc; VmaAllocationInfo stagingAllocInfo; result = vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo, &stagingBuf, &stagingAlloc, &stagingAllocInfo); // Check result... // [Executed in runtime]: memcpy(stagingAllocInfo.pMappedData, myData, myDataSize); result = vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE); // Check result... 
VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; bufMemBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier.buffer = stagingBuf; bufMemBarrier.offset = 0; bufMemBarrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); VkBufferCopy bufCopy = { 0, // srcOffset 0, // dstOffset, myDataSize, // size }; vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy); VkBufferMemoryBarrier bufMemBarrier2 = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; bufMemBarrier2.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; bufMemBarrier2.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; // We created a uniform buffer bufMemBarrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; bufMemBarrier2.buffer = buf; bufMemBarrier2.offset = 0; bufMemBarrier2.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &bufMemBarrier2, 0, nullptr); } \endcode \section usage_patterns_other_use_cases Other use cases Here are some other, less obvious use cases and their recommended settings: - An image that is used only as transfer source and destination, but it should stay on the device, as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame, for temporal antialiasing or other temporal effects. 
- Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO - An image that is used only as transfer source and destination, but it should be placed in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict least recently used textures from VRAM. - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST, as VMA needs a hint here to differentiate from the previous case. - A buffer that you want to map and write from the CPU, directly read from the GPU (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or host memory due to its large size. - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT` - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT \page configuration Configuration Please check "CONFIGURATION SECTION" in the code to find macros that you can define before each include of this file or change directly in this file to provide your own implementation of basic facilities like assert, `min()` and `max()` functions, mutex, atomic etc. For example, define `VMA_ASSERT(expr)` before including the library to provide custom implementation of the assertion, compatible with your project. By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration and empty otherwise. Similarly, you can define `VMA_LEAK_LOG_FORMAT` macro to enable printing of leaked (unfreed) allocations, including their names and other parameters. Example: \code #define VMA_LEAK_LOG_FORMAT(format, ...) 
do { \ printf((format), __VA_ARGS__); \ printf("\n"); \ } while(false) \endcode \section config_Vulkan_functions Pointers to Vulkan functions There are multiple ways to import pointers to Vulkan functions in the library. In the simplest case you don't need to do anything. If the compilation or linking of your program or the initialization of the #VmaAllocator doesn't work for you, you can try to reconfigure it. First, the allocator tries to fetch pointers to Vulkan functions linked statically, like this: \code m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; \endcode If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`. Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions. You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or by using a helper library like [volk](https://github.com/zeux/volk). Third, VMA tries to fetch remaining pointers that are still null by calling `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own. You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr. Other pointers will be fetched automatically. If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`. Finally, all the function pointers required by the library (considering selected Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null. \section custom_memory_allocator Custom host memory allocator If you use custom allocator for CPU memory rather than default operator `new` and `delete` from C++, you can make this library using your allocator as well by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These functions will be passed to Vulkan, as well as used by the library itself to make any CPU-side allocations. 
\section allocation_callbacks Device memory allocation callbacks The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally. You can set up callbacks to be informed about these calls, e.g. for the purpose of gathering some statistics. To do it, fill optional member VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. \section heap_memory_limit Device heap memory limit When device memory of certain heap runs out of free space, new allocations may fail (returning error code) or they may succeed, silently pushing some existing memory blocks from GPU VRAM to system RAM (which degrades performance). This behavior is implementation-dependent - it depends on GPU vendor and graphics driver. On AMD cards it can be controlled while creating Vulkan device object by using VK_AMD_memory_overallocation_behavior extension, if available. Alternatively, if you want to test how your program behaves with limited amount of Vulkan device memory available without switching your graphics card to one that really has smaller VRAM, you can use a feature of this library intended for this purpose. To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit. \page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve performance on some GPUs. It augments Vulkan API with possibility to query driver whether it prefers particular buffer or image to have its own, dedicated allocation (separate `VkDeviceMemory` block) for better efficiency - to be able to do some internal optimizations. The extension is supported by this library. It will be used automatically when enabled. It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion, you are all set. Otherwise, if you want to use it as an extension: 1.
When creating Vulkan device, check if following 2 device extensions are supported (call `vkEnumerateDeviceExtensionProperties()`). If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`). - VK_KHR_get_memory_requirements2 - VK_KHR_dedicated_allocation If you enabled these extensions: 2. Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating your #VmaAllocator to inform the library that you enabled required extensions and you want the library to use them. \code allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT; vmaCreateAllocator(&allocatorInfo, &allocator); \endcode That is all. The extension will be automatically used whenever you create a buffer using vmaCreateBuffer() or image using vmaCreateImage(). When using the extension together with Vulkan Validation Layer, you will receive warnings like this: _vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._ It is OK, you should just ignore it. It happens because you use function `vkGetBufferMemoryRequirements2KHR()` instead of standard `vkGetBufferMemoryRequirements()`, while the validation layer seems to be unaware of it. To learn more about this extension, see: - [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation) - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5) \page vk_ext_memory_priority VK_EXT_memory_priority VK_EXT_memory_priority is a device extension that allows passing an additional "priority" value to Vulkan memory allocations that the implementation may use to prefer certain buffers and images that are critical for performance to stay in device-local memory in cases when the memory is over-subscribed, while some others may be moved to the system memory. VMA offers convenient usage of this extension.
If you enable it, you can pass "priority" parameter when creating allocations or custom pools and the library automatically passes the value to Vulkan using this extension. If you want to use this extension in connection with VMA, follow these steps: \section vk_ext_memory_priority_initialization Initialization 1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority". 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned. Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true. 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT to VmaAllocatorCreateInfo::flags. \section vk_ext_memory_priority_usage Usage When using this extension, you should initialize following member: - VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - VmaPoolCreateInfo::priority when creating a custom pool. 
It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`. Memory allocated with higher value can be treated by the Vulkan implementation as higher priority and so it can have lower chances of being pushed out to system memory, experiencing degraded performance. It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images as dedicated and set high priority to them. For example: \code VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; imgCreateInfo.extent.width = 3840; imgCreateInfo.extent.height = 2160; imgCreateInfo.extent.depth = 1; imgCreateInfo.mipLevels = 1; imgCreateInfo.arrayLayers = 1; imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; allocCreateInfo.priority = 1.0f; VkImage img; VmaAllocation alloc; vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); \endcode `priority` member is ignored in the following situations: - Allocations created in custom pools: They inherit the priority, along with all other allocation parameters from the parameters passed in #VmaPoolCreateInfo when the pool was created. - Allocations created in default pools: They inherit the priority from the parameters VMA used when creating default pools, which means `priority == 0.5f`. 
\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory VK_AMD_device_coherent_memory is a device extension that enables access to additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for allocation of buffers intended for writing "breadcrumb markers" in between passes or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases. When the extension is available but has not been enabled, Vulkan physical device still exposes those memory types, but their usage is forbidden. VMA automatically takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt to allocate memory of such type is made. If you want to use this extension in connection with VMA, follow these steps: \section vk_amd_device_coherent_memory_initialization Initialization 1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory". 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned. Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true. 3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. 
Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT to VmaAllocatorCreateInfo::flags. \section vk_amd_device_coherent_memory_usage Usage After following steps described above, you can create VMA allocations and custom pools out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible devices. There are multiple ways to do it, for example: - You can request or prefer to allocate out of such memory types by adding `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage. - If you manually found memory type index to use for this purpose, force allocation from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`. \section vk_amd_device_coherent_memory_more_information More information To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html) Example use of this extension can be found in the code of the sample and test suite accompanying this library. \page vk_khr_external_memory_win32 VK_KHR_external_memory_win32 On Windows, the VK_KHR_external_memory_win32 device extension allows exporting a Win32 `HANDLE` of a `VkDeviceMemory` block, to be able to reference the memory on other Vulkan logical devices or instances, in multiple processes, and/or in multiple APIs. VMA offers support for it. 
\section vk_khr_external_memory_win32_initialization Initialization 1) Make sure the extension is defined in the code by including the following header before including VMA: \code #include <vulkan/vulkan_win32.h> \endcode 2) Check if "VK_KHR_external_memory_win32" is available among device extensions. Enable it when creating the `VkDevice` object. 3) Enable the usage of this extension in VMA by setting flag #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT when calling vmaCreateAllocator(). 4) Make sure that VMA has access to the `vkGetMemoryWin32HandleKHR` function by either enabling `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro or setting VmaVulkanFunctions::vkGetMemoryWin32HandleKHR explicitly. For more information, see \ref quick_start_initialization_importing_vulkan_functions. \section vk_khr_external_memory_win32_preparations Preparations You can find example usage among tests, in file "Tests.cpp", function `TestWin32Handles()`. To use the extension, buffers need to be created with `VkExternalMemoryBufferCreateInfoKHR` attached to their `pNext` chain, and memory allocations need to be made with `VkExportMemoryAllocateInfoKHR` attached to their `pNext` chain. To make use of them, you need to use \ref custom_memory_pools. Example: \code // Define an example buffer and allocation parameters. VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = { VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT }; VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; exampleBufCreateInfo.size = 0x10000; // Doesn't matter here. exampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; exampleBufCreateInfo.pNext = &externalMemBufCreateInfo; VmaAllocationCreateInfo exampleAllocCreateInfo = {}; exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; // Find memory type index to use for the custom pool.
uint32_t memTypeIndex; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_Allocator, &exampleBufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex); // Check res... // Create a custom pool. constexpr static VkExportMemoryAllocateInfoKHR exportMemAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT }; VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.memoryTypeIndex = memTypeIndex; poolCreateInfo.pMemoryAllocateNext = (void*)&exportMemAllocInfo; VmaPool pool; res = vmaCreatePool(g_Allocator, &poolCreateInfo, &pool); // Check res... // YOUR OTHER CODE COMES HERE.... // At the end, don't forget to destroy it! vmaDestroyPool(g_Allocator, pool); \endcode Note that the structure passed as VmaPoolCreateInfo::pMemoryAllocateNext must remain alive and unchanged for the whole lifetime of the custom pool, because it will be used when the pool allocates a new device memory block. No copy is made internally. This is why variable `exportMemAllocInfo` is defined as `static`. \section vk_khr_external_memory_win32_memory_allocation Memory allocation Finally, you can create a buffer with an allocation out of the custom pool. The buffer should use same flags as the sample buffer used to find the memory type. It should also specify `VkExternalMemoryBufferCreateInfoKHR` in its `pNext` chain. \code VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = { VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT }; VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = // Your desired buffer size. bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; bufCreateInfo.pNext = &externalMemBufCreateInfo; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.pool = pool; // It is enough to set this one member. 
VkBuffer buf; VmaAllocation alloc; res = vmaCreateBuffer(g_Allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); // Check res... // YOUR OTHER CODE COMES HERE.... // At the end, don't forget to destroy it! vmaDestroyBuffer(g_Allocator, buf, alloc); \endcode If you need each allocation to have its own device memory block and start at offset 0, you can still do so by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag. It works also with custom pools. \section vk_khr_external_memory_win32_exporting_win32_handle Exporting Win32 handle After the allocation is created, you can acquire a Win32 `HANDLE` to the `VkDeviceMemory` block it belongs to. VMA function vmaGetMemoryWin32Handle() is a replacement of the Vulkan function `vkGetMemoryWin32HandleKHR`. \code HANDLE handle; res = vmaGetMemoryWin32Handle(g_Allocator, alloc, nullptr, &handle); // Check res... // YOUR OTHER CODE COMES HERE.... // At the end, you must close the handle. CloseHandle(handle); \endcode Documentation of the VK_KHR_external_memory_win32 extension states that: > If handleType is defined as an NT handle, vkGetMemoryWin32HandleKHR must be called no more than once for each valid unique combination of memory and handleType. This is ensured automatically inside VMA. The library fetches the handle on first use, remembers it internally, and closes it when the memory block or dedicated allocation is destroyed. Every time you call vmaGetMemoryWin32Handle(), VMA calls `DuplicateHandle` and returns a new handle that you need to close. For further information, please check documentation of the vmaGetMemoryWin32Handle() function. \page enabling_buffer_device_address Enabling buffer device address Device extension VK_KHR_buffer_device_address allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code. It has been promoted to core Vulkan 1.2.
If you want to use this feature in connection with VMA, follow these steps: \section enabling_buffer_device_address_initialization Initialization 1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device. Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_KHR_buffer_device_address". 2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned. Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true. 3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add "VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. 4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. 5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT to VmaAllocatorCreateInfo::flags. \section enabling_buffer_device_address_usage Usage After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to allocated memory blocks wherever it might be needed. Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. 
The second part of this functionality related to "capture and replay" is not supported, as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. \section enabling_buffer_device_address_more_information More information To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) Example use of this extension can be found in the code of the sample and test suite accompanying this library. \page general_considerations General considerations \section general_considerations_thread_safety Thread safety - The library has no global state, so separate #VmaAllocator objects can be used independently. There should be no need to create multiple such objects though - one per `VkDevice` is enough. - By default, all calls to functions that take #VmaAllocator as first parameter are safe to call from multiple threads simultaneously because they are synchronized internally when needed. This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. - When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT flag, calls to functions that take such #VmaAllocator object must be synchronized externally. - Access to a #VmaAllocation object must be externally synchronized. For example, you must not call vmaGetAllocationInfo() and vmaMapMemory() from different threads at the same time if you pass the same #VmaAllocation object to these functions. - #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. \section general_considerations_versioning_and_compatibility Versioning and compatibility The library uses [**Semantic Versioning**](https://semver.org/), which means version numbers follow convention: Major.Minor.Patch (e.g. 
2.3.0), where: - Incremented Patch version means a release is backward- and forward-compatible, introducing only some internal improvements, bug fixes, optimizations etc. or changes that are out of scope of the official API described in this documentation. - Incremented Minor version means a release is backward-compatible, so existing code that uses the library should continue to work, while some new symbols could have been added: new structures, functions, new values in existing enums and bit flags, new structure members, but not new function parameters. - Incrementing Major version means a release could break some backward compatibility. All changes between official releases are documented in file "CHANGELOG.md". \warning Backward compatibility is considered on the level of C++ source code, not binary linkage. Adding new members to existing structures is treated as backward compatible if initializing the new members to binary zero results in the old behavior. You should always fully initialize all library structures to zeros and not rely on their exact binary size. \section general_considerations_validation_layer_warnings Validation layer warnings When using this library, you can meet following types of warnings issued by Vulkan validation layer. They don't necessarily indicate a bug, so you may need to just ignore them. - *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* - It happens when VK_KHR_dedicated_allocation extension is enabled. `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. - *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.* - It happens when you map a buffer or image, because the library maps entire `VkDeviceMemory` block, where different types of images and buffers may end up together, especially on GPUs with unified memory like Intel. - *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.* - It may happen when you use [defragmentation](@ref defragmentation). \section general_considerations_allocation_algorithm Allocation algorithm The library uses following algorithm for allocation, in order: -# Try to find free range of memory in existing blocks. -# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size. -# If failed, try to create such block with size / 2, size / 4, size / 8. -# If failed, try to allocate separate `VkDeviceMemory` for this allocation, just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. -# If failed, choose other memory type that meets the requirements specified in VmaAllocationCreateInfo and go to point 1. -# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. \section general_considerations_features_not_supported Features not supported Features deliberately excluded from the scope of this library: -# **Data transfer.** Uploading (streaming) and downloading data of buffers and images between CPU and GPU memory and related synchronization is responsibility of the user. Defining some "texture" object that would automatically stream its data from a staging copy in CPU memory to GPU memory would rather be a feature of another, higher-level library implemented on top of VMA. VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory. -# **Recreation of buffers and images.** Although the library has functions for buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to recreate these objects yourself after defragmentation. 
That is because the big structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in #VmaAllocation object. -# **Handling CPU memory allocation failures.** When dynamically creating small C++ objects in CPU memory (not Vulkan memory), allocation failures are not checked and handled gracefully, because that would complicate code significantly and is usually not needed in desktop PC applications anyway. Success of an allocation is just checked with an assert. -# **Code free of any compiler warnings.** Maintaining the library to compile and work correctly on so many different platforms is hard enough. Being free of any warnings, on any version of any compiler, is simply not feasible. There are many preprocessor macros that make some variables unused, function parameters unreferenced, or conditional expressions constant in some configurations. The code of this library should not be bigger or more complicated just to silence these warnings. It is recommended to disable such warnings instead. -# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but are not going to be included into this repository. */ ================================================ FILE: deps/vulkan-headers/vulkan/vk_platform.h ================================================ // // File: vk_platform.h // /* ** Copyright 2014-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ #ifndef VK_PLATFORM_H_ #define VK_PLATFORM_H_ #ifdef __cplusplus extern "C" { #endif // __cplusplus /* *************************************************************************************************** * Platform-specific directives and type declarations *************************************************************************************************** */ /* Platform-specific calling convention macros. 
* * Platforms should define these so that Vulkan clients call Vulkan commands * with the same calling conventions that the Vulkan implementation expects. * * VKAPI_ATTR - Placed before the return type in function declarations. * Useful for C++11 and GCC/Clang-style function attribute syntax. * VKAPI_CALL - Placed after the return type in function declarations. * Useful for MSVC-style calling convention syntax. * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. * * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); */ #if defined(_WIN32) // On Windows, Vulkan commands use the stdcall convention #define VKAPI_ATTR #define VKAPI_CALL __stdcall #define VKAPI_PTR VKAPI_CALL #elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 #error "Vulkan is not supported for the 'armeabi' NDK ABI" #elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" // calling convention, i.e. float parameters are passed in registers. This // is true even if the rest of the application passes floats on the stack, // as it does by default when compiling for the armeabi-v7a NDK ABI. 
#define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) #define VKAPI_CALL #define VKAPI_PTR VKAPI_ATTR #else // On other platforms, use the default calling convention #define VKAPI_ATTR #define VKAPI_CALL #define VKAPI_PTR #endif #if !defined(VK_NO_STDDEF_H) #include #endif // !defined(VK_NO_STDDEF_H) #if !defined(VK_NO_STDINT_H) #if defined(_MSC_VER) && (_MSC_VER < 1600) typedef signed __int8 int8_t; typedef unsigned __int8 uint8_t; typedef signed __int16 int16_t; typedef unsigned __int16 uint16_t; typedef signed __int32 int32_t; typedef unsigned __int32 uint32_t; typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #else #include #endif #endif // !defined(VK_NO_STDINT_H) #ifdef __cplusplus } // extern "C" #endif // __cplusplus #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan.h ================================================ #ifndef VULKAN_H_ #define VULKAN_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ #include "vk_platform.h" #include "vulkan_core.h" #ifdef VK_USE_PLATFORM_ANDROID_KHR #include "vulkan_android.h" #endif #ifdef VK_USE_PLATFORM_FUCHSIA #include #include "vulkan_fuchsia.h" #endif #ifdef VK_USE_PLATFORM_IOS_MVK #include "vulkan_ios.h" #endif #ifdef VK_USE_PLATFORM_MACOS_MVK #include "vulkan_macos.h" #endif #ifdef VK_USE_PLATFORM_METAL_EXT #include "vulkan_metal.h" #endif #ifdef VK_USE_PLATFORM_VI_NN #include "vulkan_vi.h" #endif #ifdef VK_USE_PLATFORM_WAYLAND_KHR #include #include "vulkan_wayland.h" #endif #ifdef VK_USE_PLATFORM_WIN32_KHR #include #include "vulkan_win32.h" #endif #ifdef VK_USE_PLATFORM_XCB_KHR #include #include "vulkan_xcb.h" #endif #ifdef VK_USE_PLATFORM_XLIB_KHR #include #include "vulkan_xlib.h" #endif #ifdef VK_USE_PLATFORM_DIRECTFB_EXT #include #include "vulkan_directfb.h" #endif #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT #include #include #include "vulkan_xlib_xrandr.h" #endif #ifdef VK_USE_PLATFORM_GGP #include #include 
"vulkan_ggp.h" #endif #ifdef VK_USE_PLATFORM_SCREEN_QNX #include #include "vulkan_screen.h" #endif #ifdef VK_ENABLE_BETA_EXTENSIONS #include "vulkan_beta.h" #endif #endif // VULKAN_H_ ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_core.h ================================================ #ifndef VULKAN_CORE_H_ #define VULKAN_CORE_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. ** */ #ifdef __cplusplus extern "C" { #endif #define VK_VERSION_1_0 1 #include "vk_platform.h" #define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; #ifndef VK_USE_64_BIT_PTR_DEFINES #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) #define VK_USE_64_BIT_PTR_DEFINES 1 #else #define VK_USE_64_BIT_PTR_DEFINES 0 #endif #endif #ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE #if (VK_USE_64_BIT_PTR_DEFINES==1) #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L)) #define VK_NULL_HANDLE nullptr #else #define VK_NULL_HANDLE ((void*)0) #endif #else #define VK_NULL_HANDLE 0ULL #endif #endif #ifndef VK_NULL_HANDLE #define VK_NULL_HANDLE 0 #endif #ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE #if (VK_USE_64_BIT_PTR_DEFINES==1) #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; #else #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; #endif #endif // DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead. #define VK_MAKE_VERSION(major, minor, patch) \ ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch))) // DEPRECATED: This define has been removed. Specific version defines (e.g. 
VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. //#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 #define VK_MAKE_API_VERSION(variant, major, minor, patch) \ ((((uint32_t)(variant)) << 29) | (((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch))) // Vulkan 1.0 version number #define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0 // Version of this file #define VK_HEADER_VERSION 224 // Complete version of this file #define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 3, VK_HEADER_VERSION) // DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead. #define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) // DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead. #define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU) // DEPRECATED: This define is deprecated. VK_API_VERSION_PATCH should be used instead. 
#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) #define VK_API_VERSION_VARIANT(version) ((uint32_t)(version) >> 29) #define VK_API_VERSION_MAJOR(version) (((uint32_t)(version) >> 22) & 0x7FU) #define VK_API_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU) #define VK_API_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) typedef uint32_t VkBool32; typedef uint64_t VkDeviceAddress; typedef uint64_t VkDeviceSize; typedef uint32_t VkFlags; typedef uint32_t VkSampleMask; VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) VK_DEFINE_HANDLE(VkInstance) VK_DEFINE_HANDLE(VkPhysicalDevice) VK_DEFINE_HANDLE(VkDevice) VK_DEFINE_HANDLE(VkQueue) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) VK_DEFINE_HANDLE(VkCommandBuffer) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) #define VK_ATTACHMENT_UNUSED (~0U) #define VK_FALSE 0U #define VK_LOD_CLAMP_NONE 1000.0F #define VK_QUEUE_FAMILY_IGNORED (~0U) #define VK_REMAINING_ARRAY_LAYERS (~0U) #define VK_REMAINING_MIP_LEVELS (~0U) #define VK_SUBPASS_EXTERNAL (~0U) #define VK_TRUE 1U #define VK_WHOLE_SIZE (~0ULL) #define VK_MAX_MEMORY_TYPES 32U #define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256U #define 
VK_UUID_SIZE 16U #define VK_MAX_EXTENSION_NAME_SIZE 256U #define VK_MAX_DESCRIPTION_SIZE 256U #define VK_MAX_MEMORY_HEAPS 16U typedef enum VkResult { VK_SUCCESS = 0, VK_NOT_READY = 1, VK_TIMEOUT = 2, VK_EVENT_SET = 3, VK_EVENT_RESET = 4, VK_INCOMPLETE = 5, VK_ERROR_OUT_OF_HOST_MEMORY = -1, VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, VK_ERROR_INITIALIZATION_FAILED = -3, VK_ERROR_DEVICE_LOST = -4, VK_ERROR_MEMORY_MAP_FAILED = -5, VK_ERROR_LAYER_NOT_PRESENT = -6, VK_ERROR_EXTENSION_NOT_PRESENT = -7, VK_ERROR_FEATURE_NOT_PRESENT = -8, VK_ERROR_INCOMPATIBLE_DRIVER = -9, VK_ERROR_TOO_MANY_OBJECTS = -10, VK_ERROR_FORMAT_NOT_SUPPORTED = -11, VK_ERROR_FRAGMENTED_POOL = -12, VK_ERROR_UNKNOWN = -13, VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, VK_ERROR_FRAGMENTATION = -1000161000, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, VK_PIPELINE_COMPILE_REQUIRED = 1000297000, VK_ERROR_SURFACE_LOST_KHR = -1000000000, VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, VK_SUBOPTIMAL_KHR = 1000001003, VK_ERROR_OUT_OF_DATE_KHR = -1000001004, VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, VK_ERROR_INVALID_SHADER_NV = -1000012000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR = -1000023000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR = -1000023001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR = -1000023002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR = -1000023003, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR = -1000023004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR = -1000023005, #endif VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, VK_ERROR_NOT_PERMITTED_KHR = -1000174001, VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, 
VK_THREAD_IDLE_KHR = 1000268000, VK_THREAD_DONE_KHR = 1000268001, VK_OPERATION_DEFERRED_KHR = 1000268002, VK_OPERATION_NOT_DEFERRED_KHR = 1000268003, VK_ERROR_COMPRESSION_EXHAUSTED_EXT = -1000338000, VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED_KHR, VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, VK_RESULT_MAX_ENUM = 0x7FFFFFFF } VkResult; typedef enum VkStructureType { VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, 
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, 
VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, 
VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, 
VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES = 53, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES = 54, VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO = 1000192000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES = 1000215000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES = 1000245000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES = 1000276000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES = 1000295000, VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO = 1000295001, VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO = 1000295002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES = 1000297000, VK_STRUCTURE_TYPE_MEMORY_BARRIER_2 = 1000314000, VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2 = 1000314001, VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2 = 
1000314002, VK_STRUCTURE_TYPE_DEPENDENCY_INFO = 1000314003, VK_STRUCTURE_TYPE_SUBMIT_INFO_2 = 1000314004, VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO = 1000314005, VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO = 1000314006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES = 1000314007, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES = 1000325000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES = 1000335000, VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2 = 1000337000, VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2 = 1000337001, VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2 = 1000337002, VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2 = 1000337003, VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2 = 1000337004, VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2 = 1000337005, VK_STRUCTURE_TYPE_BUFFER_COPY_2 = 1000337006, VK_STRUCTURE_TYPE_IMAGE_COPY_2 = 1000337007, VK_STRUCTURE_TYPE_IMAGE_BLIT_2 = 1000337008, VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2 = 1000337009, VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2 = 1000337010, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES = 1000225000, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO = 1000225001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES = 1000225002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES = 1000138000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES = 1000138001, VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK = 1000138002, VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO = 1000138003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES = 1000066000, VK_STRUCTURE_TYPE_RENDERING_INFO = 1000044000, VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO = 1000044001, VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO = 1000044002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES = 1000044003, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO = 1000044004, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES = 1000280000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES = 1000280001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES = 1000281001, VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3 = 1000360000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES = 1000413000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 1000413001, VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS = 1000413002, VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS = 1000413003, VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR = 
1000023000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR = 1000023001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR = 1000023002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR = 1000023003, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR = 1000023004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR = 1000023005, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000023006, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR = 1000023007, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR = 1000023008, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR = 1000023009, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR = 1000023010, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR = 1000023011, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000023012, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR = 1000023013, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR = 1000023014, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR = 1000023015, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_2_KHR = 1000023016, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR = 1000024000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR = 1000024001, #endif VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 
1000026001, VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, VK_STRUCTURE_TYPE_CU_MODULE_CREATE_INFO_NVX = 1000029000, VK_STRUCTURE_TYPE_CU_FUNCTION_CREATE_INFO_NVX = 1000029001, VK_STRUCTURE_TYPE_CU_LAUNCH_INFO_NVX = 1000029002, VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT = 1000038000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000038001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000038002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT = 1000038003, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT = 1000038004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT = 1000038005, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT = 1000038006, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT = 1000038007, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT = 1000038008, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT = 1000038009, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_REFERENCE_LISTS_EXT = 1000038010, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_EXT = 1000039000, #endif #ifdef 
VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000039001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000039002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_VCL_FRAME_INFO_EXT = 1000039003, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_EXT = 1000039004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_EXT = 1000039005, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_EXT = 1000039006, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_EXT = 1000039007, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_REFERENCE_LISTS_EXT = 1000039008, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT = 1000039009, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_EXT = 1000039010, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT = 1000040000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT = 1000040001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT = 1000040002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT = 1000040003, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000040004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000040005, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT = 1000040006, #endif VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, 
VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000044006, VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT = 1000044007, VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD = 1000044008, VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX = 1000044009, VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = 1000068000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = 1000068001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = 1000068002, VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, 
VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, 
VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005, VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, 
VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID = 1000129006, VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009, VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001, VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013, VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT = 1000158006, VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001, #endif VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT = 1000187000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000187001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000187002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT = 1000187003, #endif #ifdef 
VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT = 1000187004, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT = 1000187005, #endif VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = 1000174000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = 1000388000, VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = 1000388001, VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000, VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000, VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR = 1000248000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, 
VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT = 1000254000, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT = 1000254001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT = 1000254002, VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000, VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT = 1000273000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000, VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001, VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002, VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003, VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004, VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005, VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV = 1000278000, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV = 1000278001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000, VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001, VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001, VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002, VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000, VK_STRUCTURE_TYPE_PRESENT_ID_KHR = 1000294000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR = 1000294001, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR = 1000299000, #endif 
#ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR = 1000299001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR = 1000299002, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR = 1000299003, #endif VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT = 1000311000, VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT = 1000311001, VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT = 1000311002, VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT = 1000311003, VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT = 1000311004, VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT = 1000311005, VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT = 1000311006, VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT = 1000311007, VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT = 1000311008, VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT = 1000311009, VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311010, VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311011, VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV = 1000314008, VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV = 1000314009, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT = 1000320000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT = 1000320001, VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT = 1000320002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD = 1000321000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR = 1000203000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR = 1000322000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR = 1000323000, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001, VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV = 1000327000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV = 1000327001, VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV = 1000327002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT = 1000330000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001, VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR = 1000336000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT = 1000338000, VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT = 1000338001, VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = 1000338002, VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = 1000338003, VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT = 1000338004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT = 1000339000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM = 1000342000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT = 1000344000, VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = 1000351000, VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = 1000351002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT = 1000352000, VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT = 1000352001, 
VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT = 1000352002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT = 1000353000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT = 1000355000, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT = 1000355001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT = 1000356000, VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364000, VK_STRUCTURE_TYPE_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA = 1000364001, VK_STRUCTURE_TYPE_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364002, VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365000, VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365001, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA = 1000366000, VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA = 1000366001, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA = 1000366002, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA = 1000366003, VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA = 1000366004, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA = 1000366005, VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA = 1000366006, VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA = 1000366007, VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA = 1000366008, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA = 1000366009, VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI = 1000369000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI = 1000369001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI = 1000369002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI = 1000370000, VK_STRUCTURE_TYPE_MEMORY_GET_REMOTE_ADDRESS_INFO_NV = 1000371000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV = 1000371001, VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT = 1000372000, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT = 1000372001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT = 1000376000, VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT = 1000376001, VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT = 1000376002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT = 1000377000, VK_STRUCTURE_TYPE_SCREEN_SURFACE_CREATE_INFO_QNX = 1000378000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT = 1000381000, VK_STRUCTURE_TYPE_PIPELINE_COLOR_WRITE_CREATE_INFO_EXT = 1000381001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT = 1000382000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR = 1000386000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT = 1000391000, VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT = 1000391001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT = 1000392000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT = 1000392001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT = 1000393000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT = 1000411000, VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT = 1000411001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT = 1000412000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE = 1000420000, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE = 1000420001, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE = 1000420002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT = 1000422000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM = 1000425000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM = 1000425001, 
VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM = 1000425002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV = 1000430000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT = 1000437000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM = 1000440000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM = 1000440001, VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM = 1000440002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT = 1000458000, VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT = 1000458001, VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000458002, VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT = 1000458003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT = 1000462000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT = 1000462001, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT = 1000462002, VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT = 1000462003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM = 1000484000, VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM = 1000484001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC = 1000485000, VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC = 1000485001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, VK_STRUCTURE_TYPE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INFO, VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO, VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR = 
VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO, VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_NV = VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD, VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = 
VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, 
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO, VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2, VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2, VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2, VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2, VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO, VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, 
VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR, VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES, VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK, VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO, VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, 
VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, 
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, 
VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES, VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES, VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO, VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 
VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES, VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2, VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2, VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2, VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = VK_STRUCTURE_TYPE_SUBMIT_INFO_2, VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO, VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES, VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2, VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2, VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2, VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2, VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2, VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2, VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_COPY_2, VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_IMAGE_COPY_2, VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2, VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = 
VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2, VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2, VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3, VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR, VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES, VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS, VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS, VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkStructureType; typedef enum VkPipelineCacheHeaderVersion { VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF } VkPipelineCacheHeaderVersion; typedef enum VkImageLayout { VK_IMAGE_LAYOUT_UNDEFINED = 0, VK_IMAGE_LAYOUT_GENERAL = 1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, VK_IMAGE_LAYOUT_PREINITIALIZED = 8, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002, VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL 
= 1000241003, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL = 1000314000, VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL = 1000314001, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR = 1000024001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR = 1000024002, #endif VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = 1000164003, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR = 1000299000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR = 1000299001, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002, #endif VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT = 1000339000, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF } VkImageLayout; typedef enum VkObjectType { VK_OBJECT_TYPE_UNKNOWN = 0, VK_OBJECT_TYPE_INSTANCE = 1, VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, 
VK_OBJECT_TYPE_DEVICE = 3, VK_OBJECT_TYPE_QUEUE = 4, VK_OBJECT_TYPE_SEMAPHORE = 5, VK_OBJECT_TYPE_COMMAND_BUFFER = 6, VK_OBJECT_TYPE_FENCE = 7, VK_OBJECT_TYPE_DEVICE_MEMORY = 8, VK_OBJECT_TYPE_BUFFER = 9, VK_OBJECT_TYPE_IMAGE = 10, VK_OBJECT_TYPE_EVENT = 11, VK_OBJECT_TYPE_QUERY_POOL = 12, VK_OBJECT_TYPE_BUFFER_VIEW = 13, VK_OBJECT_TYPE_IMAGE_VIEW = 14, VK_OBJECT_TYPE_SHADER_MODULE = 15, VK_OBJECT_TYPE_PIPELINE_CACHE = 16, VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, VK_OBJECT_TYPE_RENDER_PASS = 18, VK_OBJECT_TYPE_PIPELINE = 19, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, VK_OBJECT_TYPE_SAMPLER = 21, VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, VK_OBJECT_TYPE_FRAMEBUFFER = 24, VK_OBJECT_TYPE_COMMAND_POOL = 25, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT = 1000295000, VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_OBJECT_TYPE_VIDEO_SESSION_KHR = 1000023000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR = 1000023001, #endif VK_OBJECT_TYPE_CU_MODULE_NVX = 1000029000, VK_OBJECT_TYPE_CU_FUNCTION_NVX = 1000029001, VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA = 1000366000, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = 
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT, VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF } VkObjectType; typedef enum VkVendorId { VK_VENDOR_ID_VIV = 0x10001, VK_VENDOR_ID_VSI = 0x10002, VK_VENDOR_ID_KAZAN = 0x10003, VK_VENDOR_ID_CODEPLAY = 0x10004, VK_VENDOR_ID_MESA = 0x10005, VK_VENDOR_ID_POCL = 0x10006, VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF } VkVendorId; typedef enum VkSystemAllocationScope { VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF } VkSystemAllocationScope; typedef enum VkInternalAllocationType { VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF } VkInternalAllocationType; typedef enum VkFormat { VK_FORMAT_UNDEFINED = 0, VK_FORMAT_R4G4_UNORM_PACK8 = 1, VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, VK_FORMAT_R8_UNORM = 9, VK_FORMAT_R8_SNORM = 10, VK_FORMAT_R8_USCALED = 11, VK_FORMAT_R8_SSCALED = 12, VK_FORMAT_R8_UINT = 13, VK_FORMAT_R8_SINT = 14, VK_FORMAT_R8_SRGB = 15, VK_FORMAT_R8G8_UNORM = 16, VK_FORMAT_R8G8_SNORM = 17, VK_FORMAT_R8G8_USCALED = 18, VK_FORMAT_R8G8_SSCALED = 19, VK_FORMAT_R8G8_UINT = 20, VK_FORMAT_R8G8_SINT = 21, VK_FORMAT_R8G8_SRGB = 22, VK_FORMAT_R8G8B8_UNORM = 23, VK_FORMAT_R8G8B8_SNORM = 24, VK_FORMAT_R8G8B8_USCALED = 25, VK_FORMAT_R8G8B8_SSCALED = 26, VK_FORMAT_R8G8B8_UINT = 27, VK_FORMAT_R8G8B8_SINT = 28, VK_FORMAT_R8G8B8_SRGB = 29, VK_FORMAT_B8G8R8_UNORM = 30, VK_FORMAT_B8G8R8_SNORM = 31, VK_FORMAT_B8G8R8_USCALED = 32, VK_FORMAT_B8G8R8_SSCALED = 33, VK_FORMAT_B8G8R8_UINT = 34, VK_FORMAT_B8G8R8_SINT = 35, VK_FORMAT_B8G8R8_SRGB = 
36, VK_FORMAT_R8G8B8A8_UNORM = 37, VK_FORMAT_R8G8B8A8_SNORM = 38, VK_FORMAT_R8G8B8A8_USCALED = 39, VK_FORMAT_R8G8B8A8_SSCALED = 40, VK_FORMAT_R8G8B8A8_UINT = 41, VK_FORMAT_R8G8B8A8_SINT = 42, VK_FORMAT_R8G8B8A8_SRGB = 43, VK_FORMAT_B8G8R8A8_UNORM = 44, VK_FORMAT_B8G8R8A8_SNORM = 45, VK_FORMAT_B8G8R8A8_USCALED = 46, VK_FORMAT_B8G8R8A8_SSCALED = 47, VK_FORMAT_B8G8R8A8_UINT = 48, VK_FORMAT_B8G8R8A8_SINT = 49, VK_FORMAT_B8G8R8A8_SRGB = 50, VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, VK_FORMAT_R16_UNORM = 70, VK_FORMAT_R16_SNORM = 71, VK_FORMAT_R16_USCALED = 72, VK_FORMAT_R16_SSCALED = 73, VK_FORMAT_R16_UINT = 74, VK_FORMAT_R16_SINT = 75, VK_FORMAT_R16_SFLOAT = 76, VK_FORMAT_R16G16_UNORM = 77, VK_FORMAT_R16G16_SNORM = 78, VK_FORMAT_R16G16_USCALED = 79, VK_FORMAT_R16G16_SSCALED = 80, VK_FORMAT_R16G16_UINT = 81, VK_FORMAT_R16G16_SINT = 82, VK_FORMAT_R16G16_SFLOAT = 83, VK_FORMAT_R16G16B16_UNORM = 84, VK_FORMAT_R16G16B16_SNORM = 85, VK_FORMAT_R16G16B16_USCALED = 86, VK_FORMAT_R16G16B16_SSCALED = 87, VK_FORMAT_R16G16B16_UINT = 88, VK_FORMAT_R16G16B16_SINT = 89, VK_FORMAT_R16G16B16_SFLOAT = 90, VK_FORMAT_R16G16B16A16_UNORM = 91, VK_FORMAT_R16G16B16A16_SNORM = 92, VK_FORMAT_R16G16B16A16_USCALED = 93, VK_FORMAT_R16G16B16A16_SSCALED = 94, VK_FORMAT_R16G16B16A16_UINT = 95, 
VK_FORMAT_R16G16B16A16_SINT = 96, VK_FORMAT_R16G16B16A16_SFLOAT = 97, VK_FORMAT_R32_UINT = 98, VK_FORMAT_R32_SINT = 99, VK_FORMAT_R32_SFLOAT = 100, VK_FORMAT_R32G32_UINT = 101, VK_FORMAT_R32G32_SINT = 102, VK_FORMAT_R32G32_SFLOAT = 103, VK_FORMAT_R32G32B32_UINT = 104, VK_FORMAT_R32G32B32_SINT = 105, VK_FORMAT_R32G32B32_SFLOAT = 106, VK_FORMAT_R32G32B32A32_UINT = 107, VK_FORMAT_R32G32B32A32_SINT = 108, VK_FORMAT_R32G32B32A32_SFLOAT = 109, VK_FORMAT_R64_UINT = 110, VK_FORMAT_R64_SINT = 111, VK_FORMAT_R64_SFLOAT = 112, VK_FORMAT_R64G64_UINT = 113, VK_FORMAT_R64G64_SINT = 114, VK_FORMAT_R64G64_SFLOAT = 115, VK_FORMAT_R64G64B64_UINT = 116, VK_FORMAT_R64G64B64_SINT = 117, VK_FORMAT_R64G64B64_SFLOAT = 118, VK_FORMAT_R64G64B64A64_UINT = 119, VK_FORMAT_R64G64B64A64_SINT = 120, VK_FORMAT_R64G64B64A64_SFLOAT = 121, VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, VK_FORMAT_D16_UNORM = 124, VK_FORMAT_X8_D24_UNORM_PACK32 = 125, VK_FORMAT_D32_SFLOAT = 126, VK_FORMAT_S8_UINT = 127, VK_FORMAT_D16_UNORM_S8_UINT = 128, VK_FORMAT_D24_UNORM_S8_UINT = 129, VK_FORMAT_D32_SFLOAT_S8_UINT = 130, VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, VK_FORMAT_BC2_UNORM_BLOCK = 135, VK_FORMAT_BC2_SRGB_BLOCK = 136, VK_FORMAT_BC3_UNORM_BLOCK = 137, VK_FORMAT_BC3_SRGB_BLOCK = 138, VK_FORMAT_BC4_UNORM_BLOCK = 139, VK_FORMAT_BC4_SNORM_BLOCK = 140, VK_FORMAT_BC5_UNORM_BLOCK = 141, VK_FORMAT_BC5_SNORM_BLOCK = 142, VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, VK_FORMAT_BC7_UNORM_BLOCK = 145, VK_FORMAT_BC7_SRGB_BLOCK = 146, VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, 
VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, 
VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, VK_FORMAT_G8_B8R8_2PLANE_444_UNORM = 1000330000, VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 = 1000330001, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 = 1000330002, VK_FORMAT_G16_B16R16_2PLANE_444_UNORM = 1000330003, VK_FORMAT_A4R4G4B4_UNORM_PACK16 = 1000340000, VK_FORMAT_A4B4G4R4_UNORM_PACK16 = 1000340001, VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK = 1000066000, VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK = 1000066001, VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK = 1000066002, VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK = 1000066003, VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK = 1000066004, VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK = 1000066005, VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK = 1000066006, VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK = 1000066007, VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK = 1000066008, VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK = 1000066009, VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK = 1000066010, VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK = 1000066011, 
VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK = 1000066012, VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK = 1000066013, VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK, VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK, VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK, VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK, VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK, VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK, VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK, VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK, VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK, VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK, VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK, VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK, VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK, VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK, VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, 
VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, 
VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = VK_FORMAT_G8_B8R8_2PLANE_444_UNORM, VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16, VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16, VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM, VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16, VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16, VK_FORMAT_MAX_ENUM = 0x7FFFFFFF } VkFormat; typedef enum VkImageTiling { VK_IMAGE_TILING_OPTIMAL = 0, VK_IMAGE_TILING_LINEAR = 1, VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000, VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF } VkImageTiling; typedef enum VkImageType { VK_IMAGE_TYPE_1D = 0, VK_IMAGE_TYPE_2D = 1, VK_IMAGE_TYPE_3D = 2, VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkImageType; typedef enum VkPhysicalDeviceType { VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, VK_PHYSICAL_DEVICE_TYPE_CPU = 4, VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkPhysicalDeviceType; typedef enum VkQueryType { VK_QUERY_TYPE_OCCLUSION = 0, VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, 
VK_QUERY_TYPE_TIMESTAMP = 2, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR = 1000023000, #endif VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000, VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000, VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001, VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR = 1000299000, #endif VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT = 1000382000, VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR = 1000386000, VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR = 1000386001, VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF } VkQueryType; typedef enum VkSharingMode { VK_SHARING_MODE_EXCLUSIVE = 0, VK_SHARING_MODE_CONCURRENT = 1, VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF } VkSharingMode; typedef enum VkComponentSwizzle { VK_COMPONENT_SWIZZLE_IDENTITY = 0, VK_COMPONENT_SWIZZLE_ZERO = 1, VK_COMPONENT_SWIZZLE_ONE = 2, VK_COMPONENT_SWIZZLE_R = 3, VK_COMPONENT_SWIZZLE_G = 4, VK_COMPONENT_SWIZZLE_B = 5, VK_COMPONENT_SWIZZLE_A = 6, VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF } VkComponentSwizzle; typedef enum VkImageViewType { VK_IMAGE_VIEW_TYPE_1D = 0, VK_IMAGE_VIEW_TYPE_2D = 1, VK_IMAGE_VIEW_TYPE_3D = 2, VK_IMAGE_VIEW_TYPE_CUBE = 3, VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF } VkImageViewType; typedef enum VkBlendFactor { VK_BLEND_FACTOR_ZERO = 0, VK_BLEND_FACTOR_ONE = 1, VK_BLEND_FACTOR_SRC_COLOR = 2, VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, VK_BLEND_FACTOR_DST_COLOR = 4, VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, VK_BLEND_FACTOR_SRC_ALPHA = 6, VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, VK_BLEND_FACTOR_DST_ALPHA = 8, VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, 
/*
 * NOTE(review): Verbatim section of the vendored Khronos Vulkan core API
 * header. Declares fixed-function pipeline state enums: the tail of
 * VkBlendFactor, plus VkBlendOp, VkCompareOp, VkDynamicState, VkFrontFace,
 * and the opening of VkVertexInputRate. Every enumerator value is mandated
 * by the Vulkan specification (extension-provided values use the large
 * 1000######-style registry offsets; *_MAX_ENUM = 0x7FFFFFFF forces 32-bit
 * enum width). Do not hand-edit -- update by re-vendoring upstream
 * Vulkan-Headers so this copy stays ABI-identical to the registry.
 */
VK_BLEND_FACTOR_CONSTANT_COLOR = 10, VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, VK_BLEND_FACTOR_SRC1_COLOR = 15, VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, VK_BLEND_FACTOR_SRC1_ALPHA = 17, VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF } VkBlendFactor; typedef enum VkBlendOp { VK_BLEND_OP_ADD = 0, VK_BLEND_OP_SUBTRACT = 1, VK_BLEND_OP_REVERSE_SUBTRACT = 2, VK_BLEND_OP_MIN = 3, VK_BLEND_OP_MAX = 4, VK_BLEND_OP_ZERO_EXT = 1000148000, VK_BLEND_OP_SRC_EXT = 1000148001, VK_BLEND_OP_DST_EXT = 1000148002, VK_BLEND_OP_SRC_OVER_EXT = 1000148003, VK_BLEND_OP_DST_OVER_EXT = 1000148004, VK_BLEND_OP_SRC_IN_EXT = 1000148005, VK_BLEND_OP_DST_IN_EXT = 1000148006, VK_BLEND_OP_SRC_OUT_EXT = 1000148007, VK_BLEND_OP_DST_OUT_EXT = 1000148008, VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, VK_BLEND_OP_DST_ATOP_EXT = 1000148010, VK_BLEND_OP_XOR_EXT = 1000148011, VK_BLEND_OP_MULTIPLY_EXT = 1000148012, VK_BLEND_OP_SCREEN_EXT = 1000148013, VK_BLEND_OP_OVERLAY_EXT = 1000148014, VK_BLEND_OP_DARKEN_EXT = 1000148015, VK_BLEND_OP_LIGHTEN_EXT = 1000148016, VK_BLEND_OP_COLORDODGE_EXT = 1000148017, VK_BLEND_OP_COLORBURN_EXT = 1000148018, VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, VK_BLEND_OP_EXCLUSION_EXT = 1000148022, VK_BLEND_OP_INVERT_EXT = 1000148023, VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, VK_BLEND_OP_LINEARBURN_EXT = 1000148026, VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, VK_BLEND_OP_PINLIGHT_EXT = 1000148029, VK_BLEND_OP_HARDMIX_EXT = 1000148030, VK_BLEND_OP_HSL_HUE_EXT = 1000148031, VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, VK_BLEND_OP_PLUS_EXT = 1000148035, VK_BLEND_OP_PLUS_CLAMPED_EXT = 
1000148036, VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, VK_BLEND_OP_MINUS_EXT = 1000148039, VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, VK_BLEND_OP_CONTRAST_EXT = 1000148041, VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, VK_BLEND_OP_RED_EXT = 1000148043, VK_BLEND_OP_GREEN_EXT = 1000148044, VK_BLEND_OP_BLUE_EXT = 1000148045, VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF } VkBlendOp; typedef enum VkCompareOp { VK_COMPARE_OP_NEVER = 0, VK_COMPARE_OP_LESS = 1, VK_COMPARE_OP_EQUAL = 2, VK_COMPARE_OP_LESS_OR_EQUAL = 3, VK_COMPARE_OP_GREATER = 4, VK_COMPARE_OP_NOT_EQUAL = 5, VK_COMPARE_OP_GREATER_OR_EQUAL = 6, VK_COMPARE_OP_ALWAYS = 7, VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF } VkCompareOp; typedef enum VkDynamicState { VK_DYNAMIC_STATE_VIEWPORT = 0, VK_DYNAMIC_STATE_SCISSOR = 1, VK_DYNAMIC_STATE_LINE_WIDTH = 2, VK_DYNAMIC_STATE_DEPTH_BIAS = 3, VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, VK_DYNAMIC_STATE_CULL_MODE = 1000267000, VK_DYNAMIC_STATE_FRONT_FACE = 1000267001, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY = 1000267002, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT = 1000267003, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT = 1000267004, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE = 1000267005, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE = 1000267006, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE = 1000267007, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP = 1000267008, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE = 1000267009, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE = 1000267010, VK_DYNAMIC_STATE_STENCIL_OP = 1000267011, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE = 1000377001, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE = 1000377002, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE = 1000377004, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, 
VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000, VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT = 1000352000, VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT = 1000377000, VK_DYNAMIC_STATE_LOGIC_OP_EXT = 1000377003, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT = 1000381000, VK_DYNAMIC_STATE_CULL_MODE_EXT = VK_DYNAMIC_STATE_CULL_MODE, VK_DYNAMIC_STATE_FRONT_FACE_EXT = VK_DYNAMIC_STATE_FRONT_FACE, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = VK_DYNAMIC_STATE_DEPTH_COMPARE_OP, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE, VK_DYNAMIC_STATE_STENCIL_OP_EXT = VK_DYNAMIC_STATE_STENCIL_OP, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE, VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF } VkDynamicState; typedef enum VkFrontFace { VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, VK_FRONT_FACE_CLOCKWISE = 1, VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF } VkFrontFace; typedef enum VkVertexInputRate { 
/*
 * NOTE(review): Verbatim section of the vendored Khronos Vulkan core API
 * header. Declares geometry/sampling/binding enums: VkVertexInputRate,
 * VkPrimitiveTopology, VkPolygonMode, VkStencilOp, VkLogicOp, VkBorderColor,
 * VkFilter, VkSamplerAddressMode, VkSamplerMipmapMode, VkDescriptorType,
 * VkAttachmentLoadOp, VkAttachmentStoreOp, VkPipelineBindPoint,
 * VkCommandBufferLevel, VkIndexType, VkSubpassContents, and the opening of
 * VkAccessFlagBits. Values are fixed by the Vulkan specification (aliases
 * such as *_KHR/*_NV refer to promoted core values); do not hand-edit --
 * re-vendor upstream Vulkan-Headers instead.
 */
VK_VERTEX_INPUT_RATE_VERTEX = 0, VK_VERTEX_INPUT_RATE_INSTANCE = 1, VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF } VkVertexInputRate; typedef enum VkPrimitiveTopology { VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF } VkPrimitiveTopology; typedef enum VkPolygonMode { VK_POLYGON_MODE_FILL = 0, VK_POLYGON_MODE_LINE = 1, VK_POLYGON_MODE_POINT = 2, VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF } VkPolygonMode; typedef enum VkStencilOp { VK_STENCIL_OP_KEEP = 0, VK_STENCIL_OP_ZERO = 1, VK_STENCIL_OP_REPLACE = 2, VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, VK_STENCIL_OP_INVERT = 5, VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF } VkStencilOp; typedef enum VkLogicOp { VK_LOGIC_OP_CLEAR = 0, VK_LOGIC_OP_AND = 1, VK_LOGIC_OP_AND_REVERSE = 2, VK_LOGIC_OP_COPY = 3, VK_LOGIC_OP_AND_INVERTED = 4, VK_LOGIC_OP_NO_OP = 5, VK_LOGIC_OP_XOR = 6, VK_LOGIC_OP_OR = 7, VK_LOGIC_OP_NOR = 8, VK_LOGIC_OP_EQUIVALENT = 9, VK_LOGIC_OP_INVERT = 10, VK_LOGIC_OP_OR_REVERSE = 11, VK_LOGIC_OP_COPY_INVERTED = 12, VK_LOGIC_OP_OR_INVERTED = 13, VK_LOGIC_OP_NAND = 14, VK_LOGIC_OP_SET = 15, VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF } VkLogicOp; typedef enum VkBorderColor { VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, 
VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 1000287003, VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004, VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF } VkBorderColor; typedef enum VkFilter { VK_FILTER_NEAREST = 0, VK_FILTER_LINEAR = 1, VK_FILTER_CUBIC_EXT = 1000015000, VK_FILTER_CUBIC_IMG = VK_FILTER_CUBIC_EXT, VK_FILTER_MAX_ENUM = 0x7FFFFFFF } VkFilter; typedef enum VkSamplerAddressMode { VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF } VkSamplerAddressMode; typedef enum VkSamplerMipmapMode { VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF } VkSamplerMipmapMode; typedef enum VkDescriptorType { VK_DESCRIPTOR_TYPE_SAMPLER = 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK = 1000138000, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = 1000351000, VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM = 1000440000, VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM = 1000440001, VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK, VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF } VkDescriptorType; typedef enum VkAttachmentLoadOp { 
VK_ATTACHMENT_LOAD_OP_LOAD = 0, VK_ATTACHMENT_LOAD_OP_CLEAR = 1, VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, VK_ATTACHMENT_LOAD_OP_NONE_EXT = 1000400000, VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF } VkAttachmentLoadOp; typedef enum VkAttachmentStoreOp { VK_ATTACHMENT_STORE_OP_STORE = 0, VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, VK_ATTACHMENT_STORE_OP_NONE = 1000301000, VK_ATTACHMENT_STORE_OP_NONE_KHR = VK_ATTACHMENT_STORE_OP_NONE, VK_ATTACHMENT_STORE_OP_NONE_QCOM = VK_ATTACHMENT_STORE_OP_NONE, VK_ATTACHMENT_STORE_OP_NONE_EXT = VK_ATTACHMENT_STORE_OP_NONE, VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF } VkAttachmentStoreOp; typedef enum VkPipelineBindPoint { VK_PIPELINE_BIND_POINT_GRAPHICS = 0, VK_PIPELINE_BIND_POINT_COMPUTE = 1, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000, VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI = 1000369003, VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF } VkPipelineBindPoint; typedef enum VkCommandBufferLevel { VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF } VkCommandBufferLevel; typedef enum VkIndexType { VK_INDEX_TYPE_UINT16 = 0, VK_INDEX_TYPE_UINT32 = 1, VK_INDEX_TYPE_NONE_KHR = 1000165000, VK_INDEX_TYPE_UINT8_EXT = 1000265000, VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF } VkIndexType; typedef enum VkSubpassContents { VK_SUBPASS_CONTENTS_INLINE = 0, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF } VkSubpassContents; typedef enum VkAccessFlagBits { VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, VK_ACCESS_INDEX_READ_BIT = 0x00000002, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, VK_ACCESS_SHADER_READ_BIT = 0x00000020, VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, 
/*
 * NOTE(review): Verbatim section of the vendored Khronos Vulkan core API
 * header. Declares bitmask enums: the tail of VkAccessFlagBits,
 * VkImageAspectFlagBits, VkFormatFeatureFlagBits, VkImageCreateFlagBits, and
 * the opening of VkSampleCountFlagBits, plus their VkFlags typedefs. Each
 * enumerator is a single spec-assigned bit (or an alias of one); the
 * VK_ENABLE_BETA_EXTENSIONS guards around the video-coding bits are part of
 * the upstream header. Do not hand-edit -- re-vendor upstream Vulkan-Headers
 * instead.
 */
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, VK_ACCESS_HOST_READ_BIT = 0x00002000, VK_ACCESS_HOST_WRITE_BIT = 0x00004000, VK_ACCESS_MEMORY_READ_BIT = 0x00008000, VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, VK_ACCESS_NONE = 0, VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000, VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000, VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000, VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000, VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000, VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR, VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, VK_ACCESS_NONE_KHR = VK_ACCESS_NONE, VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkAccessFlagBits; typedef VkFlags VkAccessFlags; typedef enum VkImageAspectFlagBits { VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, VK_IMAGE_ASPECT_NONE = 0, VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080, VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100, 
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200, VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, VK_IMAGE_ASPECT_NONE_KHR = VK_IMAGE_ASPECT_NONE, VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageAspectFlagBits; typedef VkFlags VkImageAspectFlags; typedef enum VkFormatFeatureFlagBits { VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR = 
0x02000000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000, #endif VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000, VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000, #endif VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkFormatFeatureFlagBits; typedef 
VkFlags VkFormatFeatureFlags; typedef enum VkImageCreateFlagBits { VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000, VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000, VK_IMAGE_CREATE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_BIT_EXT = 0x00040000, VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT = 0x00020000, VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_QCOM = 0x00008000, VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageCreateFlagBits; typedef VkFlags VkImageCreateFlags; typedef enum VkSampleCountFlagBits { VK_SAMPLE_COUNT_1_BIT = 0x00000001, VK_SAMPLE_COUNT_2_BIT = 0x00000002, VK_SAMPLE_COUNT_4_BIT = 0x00000004, VK_SAMPLE_COUNT_8_BIT = 0x00000008, VK_SAMPLE_COUNT_16_BIT = 0x00000010, VK_SAMPLE_COUNT_32_BIT = 0x00000020, VK_SAMPLE_COUNT_64_BIT = 0x00000040, VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } 
/*
 * NOTE(review): Verbatim section of the vendored Khronos Vulkan core API
 * header. Declares resource/queue/synchronization bitmask enums: the tail of
 * VkSampleCountFlagBits, VkImageUsageFlagBits, VkInstanceCreateFlagBits,
 * VkMemoryHeapFlagBits, VkMemoryPropertyFlagBits, VkQueueFlagBits,
 * VkDeviceQueueCreateFlagBits, VkPipelineStageFlagBits, sparse-memory and
 * fence/event/query flag enums, VkBufferCreateFlagBits, and the opening of
 * VkBufferUsageFlagBits, plus their VkFlags typedefs. Bit values are fixed
 * by the Vulkan specification; the VK_ENABLE_BETA_EXTENSIONS guards around
 * the video-coding bits come from upstream. Do not hand-edit -- re-vendor
 * upstream Vulkan-Headers instead.
 */
VkSampleCountFlagBits; typedef VkFlags VkSampleCountFlags; typedef enum VkImageUsageFlagBits { VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00000400, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00000800, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR = 0x00001000, #endif VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00000100, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00002000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00004000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000, #endif VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x00080000, VK_IMAGE_USAGE_INVOCATION_MASK_BIT_HUAWEI = 0x00040000, VK_IMAGE_USAGE_SAMPLE_WEIGHT_BIT_QCOM = 0x00100000, VK_IMAGE_USAGE_SAMPLE_BLOCK_MATCH_BIT_QCOM = 0x00200000, VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageUsageFlagBits; typedef VkFlags VkImageUsageFlags; typedef enum VkInstanceCreateFlagBits { VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR = 0x00000001, VK_INSTANCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkInstanceCreateFlagBits; typedef VkFlags VkInstanceCreateFlags; typedef enum VkMemoryHeapFlagBits { VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = 
VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkMemoryHeapFlagBits; typedef VkFlags VkMemoryHeapFlags; typedef enum VkMemoryPropertyFlagBits { VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040, VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080, VK_MEMORY_PROPERTY_RDMA_CAPABLE_BIT_NV = 0x00000100, VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkMemoryPropertyFlagBits; typedef VkFlags VkMemoryPropertyFlags; typedef enum VkQueueFlagBits { VK_QUEUE_GRAPHICS_BIT = 0x00000001, VK_QUEUE_COMPUTE_BIT = 0x00000002, VK_QUEUE_TRANSFER_BIT = 0x00000004, VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, VK_QUEUE_PROTECTED_BIT = 0x00000010, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_QUEUE_VIDEO_DECODE_BIT_KHR = 0x00000020, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_QUEUE_VIDEO_ENCODE_BIT_KHR = 0x00000040, #endif VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkQueueFlagBits; typedef VkFlags VkQueueFlags; typedef VkFlags VkDeviceCreateFlags; typedef enum VkDeviceQueueCreateFlagBits { VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkDeviceQueueCreateFlagBits; typedef VkFlags VkDeviceQueueCreateFlags; typedef enum VkPipelineStageFlagBits { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, 
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, VK_PIPELINE_STAGE_NONE = 0, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000, VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000, VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000, VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, VK_PIPELINE_STAGE_NONE_KHR = VK_PIPELINE_STAGE_NONE, VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineStageFlagBits; typedef VkFlags VkPipelineStageFlags; typedef VkFlags VkMemoryMapFlags; typedef enum VkSparseMemoryBindFlagBits { VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSparseMemoryBindFlagBits; typedef VkFlags VkSparseMemoryBindFlags; typedef enum VkSparseImageFormatFlagBits { VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, 
VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSparseImageFormatFlagBits; typedef VkFlags VkSparseImageFormatFlags; typedef enum VkFenceCreateFlagBits { VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkFenceCreateFlagBits; typedef VkFlags VkFenceCreateFlags; typedef VkFlags VkSemaphoreCreateFlags; typedef enum VkEventCreateFlagBits { VK_EVENT_CREATE_DEVICE_ONLY_BIT = 0x00000001, VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = VK_EVENT_CREATE_DEVICE_ONLY_BIT, VK_EVENT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkEventCreateFlagBits; typedef VkFlags VkEventCreateFlags; typedef enum VkQueryPipelineStatisticFlagBits { VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkQueryPipelineStatisticFlagBits; typedef VkFlags VkQueryPipelineStatisticFlags; typedef VkFlags VkQueryPoolCreateFlags; typedef enum VkQueryResultFlagBits { VK_QUERY_RESULT_64_BIT = 0x00000001, VK_QUERY_RESULT_WAIT_BIT = 0x00000002, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_QUERY_RESULT_WITH_STATUS_BIT_KHR = 0x00000010, #endif VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 
0x7FFFFFFF } VkQueryResultFlagBits; typedef VkFlags VkQueryResultFlags; typedef enum VkBufferCreateFlagBits { VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010, VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkBufferCreateFlagBits; typedef VkFlags VkBufferCreateFlags; typedef enum VkBufferUsageFlagBits { VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00004000, #endif VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000, VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400, #ifdef VK_ENABLE_BETA_EXTENSIONS VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000, #endif #ifdef VK_ENABLE_BETA_EXTENSIONS 
/*
 * NOTE(review): Verbatim section of the vendored Khronos Vulkan core API
 * header. Declares pipeline/descriptor/render-pass bitmask enums: the tail
 * of VkBufferUsageFlagBits, VkImageViewCreateFlagBits,
 * VkPipelineCacheCreateFlagBits, VkColorComponentFlagBits,
 * VkPipelineCreateFlagBits, VkPipelineShaderStageCreateFlagBits,
 * VkShaderStageFlagBits, VkCullModeFlagBits, the pipeline-state VkFlags
 * typedefs, VkSamplerCreateFlagBits, VkDescriptorPoolCreateFlagBits,
 * VkDescriptorSetLayoutCreateFlagBits, VkAttachmentDescriptionFlagBits,
 * VkDependencyFlagBits, VkFramebufferCreateFlagBits,
 * VkRenderPassCreateFlagBits, VkSubpassDescriptionFlagBits,
 * VkCommandPoolCreateFlagBits, VkCommandPoolResetFlagBits, and the opening
 * of VkCommandBufferUsageFlagBits (continues past this section). Bit values
 * are fixed by the Vulkan specification; do not hand-edit -- re-vendor
 * upstream Vulkan-Headers instead.
 */
VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000, #endif VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkBufferUsageFlagBits; typedef VkFlags VkBufferUsageFlags; typedef VkFlags VkBufferViewCreateFlags; typedef enum VkImageViewCreateFlagBits { VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001, VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT = 0x00000002, VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageViewCreateFlagBits; typedef VkFlags VkImageViewCreateFlags; typedef VkFlags VkShaderModuleCreateFlags; typedef enum VkPipelineCacheCreateFlagBits { VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineCacheCreateFlagBits; typedef VkFlags VkPipelineCacheCreateFlags; typedef enum VkColorComponentFlagBits { VK_COLOR_COMPONENT_R_BIT = 0x00000001, VK_COLOR_COMPONENT_G_BIT = 0x00000002, VK_COLOR_COMPONENT_B_BIT = 0x00000004, VK_COLOR_COMPONENT_A_BIT = 0x00000008, VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkColorComponentFlagBits; typedef VkFlags VkColorComponentFlags; typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100, VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200, 
VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000, VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000, VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000, VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800, VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000, VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400, VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000, VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000, VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000, VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 
VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT, VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT, VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineCreateFlagBits; typedef VkFlags VkPipelineCreateFlags; typedef enum VkPipelineShaderStageCreateFlagBits { VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT = 0x00000001, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT = 0x00000002, VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT, VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineShaderStageCreateFlagBits; typedef VkFlags VkPipelineShaderStageCreateFlags; typedef enum VkShaderStageFlagBits { VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, VK_SHADER_STAGE_ALL = 0x7FFFFFFF, VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100, VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200, VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400, VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800, VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000, VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000, VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, VK_SHADER_STAGE_SUBPASS_SHADING_BIT_HUAWEI = 0x00004000, VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR, VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR, 
VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR, VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkShaderStageFlagBits; typedef enum VkCullModeFlagBits { VK_CULL_MODE_NONE = 0, VK_CULL_MODE_FRONT_BIT = 0x00000001, VK_CULL_MODE_BACK_BIT = 0x00000002, VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkCullModeFlagBits; typedef VkFlags VkCullModeFlags; typedef VkFlags VkPipelineVertexInputStateCreateFlags; typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; typedef VkFlags VkPipelineTessellationStateCreateFlags; typedef VkFlags VkPipelineViewportStateCreateFlags; typedef VkFlags VkPipelineRasterizationStateCreateFlags; typedef VkFlags VkPipelineMultisampleStateCreateFlags; typedef enum VkPipelineDepthStencilStateCreateFlagBits { VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = 0x00000001, VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = 0x00000002, VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineDepthStencilStateCreateFlagBits; typedef VkFlags VkPipelineDepthStencilStateCreateFlags; typedef enum VkPipelineColorBlendStateCreateFlagBits { VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM = 0x00000001, VK_PIPELINE_COLOR_BLEND_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineColorBlendStateCreateFlagBits; typedef VkFlags VkPipelineColorBlendStateCreateFlags; typedef VkFlags VkPipelineDynamicStateCreateFlags; typedef enum VkPipelineLayoutCreateFlagBits { VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT = 0x00000002, VK_PIPELINE_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineLayoutCreateFlagBits; typedef VkFlags VkPipelineLayoutCreateFlags; typedef VkFlags VkShaderStageFlags; typedef enum VkSamplerCreateFlagBits { VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 
0x00000001, VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT = 0x00000004, VK_SAMPLER_CREATE_IMAGE_PROCESSING_BIT_QCOM = 0x00000010, VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSamplerCreateFlagBits; typedef VkFlags VkSamplerCreateFlags; typedef enum VkDescriptorPoolCreateFlagBits { VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = 0x00000004, VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkDescriptorPoolCreateFlagBits; typedef VkFlags VkDescriptorPoolCreateFlags; typedef VkFlags VkDescriptorPoolResetFlags; typedef enum VkDescriptorSetLayoutCreateFlagBits { VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = 0x00000004, VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkDescriptorSetLayoutCreateFlagBits; typedef VkFlags VkDescriptorSetLayoutCreateFlags; typedef enum VkAttachmentDescriptionFlagBits { VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkAttachmentDescriptionFlagBits; typedef VkFlags VkAttachmentDescriptionFlags; typedef enum VkDependencyFlagBits { VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, VK_DEPENDENCY_FEEDBACK_LOOP_BIT_EXT = 0x00000008, VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, 
VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkDependencyFlagBits; typedef VkFlags VkDependencyFlags; typedef enum VkFramebufferCreateFlagBits { VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkFramebufferCreateFlagBits; typedef VkFlags VkFramebufferCreateFlags; typedef enum VkRenderPassCreateFlagBits { VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002, VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkRenderPassCreateFlagBits; typedef VkFlags VkRenderPassCreateFlags; typedef enum VkSubpassDescriptionFlagBits { VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004, VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008, VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM = 0x00000010, VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = 0x00000020, VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = 0x00000040, VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSubpassDescriptionFlagBits; typedef VkFlags VkSubpassDescriptionFlags; typedef enum VkCommandPoolCreateFlagBits { VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkCommandPoolCreateFlagBits; typedef VkFlags VkCommandPoolCreateFlags; typedef enum VkCommandPoolResetFlagBits { VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkCommandPoolResetFlagBits; typedef VkFlags VkCommandPoolResetFlags; typedef enum VkCommandBufferUsageFlagBits { VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, 
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkCommandBufferUsageFlagBits; typedef VkFlags VkCommandBufferUsageFlags; typedef enum VkQueryControlFlagBits { VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkQueryControlFlagBits; typedef VkFlags VkQueryControlFlags; typedef enum VkCommandBufferResetFlagBits { VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkCommandBufferResetFlagBits; typedef VkFlags VkCommandBufferResetFlags; typedef enum VkStencilFaceFlagBits { VK_STENCIL_FACE_FRONT_BIT = 0x00000001, VK_STENCIL_FACE_BACK_BIT = 0x00000002, VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkStencilFaceFlagBits; typedef VkFlags VkStencilFaceFlags; typedef struct VkExtent2D { uint32_t width; uint32_t height; } VkExtent2D; typedef struct VkExtent3D { uint32_t width; uint32_t height; uint32_t depth; } VkExtent3D; typedef struct VkOffset2D { int32_t x; int32_t y; } VkOffset2D; typedef struct VkOffset3D { int32_t x; int32_t y; int32_t z; } VkOffset3D; typedef struct VkRect2D { VkOffset2D offset; VkExtent2D extent; } VkRect2D; typedef struct VkBaseInStructure { VkStructureType sType; const struct VkBaseInStructure* pNext; } VkBaseInStructure; typedef struct VkBaseOutStructure { VkStructureType sType; struct VkBaseOutStructure* pNext; } VkBaseOutStructure; typedef struct VkBufferMemoryBarrier { VkStructureType sType; const void* pNext; VkAccessFlags srcAccessMask; VkAccessFlags dstAccessMask; uint32_t srcQueueFamilyIndex; uint32_t dstQueueFamilyIndex; VkBuffer buffer; VkDeviceSize offset; VkDeviceSize size; } VkBufferMemoryBarrier; typedef struct VkDispatchIndirectCommand { uint32_t x; uint32_t y; uint32_t z; } 
VkDispatchIndirectCommand; typedef struct VkDrawIndexedIndirectCommand { uint32_t indexCount; uint32_t instanceCount; uint32_t firstIndex; int32_t vertexOffset; uint32_t firstInstance; } VkDrawIndexedIndirectCommand; typedef struct VkDrawIndirectCommand { uint32_t vertexCount; uint32_t instanceCount; uint32_t firstVertex; uint32_t firstInstance; } VkDrawIndirectCommand; typedef struct VkImageSubresourceRange { VkImageAspectFlags aspectMask; uint32_t baseMipLevel; uint32_t levelCount; uint32_t baseArrayLayer; uint32_t layerCount; } VkImageSubresourceRange; typedef struct VkImageMemoryBarrier { VkStructureType sType; const void* pNext; VkAccessFlags srcAccessMask; VkAccessFlags dstAccessMask; VkImageLayout oldLayout; VkImageLayout newLayout; uint32_t srcQueueFamilyIndex; uint32_t dstQueueFamilyIndex; VkImage image; VkImageSubresourceRange subresourceRange; } VkImageMemoryBarrier; typedef struct VkMemoryBarrier { VkStructureType sType; const void* pNext; VkAccessFlags srcAccessMask; VkAccessFlags dstAccessMask; } VkMemoryBarrier; typedef struct VkPipelineCacheHeaderVersionOne { uint32_t headerSize; VkPipelineCacheHeaderVersion headerVersion; uint32_t vendorID; uint32_t deviceID; uint8_t pipelineCacheUUID[VK_UUID_SIZE]; } VkPipelineCacheHeaderVersionOne; typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope); typedef void (VKAPI_PTR *PFN_vkFreeFunction)( void* pUserData, void* pMemory); typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope); typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope); typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope 
allocationScope); typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); typedef struct VkAllocationCallbacks { void* pUserData; PFN_vkAllocationFunction pfnAllocation; PFN_vkReallocationFunction pfnReallocation; PFN_vkFreeFunction pfnFree; PFN_vkInternalAllocationNotification pfnInternalAllocation; PFN_vkInternalFreeNotification pfnInternalFree; } VkAllocationCallbacks; typedef struct VkApplicationInfo { VkStructureType sType; const void* pNext; const char* pApplicationName; uint32_t applicationVersion; const char* pEngineName; uint32_t engineVersion; uint32_t apiVersion; } VkApplicationInfo; typedef struct VkFormatProperties { VkFormatFeatureFlags linearTilingFeatures; VkFormatFeatureFlags optimalTilingFeatures; VkFormatFeatureFlags bufferFeatures; } VkFormatProperties; typedef struct VkImageFormatProperties { VkExtent3D maxExtent; uint32_t maxMipLevels; uint32_t maxArrayLayers; VkSampleCountFlags sampleCounts; VkDeviceSize maxResourceSize; } VkImageFormatProperties; typedef struct VkInstanceCreateInfo { VkStructureType sType; const void* pNext; VkInstanceCreateFlags flags; const VkApplicationInfo* pApplicationInfo; uint32_t enabledLayerCount; const char* const* ppEnabledLayerNames; uint32_t enabledExtensionCount; const char* const* ppEnabledExtensionNames; } VkInstanceCreateInfo; typedef struct VkMemoryHeap { VkDeviceSize size; VkMemoryHeapFlags flags; } VkMemoryHeap; typedef struct VkMemoryType { VkMemoryPropertyFlags propertyFlags; uint32_t heapIndex; } VkMemoryType; typedef struct VkPhysicalDeviceFeatures { VkBool32 robustBufferAccess; VkBool32 fullDrawIndexUint32; VkBool32 imageCubeArray; VkBool32 independentBlend; VkBool32 geometryShader; VkBool32 tessellationShader; VkBool32 sampleRateShading; VkBool32 dualSrcBlend; VkBool32 logicOp; VkBool32 multiDrawIndirect; VkBool32 drawIndirectFirstInstance; VkBool32 depthClamp; VkBool32 depthBiasClamp; VkBool32 fillModeNonSolid; VkBool32 depthBounds; VkBool32 wideLines; VkBool32 largePoints; VkBool32 alphaToOne; 
VkBool32 multiViewport; VkBool32 samplerAnisotropy; VkBool32 textureCompressionETC2; VkBool32 textureCompressionASTC_LDR; VkBool32 textureCompressionBC; VkBool32 occlusionQueryPrecise; VkBool32 pipelineStatisticsQuery; VkBool32 vertexPipelineStoresAndAtomics; VkBool32 fragmentStoresAndAtomics; VkBool32 shaderTessellationAndGeometryPointSize; VkBool32 shaderImageGatherExtended; VkBool32 shaderStorageImageExtendedFormats; VkBool32 shaderStorageImageMultisample; VkBool32 shaderStorageImageReadWithoutFormat; VkBool32 shaderStorageImageWriteWithoutFormat; VkBool32 shaderUniformBufferArrayDynamicIndexing; VkBool32 shaderSampledImageArrayDynamicIndexing; VkBool32 shaderStorageBufferArrayDynamicIndexing; VkBool32 shaderStorageImageArrayDynamicIndexing; VkBool32 shaderClipDistance; VkBool32 shaderCullDistance; VkBool32 shaderFloat64; VkBool32 shaderInt64; VkBool32 shaderInt16; VkBool32 shaderResourceResidency; VkBool32 shaderResourceMinLod; VkBool32 sparseBinding; VkBool32 sparseResidencyBuffer; VkBool32 sparseResidencyImage2D; VkBool32 sparseResidencyImage3D; VkBool32 sparseResidency2Samples; VkBool32 sparseResidency4Samples; VkBool32 sparseResidency8Samples; VkBool32 sparseResidency16Samples; VkBool32 sparseResidencyAliased; VkBool32 variableMultisampleRate; VkBool32 inheritedQueries; } VkPhysicalDeviceFeatures; typedef struct VkPhysicalDeviceLimits { uint32_t maxImageDimension1D; uint32_t maxImageDimension2D; uint32_t maxImageDimension3D; uint32_t maxImageDimensionCube; uint32_t maxImageArrayLayers; uint32_t maxTexelBufferElements; uint32_t maxUniformBufferRange; uint32_t maxStorageBufferRange; uint32_t maxPushConstantsSize; uint32_t maxMemoryAllocationCount; uint32_t maxSamplerAllocationCount; VkDeviceSize bufferImageGranularity; VkDeviceSize sparseAddressSpaceSize; uint32_t maxBoundDescriptorSets; uint32_t maxPerStageDescriptorSamplers; uint32_t maxPerStageDescriptorUniformBuffers; uint32_t maxPerStageDescriptorStorageBuffers; uint32_t 
maxPerStageDescriptorSampledImages; uint32_t maxPerStageDescriptorStorageImages; uint32_t maxPerStageDescriptorInputAttachments; uint32_t maxPerStageResources; uint32_t maxDescriptorSetSamplers; uint32_t maxDescriptorSetUniformBuffers; uint32_t maxDescriptorSetUniformBuffersDynamic; uint32_t maxDescriptorSetStorageBuffers; uint32_t maxDescriptorSetStorageBuffersDynamic; uint32_t maxDescriptorSetSampledImages; uint32_t maxDescriptorSetStorageImages; uint32_t maxDescriptorSetInputAttachments; uint32_t maxVertexInputAttributes; uint32_t maxVertexInputBindings; uint32_t maxVertexInputAttributeOffset; uint32_t maxVertexInputBindingStride; uint32_t maxVertexOutputComponents; uint32_t maxTessellationGenerationLevel; uint32_t maxTessellationPatchSize; uint32_t maxTessellationControlPerVertexInputComponents; uint32_t maxTessellationControlPerVertexOutputComponents; uint32_t maxTessellationControlPerPatchOutputComponents; uint32_t maxTessellationControlTotalOutputComponents; uint32_t maxTessellationEvaluationInputComponents; uint32_t maxTessellationEvaluationOutputComponents; uint32_t maxGeometryShaderInvocations; uint32_t maxGeometryInputComponents; uint32_t maxGeometryOutputComponents; uint32_t maxGeometryOutputVertices; uint32_t maxGeometryTotalOutputComponents; uint32_t maxFragmentInputComponents; uint32_t maxFragmentOutputAttachments; uint32_t maxFragmentDualSrcAttachments; uint32_t maxFragmentCombinedOutputResources; uint32_t maxComputeSharedMemorySize; uint32_t maxComputeWorkGroupCount[3]; uint32_t maxComputeWorkGroupInvocations; uint32_t maxComputeWorkGroupSize[3]; uint32_t subPixelPrecisionBits; uint32_t subTexelPrecisionBits; uint32_t mipmapPrecisionBits; uint32_t maxDrawIndexedIndexValue; uint32_t maxDrawIndirectCount; float maxSamplerLodBias; float maxSamplerAnisotropy; uint32_t maxViewports; uint32_t maxViewportDimensions[2]; float viewportBoundsRange[2]; uint32_t viewportSubPixelBits; size_t minMemoryMapAlignment; VkDeviceSize minTexelBufferOffsetAlignment; 
VkDeviceSize minUniformBufferOffsetAlignment; VkDeviceSize minStorageBufferOffsetAlignment; int32_t minTexelOffset; uint32_t maxTexelOffset; int32_t minTexelGatherOffset; uint32_t maxTexelGatherOffset; float minInterpolationOffset; float maxInterpolationOffset; uint32_t subPixelInterpolationOffsetBits; uint32_t maxFramebufferWidth; uint32_t maxFramebufferHeight; uint32_t maxFramebufferLayers; VkSampleCountFlags framebufferColorSampleCounts; VkSampleCountFlags framebufferDepthSampleCounts; VkSampleCountFlags framebufferStencilSampleCounts; VkSampleCountFlags framebufferNoAttachmentsSampleCounts; uint32_t maxColorAttachments; VkSampleCountFlags sampledImageColorSampleCounts; VkSampleCountFlags sampledImageIntegerSampleCounts; VkSampleCountFlags sampledImageDepthSampleCounts; VkSampleCountFlags sampledImageStencilSampleCounts; VkSampleCountFlags storageImageSampleCounts; uint32_t maxSampleMaskWords; VkBool32 timestampComputeAndGraphics; float timestampPeriod; uint32_t maxClipDistances; uint32_t maxCullDistances; uint32_t maxCombinedClipAndCullDistances; uint32_t discreteQueuePriorities; float pointSizeRange[2]; float lineWidthRange[2]; float pointSizeGranularity; float lineWidthGranularity; VkBool32 strictLines; VkBool32 standardSampleLocations; VkDeviceSize optimalBufferCopyOffsetAlignment; VkDeviceSize optimalBufferCopyRowPitchAlignment; VkDeviceSize nonCoherentAtomSize; } VkPhysicalDeviceLimits; typedef struct VkPhysicalDeviceMemoryProperties { uint32_t memoryTypeCount; VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; uint32_t memoryHeapCount; VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; } VkPhysicalDeviceMemoryProperties; typedef struct VkPhysicalDeviceSparseProperties { VkBool32 residencyStandard2DBlockShape; VkBool32 residencyStandard2DMultisampleBlockShape; VkBool32 residencyStandard3DBlockShape; VkBool32 residencyAlignedMipSize; VkBool32 residencyNonResidentStrict; } VkPhysicalDeviceSparseProperties; typedef struct VkPhysicalDeviceProperties { uint32_t 
apiVersion; uint32_t driverVersion; uint32_t vendorID; uint32_t deviceID; VkPhysicalDeviceType deviceType; char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; uint8_t pipelineCacheUUID[VK_UUID_SIZE]; VkPhysicalDeviceLimits limits; VkPhysicalDeviceSparseProperties sparseProperties; } VkPhysicalDeviceProperties; typedef struct VkQueueFamilyProperties { VkQueueFlags queueFlags; uint32_t queueCount; uint32_t timestampValidBits; VkExtent3D minImageTransferGranularity; } VkQueueFamilyProperties; typedef struct VkDeviceQueueCreateInfo { VkStructureType sType; const void* pNext; VkDeviceQueueCreateFlags flags; uint32_t queueFamilyIndex; uint32_t queueCount; const float* pQueuePriorities; } VkDeviceQueueCreateInfo; typedef struct VkDeviceCreateInfo { VkStructureType sType; const void* pNext; VkDeviceCreateFlags flags; uint32_t queueCreateInfoCount; const VkDeviceQueueCreateInfo* pQueueCreateInfos; uint32_t enabledLayerCount; const char* const* ppEnabledLayerNames; uint32_t enabledExtensionCount; const char* const* ppEnabledExtensionNames; const VkPhysicalDeviceFeatures* pEnabledFeatures; } VkDeviceCreateInfo; typedef struct VkExtensionProperties { char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; uint32_t specVersion; } VkExtensionProperties; typedef struct VkLayerProperties { char layerName[VK_MAX_EXTENSION_NAME_SIZE]; uint32_t specVersion; uint32_t implementationVersion; char description[VK_MAX_DESCRIPTION_SIZE]; } VkLayerProperties; typedef struct VkSubmitInfo { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreCount; const VkSemaphore* pWaitSemaphores; const VkPipelineStageFlags* pWaitDstStageMask; uint32_t commandBufferCount; const VkCommandBuffer* pCommandBuffers; uint32_t signalSemaphoreCount; const VkSemaphore* pSignalSemaphores; } VkSubmitInfo; typedef struct VkMappedMemoryRange { VkStructureType sType; const void* pNext; VkDeviceMemory memory; VkDeviceSize offset; VkDeviceSize size; } VkMappedMemoryRange; typedef struct VkMemoryAllocateInfo { 
VkStructureType sType; const void* pNext; VkDeviceSize allocationSize; uint32_t memoryTypeIndex; } VkMemoryAllocateInfo; typedef struct VkMemoryRequirements { VkDeviceSize size; VkDeviceSize alignment; uint32_t memoryTypeBits; } VkMemoryRequirements; typedef struct VkSparseMemoryBind { VkDeviceSize resourceOffset; VkDeviceSize size; VkDeviceMemory memory; VkDeviceSize memoryOffset; VkSparseMemoryBindFlags flags; } VkSparseMemoryBind; typedef struct VkSparseBufferMemoryBindInfo { VkBuffer buffer; uint32_t bindCount; const VkSparseMemoryBind* pBinds; } VkSparseBufferMemoryBindInfo; typedef struct VkSparseImageOpaqueMemoryBindInfo { VkImage image; uint32_t bindCount; const VkSparseMemoryBind* pBinds; } VkSparseImageOpaqueMemoryBindInfo; typedef struct VkImageSubresource { VkImageAspectFlags aspectMask; uint32_t mipLevel; uint32_t arrayLayer; } VkImageSubresource; typedef struct VkSparseImageMemoryBind { VkImageSubresource subresource; VkOffset3D offset; VkExtent3D extent; VkDeviceMemory memory; VkDeviceSize memoryOffset; VkSparseMemoryBindFlags flags; } VkSparseImageMemoryBind; typedef struct VkSparseImageMemoryBindInfo { VkImage image; uint32_t bindCount; const VkSparseImageMemoryBind* pBinds; } VkSparseImageMemoryBindInfo; typedef struct VkBindSparseInfo { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreCount; const VkSemaphore* pWaitSemaphores; uint32_t bufferBindCount; const VkSparseBufferMemoryBindInfo* pBufferBinds; uint32_t imageOpaqueBindCount; const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; uint32_t imageBindCount; const VkSparseImageMemoryBindInfo* pImageBinds; uint32_t signalSemaphoreCount; const VkSemaphore* pSignalSemaphores; } VkBindSparseInfo; typedef struct VkSparseImageFormatProperties { VkImageAspectFlags aspectMask; VkExtent3D imageGranularity; VkSparseImageFormatFlags flags; } VkSparseImageFormatProperties; typedef struct VkSparseImageMemoryRequirements { VkSparseImageFormatProperties formatProperties; uint32_t 
imageMipTailFirstLod; VkDeviceSize imageMipTailSize; VkDeviceSize imageMipTailOffset; VkDeviceSize imageMipTailStride; } VkSparseImageMemoryRequirements; typedef struct VkFenceCreateInfo { VkStructureType sType; const void* pNext; VkFenceCreateFlags flags; } VkFenceCreateInfo; typedef struct VkSemaphoreCreateInfo { VkStructureType sType; const void* pNext; VkSemaphoreCreateFlags flags; } VkSemaphoreCreateInfo; typedef struct VkEventCreateInfo { VkStructureType sType; const void* pNext; VkEventCreateFlags flags; } VkEventCreateInfo; typedef struct VkQueryPoolCreateInfo { VkStructureType sType; const void* pNext; VkQueryPoolCreateFlags flags; VkQueryType queryType; uint32_t queryCount; VkQueryPipelineStatisticFlags pipelineStatistics; } VkQueryPoolCreateInfo; typedef struct VkBufferCreateInfo { VkStructureType sType; const void* pNext; VkBufferCreateFlags flags; VkDeviceSize size; VkBufferUsageFlags usage; VkSharingMode sharingMode; uint32_t queueFamilyIndexCount; const uint32_t* pQueueFamilyIndices; } VkBufferCreateInfo; typedef struct VkBufferViewCreateInfo { VkStructureType sType; const void* pNext; VkBufferViewCreateFlags flags; VkBuffer buffer; VkFormat format; VkDeviceSize offset; VkDeviceSize range; } VkBufferViewCreateInfo; typedef struct VkImageCreateInfo { VkStructureType sType; const void* pNext; VkImageCreateFlags flags; VkImageType imageType; VkFormat format; VkExtent3D extent; uint32_t mipLevels; uint32_t arrayLayers; VkSampleCountFlagBits samples; VkImageTiling tiling; VkImageUsageFlags usage; VkSharingMode sharingMode; uint32_t queueFamilyIndexCount; const uint32_t* pQueueFamilyIndices; VkImageLayout initialLayout; } VkImageCreateInfo; typedef struct VkSubresourceLayout { VkDeviceSize offset; VkDeviceSize size; VkDeviceSize rowPitch; VkDeviceSize arrayPitch; VkDeviceSize depthPitch; } VkSubresourceLayout; typedef struct VkComponentMapping { VkComponentSwizzle r; VkComponentSwizzle g; VkComponentSwizzle b; VkComponentSwizzle a; } VkComponentMapping; 
/* NOTE(review): vendored Khronos vulkan_core.h — Vulkan 1.0 pipeline,
 * descriptor, render-pass and command-buffer struct declarations. Member
 * order is ABI-mandated by the specification; do not edit by hand. */
typedef struct VkImageViewCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkImageViewCreateFlags flags;
    VkImage image;
    VkImageViewType viewType;
    VkFormat format;
    VkComponentMapping components;
    VkImageSubresourceRange subresourceRange;
} VkImageViewCreateInfo;

typedef struct VkShaderModuleCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkShaderModuleCreateFlags flags;
    size_t codeSize;
    const uint32_t* pCode;
} VkShaderModuleCreateInfo;

typedef struct VkPipelineCacheCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineCacheCreateFlags flags;
    size_t initialDataSize;
    const void* pInitialData;
} VkPipelineCacheCreateInfo;

/* Specialization constants supplied at pipeline creation time. */
typedef struct VkSpecializationMapEntry {
    uint32_t constantID;
    uint32_t offset;
    size_t size;
} VkSpecializationMapEntry;

typedef struct VkSpecializationInfo {
    uint32_t mapEntryCount;
    const VkSpecializationMapEntry* pMapEntries;
    size_t dataSize;
    const void* pData;
} VkSpecializationInfo;

typedef struct VkPipelineShaderStageCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineShaderStageCreateFlags flags;
    VkShaderStageFlagBits stage;
    VkShaderModule module;
    const char* pName;
    const VkSpecializationInfo* pSpecializationInfo;
} VkPipelineShaderStageCreateInfo;

typedef struct VkComputePipelineCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineCreateFlags flags;
    VkPipelineShaderStageCreateInfo stage;
    VkPipelineLayout layout;
    VkPipeline basePipelineHandle;
    int32_t basePipelineIndex;
} VkComputePipelineCreateInfo;

/* ---- Graphics pipeline fixed-function state ---- */
typedef struct VkVertexInputBindingDescription {
    uint32_t binding;
    uint32_t stride;
    VkVertexInputRate inputRate;
} VkVertexInputBindingDescription;

typedef struct VkVertexInputAttributeDescription {
    uint32_t location;
    uint32_t binding;
    VkFormat format;
    uint32_t offset;
} VkVertexInputAttributeDescription;

typedef struct VkPipelineVertexInputStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineVertexInputStateCreateFlags flags;
    uint32_t vertexBindingDescriptionCount;
    const VkVertexInputBindingDescription* pVertexBindingDescriptions;
    uint32_t vertexAttributeDescriptionCount;
    const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
} VkPipelineVertexInputStateCreateInfo;

typedef struct VkPipelineInputAssemblyStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineInputAssemblyStateCreateFlags flags;
    VkPrimitiveTopology topology;
    VkBool32 primitiveRestartEnable;
} VkPipelineInputAssemblyStateCreateInfo;

typedef struct VkPipelineTessellationStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineTessellationStateCreateFlags flags;
    uint32_t patchControlPoints;
} VkPipelineTessellationStateCreateInfo;

typedef struct VkViewport {
    float x;
    float y;
    float width;
    float height;
    float minDepth;
    float maxDepth;
} VkViewport;

typedef struct VkPipelineViewportStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineViewportStateCreateFlags flags;
    uint32_t viewportCount;
    const VkViewport* pViewports;
    uint32_t scissorCount;
    const VkRect2D* pScissors;
} VkPipelineViewportStateCreateInfo;

typedef struct VkPipelineRasterizationStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineRasterizationStateCreateFlags flags;
    VkBool32 depthClampEnable;
    VkBool32 rasterizerDiscardEnable;
    VkPolygonMode polygonMode;
    VkCullModeFlags cullMode;
    VkFrontFace frontFace;
    VkBool32 depthBiasEnable;
    float depthBiasConstantFactor;
    float depthBiasClamp;
    float depthBiasSlopeFactor;
    float lineWidth;
} VkPipelineRasterizationStateCreateInfo;

typedef struct VkPipelineMultisampleStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineMultisampleStateCreateFlags flags;
    VkSampleCountFlagBits rasterizationSamples;
    VkBool32 sampleShadingEnable;
    float minSampleShading;
    const VkSampleMask* pSampleMask;
    VkBool32 alphaToCoverageEnable;
    VkBool32 alphaToOneEnable;
} VkPipelineMultisampleStateCreateInfo;

typedef struct VkStencilOpState {
    VkStencilOp failOp;
    VkStencilOp passOp;
    VkStencilOp depthFailOp;
    VkCompareOp compareOp;
    uint32_t compareMask;
    uint32_t writeMask;
    uint32_t reference;
} VkStencilOpState;

typedef struct VkPipelineDepthStencilStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineDepthStencilStateCreateFlags flags;
    VkBool32 depthTestEnable;
    VkBool32 depthWriteEnable;
    VkCompareOp depthCompareOp;
    VkBool32 depthBoundsTestEnable;
    VkBool32 stencilTestEnable;
    VkStencilOpState front;
    VkStencilOpState back;
    float minDepthBounds;
    float maxDepthBounds;
} VkPipelineDepthStencilStateCreateInfo;

typedef struct VkPipelineColorBlendAttachmentState {
    VkBool32 blendEnable;
    VkBlendFactor srcColorBlendFactor;
    VkBlendFactor dstColorBlendFactor;
    VkBlendOp colorBlendOp;
    VkBlendFactor srcAlphaBlendFactor;
    VkBlendFactor dstAlphaBlendFactor;
    VkBlendOp alphaBlendOp;
    VkColorComponentFlags colorWriteMask;
} VkPipelineColorBlendAttachmentState;

typedef struct VkPipelineColorBlendStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineColorBlendStateCreateFlags flags;
    VkBool32 logicOpEnable;
    VkLogicOp logicOp;
    uint32_t attachmentCount;
    const VkPipelineColorBlendAttachmentState* pAttachments;
    float blendConstants[4];
} VkPipelineColorBlendStateCreateInfo;

typedef struct VkPipelineDynamicStateCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineDynamicStateCreateFlags flags;
    uint32_t dynamicStateCount;
    const VkDynamicState* pDynamicStates;
} VkPipelineDynamicStateCreateInfo;

/* Aggregates all of the state blocks above for vkCreateGraphicsPipelines. */
typedef struct VkGraphicsPipelineCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineCreateFlags flags;
    uint32_t stageCount;
    const VkPipelineShaderStageCreateInfo* pStages;
    const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
    const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
    const VkPipelineTessellationStateCreateInfo* pTessellationState;
    const VkPipelineViewportStateCreateInfo* pViewportState;
    const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
    const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
    const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
    const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
    const VkPipelineDynamicStateCreateInfo* pDynamicState;
    VkPipelineLayout layout;
    VkRenderPass renderPass;
    uint32_t subpass;
    VkPipeline basePipelineHandle;
    int32_t basePipelineIndex;
} VkGraphicsPipelineCreateInfo;

typedef struct VkPushConstantRange {
    VkShaderStageFlags stageFlags;
    uint32_t offset;
    uint32_t size;
} VkPushConstantRange;

typedef struct VkPipelineLayoutCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkPipelineLayoutCreateFlags flags;
    uint32_t setLayoutCount;
    const VkDescriptorSetLayout* pSetLayouts;
    uint32_t pushConstantRangeCount;
    const VkPushConstantRange* pPushConstantRanges;
} VkPipelineLayoutCreateInfo;

typedef struct VkSamplerCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkSamplerCreateFlags flags;
    VkFilter magFilter;
    VkFilter minFilter;
    VkSamplerMipmapMode mipmapMode;
    VkSamplerAddressMode addressModeU;
    VkSamplerAddressMode addressModeV;
    VkSamplerAddressMode addressModeW;
    float mipLodBias;
    VkBool32 anisotropyEnable;
    float maxAnisotropy;
    VkBool32 compareEnable;
    VkCompareOp compareOp;
    float minLod;
    float maxLod;
    VkBorderColor borderColor;
    VkBool32 unnormalizedCoordinates;
} VkSamplerCreateInfo;

/* ---- Descriptor set structs ---- */
typedef struct VkCopyDescriptorSet {
    VkStructureType sType;
    const void* pNext;
    VkDescriptorSet srcSet;
    uint32_t srcBinding;
    uint32_t srcArrayElement;
    VkDescriptorSet dstSet;
    uint32_t dstBinding;
    uint32_t dstArrayElement;
    uint32_t descriptorCount;
} VkCopyDescriptorSet;

typedef struct VkDescriptorBufferInfo {
    VkBuffer buffer;
    VkDeviceSize offset;
    VkDeviceSize range;
} VkDescriptorBufferInfo;

typedef struct VkDescriptorImageInfo {
    VkSampler sampler;
    VkImageView imageView;
    VkImageLayout imageLayout;
} VkDescriptorImageInfo;

typedef struct VkDescriptorPoolSize {
    VkDescriptorType type;
    uint32_t descriptorCount;
} VkDescriptorPoolSize;

typedef struct VkDescriptorPoolCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkDescriptorPoolCreateFlags flags;
    uint32_t maxSets;
    uint32_t poolSizeCount;
    const VkDescriptorPoolSize* pPoolSizes;
} VkDescriptorPoolCreateInfo;

typedef struct VkDescriptorSetAllocateInfo {
    VkStructureType sType;
    const void* pNext;
    VkDescriptorPool descriptorPool;
    uint32_t descriptorSetCount;
    const VkDescriptorSetLayout* pSetLayouts;
} VkDescriptorSetAllocateInfo;

typedef struct VkDescriptorSetLayoutBinding {
    uint32_t binding;
    VkDescriptorType descriptorType;
    uint32_t descriptorCount;
    VkShaderStageFlags stageFlags;
    const VkSampler* pImmutableSamplers;
} VkDescriptorSetLayoutBinding;

typedef struct VkDescriptorSetLayoutCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkDescriptorSetLayoutCreateFlags flags;
    uint32_t bindingCount;
    const VkDescriptorSetLayoutBinding* pBindings;
} VkDescriptorSetLayoutCreateInfo;

typedef struct VkWriteDescriptorSet {
    VkStructureType sType;
    const void* pNext;
    VkDescriptorSet dstSet;
    uint32_t dstBinding;
    uint32_t dstArrayElement;
    uint32_t descriptorCount;
    VkDescriptorType descriptorType;
    const VkDescriptorImageInfo* pImageInfo;
    const VkDescriptorBufferInfo* pBufferInfo;
    const VkBufferView* pTexelBufferView;
} VkWriteDescriptorSet;

/* ---- Render pass / framebuffer structs ---- */
typedef struct VkAttachmentDescription {
    VkAttachmentDescriptionFlags flags;
    VkFormat format;
    VkSampleCountFlagBits samples;
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    VkAttachmentLoadOp stencilLoadOp;
    VkAttachmentStoreOp stencilStoreOp;
    VkImageLayout initialLayout;
    VkImageLayout finalLayout;
} VkAttachmentDescription;

typedef struct VkAttachmentReference {
    uint32_t attachment;
    VkImageLayout layout;
} VkAttachmentReference;

typedef struct VkFramebufferCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkFramebufferCreateFlags flags;
    VkRenderPass renderPass;
    uint32_t attachmentCount;
    const VkImageView* pAttachments;
    uint32_t width;
    uint32_t height;
    uint32_t layers;
} VkFramebufferCreateInfo;

typedef struct VkSubpassDescription {
    VkSubpassDescriptionFlags flags;
    VkPipelineBindPoint pipelineBindPoint;
    uint32_t inputAttachmentCount;
    const VkAttachmentReference* pInputAttachments;
    uint32_t colorAttachmentCount;
    const VkAttachmentReference* pColorAttachments;
    const VkAttachmentReference* pResolveAttachments;
    const VkAttachmentReference* pDepthStencilAttachment;
    uint32_t preserveAttachmentCount;
    const uint32_t* pPreserveAttachments;
} VkSubpassDescription;

typedef struct VkSubpassDependency {
    uint32_t srcSubpass;
    uint32_t dstSubpass;
    VkPipelineStageFlags srcStageMask;
    VkPipelineStageFlags dstStageMask;
    VkAccessFlags srcAccessMask;
    VkAccessFlags dstAccessMask;
    VkDependencyFlags dependencyFlags;
} VkSubpassDependency;

typedef struct VkRenderPassCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkRenderPassCreateFlags flags;
    uint32_t attachmentCount;
    const VkAttachmentDescription* pAttachments;
    uint32_t subpassCount;
    const VkSubpassDescription* pSubpasses;
    uint32_t dependencyCount;
    const VkSubpassDependency* pDependencies;
} VkRenderPassCreateInfo;

/* ---- Command pool / command buffer structs ---- */
typedef struct VkCommandPoolCreateInfo {
    VkStructureType sType;
    const void* pNext;
    VkCommandPoolCreateFlags flags;
    uint32_t queueFamilyIndex;
} VkCommandPoolCreateInfo;

typedef struct VkCommandBufferAllocateInfo {
    VkStructureType sType;
    const void* pNext;
    VkCommandPool commandPool;
    VkCommandBufferLevel level;
    uint32_t commandBufferCount;
} VkCommandBufferAllocateInfo;

typedef struct VkCommandBufferInheritanceInfo {
    VkStructureType sType;
    const void* pNext;
    VkRenderPass renderPass;
    uint32_t subpass;
    VkFramebuffer framebuffer;
    VkBool32 occlusionQueryEnable;
    VkQueryControlFlags queryFlags;
    VkQueryPipelineStatisticFlags pipelineStatistics;
} VkCommandBufferInheritanceInfo;

typedef struct VkCommandBufferBeginInfo {
    VkStructureType sType;
    const void* pNext;
    VkCommandBufferUsageFlags flags;
    const VkCommandBufferInheritanceInfo* pInheritanceInfo;
} VkCommandBufferBeginInfo;

/* ---- Copy / clear / blit region structs ---- */
typedef struct VkBufferCopy {
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
} VkBufferCopy;

typedef struct VkImageSubresourceLayers {
    VkImageAspectFlags aspectMask;
    uint32_t mipLevel;
    uint32_t baseArrayLayer;
    uint32_t layerCount;
} VkImageSubresourceLayers;

typedef struct VkBufferImageCopy {
    VkDeviceSize bufferOffset;
    uint32_t bufferRowLength;
    uint32_t bufferImageHeight;
    VkImageSubresourceLayers imageSubresource;
    VkOffset3D imageOffset;
    VkExtent3D imageExtent;
} VkBufferImageCopy;

/* Clear color as float, signed or unsigned channels, overlaid in a union. */
typedef union VkClearColorValue {
    float float32[4];
    int32_t int32[4];
    uint32_t uint32[4];
} VkClearColorValue;

typedef struct VkClearDepthStencilValue {
    float depth;
    uint32_t stencil;
} VkClearDepthStencilValue;

typedef union VkClearValue {
    VkClearColorValue color;
    VkClearDepthStencilValue depthStencil;
} VkClearValue;

typedef struct VkClearAttachment {
    VkImageAspectFlags aspectMask;
    uint32_t colorAttachment;
    VkClearValue clearValue;
} VkClearAttachment;

typedef struct VkClearRect {
    VkRect2D rect;
    uint32_t baseArrayLayer;
    uint32_t layerCount;
} VkClearRect;

typedef struct VkImageBlit {
    VkImageSubresourceLayers srcSubresource;
    VkOffset3D srcOffsets[2];
    VkImageSubresourceLayers dstSubresource;
    VkOffset3D dstOffsets[2];
} VkImageBlit;

typedef struct VkImageCopy {
    VkImageSubresourceLayers srcSubresource;
    VkOffset3D srcOffset;
    VkImageSubresourceLayers dstSubresource;
    VkOffset3D dstOffset;
    VkExtent3D extent;
} VkImageCopy;

typedef struct VkImageResolve {
    VkImageSubresourceLayers srcSubresource;
    VkOffset3D srcOffset;
    VkImageSubresourceLayers dstSubresource;
    VkOffset3D dstOffset;
    VkExtent3D extent;
} VkImageResolve;

typedef struct VkRenderPassBeginInfo {
    VkStructureType sType;
    const void* pNext;
    VkRenderPass renderPass;
    VkFramebuffer framebuffer;
    VkRect2D renderArea;
    uint32_t clearValueCount;
    const VkClearValue* pClearValues;
} VkRenderPassBeginInfo;

/* ---- Core API entry-point function pointer types ---- */
typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
/* NOTE(review): the PFN_vkDestroyInstance declaration below is continued on
 * the following line of the file (its parameter list is completed there). */
typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks*
/* NOTE(review): PFN_vk* function-pointer typedefs for the Vulkan 1.0 core
 * entry points (instance/device queries, memory, sync objects, resources,
 * pipelines, descriptors, and vkCmd* recording commands). Signatures are
 * mandated by the Vulkan 1.0 specification and must stay byte-identical to
 * the Khronos registry; they exist so entry points can be resolved at
 * runtime via vkGetInstanceProcAddr / vkGetDeviceProcAddr. */
pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* 
pPropertyCount, VkExtensionProperties* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); 
typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* 
pEvent); typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice 
device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, 
VkPipelineLayout* pPipelineLayout); typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* 
pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, 
uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer 
commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer 
commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* 
/* NOTE(review): tail of the PFN_vkCmd* typedefs (queries, push constants,
 * render-pass begin/end, secondary command buffer execution), followed by the
 * static prototypes for builds that link the Vulkan loader directly. The
 * prototype section is compiled out when VK_NO_PROTOTYPES is defined, and
 * each prototype must match its PFN_* typedef exactly. Verbatim from the
 * Khronos registry; do not hand-edit. */
pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( VkInstance instance, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( VkInstance instance, uint32_t* 
pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( VkInstance instance, const char* pName); VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( VkDevice device, const char* pName); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( VkDevice device, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( uint32_t* 
pPropertyCount, VkLayerProperties* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( VkQueue queue); VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( VkDevice device); VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); VKAPI_ATTR void VKAPI_CALL vkFreeMemory( VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( VkDevice device, VkDeviceMemory memory); VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); VKAPI_ATTR void 
VKAPI_CALL vkGetImageMemoryRequirements( VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); VKAPI_ATTR void VKAPI_CALL vkDestroyFence( VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences); VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( VkDevice device, VkFence fence); VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( 
VkDevice device, VkEvent event); VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( VkDevice device, VkEvent event); VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( VkDevice device, VkEvent event); VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); VKAPI_ATTR void VKAPI_CALL vkDestroyImage( VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( VkDevice device, VkImageView imageView, const 
VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( 
VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); VKAPI_ATTR void VKAPI_CALL vkDestroySampler( VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL 
vkCreateRenderPass( VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( VkCommandBuffer commandBuffer); VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( 
VkCommandBuffer commandBuffer, float lineWidth); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( VkCommandBuffer commandBuffer, const float blendConstants[4]); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); VKAPI_ATTR void VKAPI_CALL vkCmdDraw( VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); VKAPI_ATTR void 
VKAPI_CALL vkCmdDrawIndexedIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); VKAPI_ATTR void 
VKAPI_CALL vkCmdClearDepthStencilImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); 
VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass( VkCommandBuffer commandBuffer, VkSubpassContents contents); VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass( VkCommandBuffer commandBuffer); VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); #endif #define VK_VERSION_1_1 1 // Vulkan 1.1 version number #define VK_API_VERSION_1_1 VK_MAKE_API_VERSION(0, 1, 1, 0)// Patch version should always be set to 0 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate) #define VK_MAX_DEVICE_GROUP_SIZE 32U #define VK_LUID_SIZE 8U #define VK_QUEUE_FAMILY_EXTERNAL (~1U) typedef enum VkPointClippingBehavior { VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0, VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1, VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF } 
VkPointClippingBehavior; typedef enum VkTessellationDomainOrigin { VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0, VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1, VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF } VkTessellationDomainOrigin; typedef enum VkSamplerYcbcrModelConversion { VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4, VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF } VkSamplerYcbcrModelConversion; typedef enum VkSamplerYcbcrRange { VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1, VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF } VkSamplerYcbcrRange; typedef enum VkChromaLocation { VK_CHROMA_LOCATION_COSITED_EVEN = 0, VK_CHROMA_LOCATION_MIDPOINT = 1, VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN, VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT, VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF } VkChromaLocation; typedef enum VkDescriptorUpdateTemplateType { 
VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkDescriptorUpdateTemplateType; typedef enum VkSubgroupFeatureFlagBits { VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSubgroupFeatureFlagBits; typedef VkFlags VkSubgroupFeatureFlags; typedef enum VkPeerMemoryFeatureFlagBits { VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001, VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002, VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004, VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008, VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPeerMemoryFeatureFlagBits; typedef VkFlags VkPeerMemoryFeatureFlags; typedef enum VkMemoryAllocateFlagBits { VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002, VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004, VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, 
VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkMemoryAllocateFlagBits; typedef VkFlags VkMemoryAllocateFlags; typedef VkFlags VkCommandPoolTrimFlags; typedef VkFlags VkDescriptorUpdateTemplateCreateFlags; typedef enum VkExternalMemoryHandleTypeFlagBits { VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA = 0x00000800, VK_EXTERNAL_MEMORY_HANDLE_TYPE_RDMA_ADDRESS_BIT_NV = 0x00001000, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalMemoryHandleTypeFlagBits; typedef VkFlags VkExternalMemoryHandleTypeFlags; typedef enum VkExternalMemoryFeatureFlagBits { VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004, VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalMemoryFeatureFlagBits; typedef VkFlags VkExternalMemoryFeatureFlags; typedef enum VkExternalFenceHandleTypeFlagBits { VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalFenceHandleTypeFlagBits; typedef VkFlags VkExternalFenceHandleTypeFlags; typedef enum VkExternalFenceFeatureFlagBits { VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001, VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002, VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, 
VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalFenceFeatureFlagBits; typedef VkFlags VkExternalFenceFeatureFlags; typedef enum VkFenceImportFlagBits { VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001, VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT, VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkFenceImportFlagBits; typedef VkFlags VkFenceImportFlags; typedef enum VkSemaphoreImportFlagBits { VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSemaphoreImportFlagBits; typedef VkFlags VkSemaphoreImportFlags; typedef enum VkExternalSemaphoreHandleTypeFlagBits { VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA = 0x00000080, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalSemaphoreHandleTypeFlagBits; typedef VkFlags VkExternalSemaphoreHandleTypeFlags; typedef enum VkExternalSemaphoreFeatureFlagBits { 
VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001, VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002, VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkExternalSemaphoreFeatureFlagBits; typedef VkFlags VkExternalSemaphoreFeatureFlags; typedef struct VkPhysicalDeviceSubgroupProperties { VkStructureType sType; void* pNext; uint32_t subgroupSize; VkShaderStageFlags supportedStages; VkSubgroupFeatureFlags supportedOperations; VkBool32 quadOperationsInAllStages; } VkPhysicalDeviceSubgroupProperties; typedef struct VkBindBufferMemoryInfo { VkStructureType sType; const void* pNext; VkBuffer buffer; VkDeviceMemory memory; VkDeviceSize memoryOffset; } VkBindBufferMemoryInfo; typedef struct VkBindImageMemoryInfo { VkStructureType sType; const void* pNext; VkImage image; VkDeviceMemory memory; VkDeviceSize memoryOffset; } VkBindImageMemoryInfo; typedef struct VkPhysicalDevice16BitStorageFeatures { VkStructureType sType; void* pNext; VkBool32 storageBuffer16BitAccess; VkBool32 uniformAndStorageBuffer16BitAccess; VkBool32 storagePushConstant16; VkBool32 storageInputOutput16; } VkPhysicalDevice16BitStorageFeatures; typedef struct VkMemoryDedicatedRequirements { VkStructureType sType; void* pNext; VkBool32 prefersDedicatedAllocation; VkBool32 requiresDedicatedAllocation; } VkMemoryDedicatedRequirements; typedef struct VkMemoryDedicatedAllocateInfo { VkStructureType sType; const void* pNext; VkImage image; VkBuffer buffer; } VkMemoryDedicatedAllocateInfo; typedef struct VkMemoryAllocateFlagsInfo { VkStructureType sType; const void* pNext; VkMemoryAllocateFlags flags; uint32_t deviceMask; } VkMemoryAllocateFlagsInfo; typedef struct VkDeviceGroupRenderPassBeginInfo { VkStructureType sType; const void* pNext; uint32_t deviceMask; uint32_t deviceRenderAreaCount; 
const VkRect2D* pDeviceRenderAreas; } VkDeviceGroupRenderPassBeginInfo; typedef struct VkDeviceGroupCommandBufferBeginInfo { VkStructureType sType; const void* pNext; uint32_t deviceMask; } VkDeviceGroupCommandBufferBeginInfo; typedef struct VkDeviceGroupSubmitInfo { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreCount; const uint32_t* pWaitSemaphoreDeviceIndices; uint32_t commandBufferCount; const uint32_t* pCommandBufferDeviceMasks; uint32_t signalSemaphoreCount; const uint32_t* pSignalSemaphoreDeviceIndices; } VkDeviceGroupSubmitInfo; typedef struct VkDeviceGroupBindSparseInfo { VkStructureType sType; const void* pNext; uint32_t resourceDeviceIndex; uint32_t memoryDeviceIndex; } VkDeviceGroupBindSparseInfo; typedef struct VkBindBufferMemoryDeviceGroupInfo { VkStructureType sType; const void* pNext; uint32_t deviceIndexCount; const uint32_t* pDeviceIndices; } VkBindBufferMemoryDeviceGroupInfo; typedef struct VkBindImageMemoryDeviceGroupInfo { VkStructureType sType; const void* pNext; uint32_t deviceIndexCount; const uint32_t* pDeviceIndices; uint32_t splitInstanceBindRegionCount; const VkRect2D* pSplitInstanceBindRegions; } VkBindImageMemoryDeviceGroupInfo; typedef struct VkPhysicalDeviceGroupProperties { VkStructureType sType; void* pNext; uint32_t physicalDeviceCount; VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; VkBool32 subsetAllocation; } VkPhysicalDeviceGroupProperties; typedef struct VkDeviceGroupDeviceCreateInfo { VkStructureType sType; const void* pNext; uint32_t physicalDeviceCount; const VkPhysicalDevice* pPhysicalDevices; } VkDeviceGroupDeviceCreateInfo; typedef struct VkBufferMemoryRequirementsInfo2 { VkStructureType sType; const void* pNext; VkBuffer buffer; } VkBufferMemoryRequirementsInfo2; typedef struct VkImageMemoryRequirementsInfo2 { VkStructureType sType; const void* pNext; VkImage image; } VkImageMemoryRequirementsInfo2; typedef struct VkImageSparseMemoryRequirementsInfo2 { VkStructureType sType; const void* 
pNext; VkImage image; } VkImageSparseMemoryRequirementsInfo2; typedef struct VkMemoryRequirements2 { VkStructureType sType; void* pNext; VkMemoryRequirements memoryRequirements; } VkMemoryRequirements2; typedef struct VkSparseImageMemoryRequirements2 { VkStructureType sType; void* pNext; VkSparseImageMemoryRequirements memoryRequirements; } VkSparseImageMemoryRequirements2; typedef struct VkPhysicalDeviceFeatures2 { VkStructureType sType; void* pNext; VkPhysicalDeviceFeatures features; } VkPhysicalDeviceFeatures2; typedef struct VkPhysicalDeviceProperties2 { VkStructureType sType; void* pNext; VkPhysicalDeviceProperties properties; } VkPhysicalDeviceProperties2; typedef struct VkFormatProperties2 { VkStructureType sType; void* pNext; VkFormatProperties formatProperties; } VkFormatProperties2; typedef struct VkImageFormatProperties2 { VkStructureType sType; void* pNext; VkImageFormatProperties imageFormatProperties; } VkImageFormatProperties2; typedef struct VkPhysicalDeviceImageFormatInfo2 { VkStructureType sType; const void* pNext; VkFormat format; VkImageType type; VkImageTiling tiling; VkImageUsageFlags usage; VkImageCreateFlags flags; } VkPhysicalDeviceImageFormatInfo2; typedef struct VkQueueFamilyProperties2 { VkStructureType sType; void* pNext; VkQueueFamilyProperties queueFamilyProperties; } VkQueueFamilyProperties2; typedef struct VkPhysicalDeviceMemoryProperties2 { VkStructureType sType; void* pNext; VkPhysicalDeviceMemoryProperties memoryProperties; } VkPhysicalDeviceMemoryProperties2; typedef struct VkSparseImageFormatProperties2 { VkStructureType sType; void* pNext; VkSparseImageFormatProperties properties; } VkSparseImageFormatProperties2; typedef struct VkPhysicalDeviceSparseImageFormatInfo2 { VkStructureType sType; const void* pNext; VkFormat format; VkImageType type; VkSampleCountFlagBits samples; VkImageUsageFlags usage; VkImageTiling tiling; } VkPhysicalDeviceSparseImageFormatInfo2; typedef struct VkPhysicalDevicePointClippingProperties { 
VkStructureType sType; void* pNext; VkPointClippingBehavior pointClippingBehavior; } VkPhysicalDevicePointClippingProperties; typedef struct VkInputAttachmentAspectReference { uint32_t subpass; uint32_t inputAttachmentIndex; VkImageAspectFlags aspectMask; } VkInputAttachmentAspectReference; typedef struct VkRenderPassInputAttachmentAspectCreateInfo { VkStructureType sType; const void* pNext; uint32_t aspectReferenceCount; const VkInputAttachmentAspectReference* pAspectReferences; } VkRenderPassInputAttachmentAspectCreateInfo; typedef struct VkImageViewUsageCreateInfo { VkStructureType sType; const void* pNext; VkImageUsageFlags usage; } VkImageViewUsageCreateInfo; typedef struct VkPipelineTessellationDomainOriginStateCreateInfo { VkStructureType sType; const void* pNext; VkTessellationDomainOrigin domainOrigin; } VkPipelineTessellationDomainOriginStateCreateInfo; typedef struct VkRenderPassMultiviewCreateInfo { VkStructureType sType; const void* pNext; uint32_t subpassCount; const uint32_t* pViewMasks; uint32_t dependencyCount; const int32_t* pViewOffsets; uint32_t correlationMaskCount; const uint32_t* pCorrelationMasks; } VkRenderPassMultiviewCreateInfo; typedef struct VkPhysicalDeviceMultiviewFeatures { VkStructureType sType; void* pNext; VkBool32 multiview; VkBool32 multiviewGeometryShader; VkBool32 multiviewTessellationShader; } VkPhysicalDeviceMultiviewFeatures; typedef struct VkPhysicalDeviceMultiviewProperties { VkStructureType sType; void* pNext; uint32_t maxMultiviewViewCount; uint32_t maxMultiviewInstanceIndex; } VkPhysicalDeviceMultiviewProperties; typedef struct VkPhysicalDeviceVariablePointersFeatures { VkStructureType sType; void* pNext; VkBool32 variablePointersStorageBuffer; VkBool32 variablePointers; } VkPhysicalDeviceVariablePointersFeatures; typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures; typedef struct VkPhysicalDeviceProtectedMemoryFeatures { VkStructureType sType; void* pNext; VkBool32 protectedMemory; 
} VkPhysicalDeviceProtectedMemoryFeatures; typedef struct VkPhysicalDeviceProtectedMemoryProperties { VkStructureType sType; void* pNext; VkBool32 protectedNoFault; } VkPhysicalDeviceProtectedMemoryProperties; typedef struct VkDeviceQueueInfo2 { VkStructureType sType; const void* pNext; VkDeviceQueueCreateFlags flags; uint32_t queueFamilyIndex; uint32_t queueIndex; } VkDeviceQueueInfo2; typedef struct VkProtectedSubmitInfo { VkStructureType sType; const void* pNext; VkBool32 protectedSubmit; } VkProtectedSubmitInfo; typedef struct VkSamplerYcbcrConversionCreateInfo { VkStructureType sType; const void* pNext; VkFormat format; VkSamplerYcbcrModelConversion ycbcrModel; VkSamplerYcbcrRange ycbcrRange; VkComponentMapping components; VkChromaLocation xChromaOffset; VkChromaLocation yChromaOffset; VkFilter chromaFilter; VkBool32 forceExplicitReconstruction; } VkSamplerYcbcrConversionCreateInfo; typedef struct VkSamplerYcbcrConversionInfo { VkStructureType sType; const void* pNext; VkSamplerYcbcrConversion conversion; } VkSamplerYcbcrConversionInfo; typedef struct VkBindImagePlaneMemoryInfo { VkStructureType sType; const void* pNext; VkImageAspectFlagBits planeAspect; } VkBindImagePlaneMemoryInfo; typedef struct VkImagePlaneMemoryRequirementsInfo { VkStructureType sType; const void* pNext; VkImageAspectFlagBits planeAspect; } VkImagePlaneMemoryRequirementsInfo; typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures { VkStructureType sType; void* pNext; VkBool32 samplerYcbcrConversion; } VkPhysicalDeviceSamplerYcbcrConversionFeatures; typedef struct VkSamplerYcbcrConversionImageFormatProperties { VkStructureType sType; void* pNext; uint32_t combinedImageSamplerDescriptorCount; } VkSamplerYcbcrConversionImageFormatProperties; typedef struct VkDescriptorUpdateTemplateEntry { uint32_t dstBinding; uint32_t dstArrayElement; uint32_t descriptorCount; VkDescriptorType descriptorType; size_t offset; size_t stride; } VkDescriptorUpdateTemplateEntry; typedef struct 
VkDescriptorUpdateTemplateCreateInfo { VkStructureType sType; const void* pNext; VkDescriptorUpdateTemplateCreateFlags flags; uint32_t descriptorUpdateEntryCount; const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; VkDescriptorUpdateTemplateType templateType; VkDescriptorSetLayout descriptorSetLayout; VkPipelineBindPoint pipelineBindPoint; VkPipelineLayout pipelineLayout; uint32_t set; } VkDescriptorUpdateTemplateCreateInfo; typedef struct VkExternalMemoryProperties { VkExternalMemoryFeatureFlags externalMemoryFeatures; VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; VkExternalMemoryHandleTypeFlags compatibleHandleTypes; } VkExternalMemoryProperties; typedef struct VkPhysicalDeviceExternalImageFormatInfo { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagBits handleType; } VkPhysicalDeviceExternalImageFormatInfo; typedef struct VkExternalImageFormatProperties { VkStructureType sType; void* pNext; VkExternalMemoryProperties externalMemoryProperties; } VkExternalImageFormatProperties; typedef struct VkPhysicalDeviceExternalBufferInfo { VkStructureType sType; const void* pNext; VkBufferCreateFlags flags; VkBufferUsageFlags usage; VkExternalMemoryHandleTypeFlagBits handleType; } VkPhysicalDeviceExternalBufferInfo; typedef struct VkExternalBufferProperties { VkStructureType sType; void* pNext; VkExternalMemoryProperties externalMemoryProperties; } VkExternalBufferProperties; typedef struct VkPhysicalDeviceIDProperties { VkStructureType sType; void* pNext; uint8_t deviceUUID[VK_UUID_SIZE]; uint8_t driverUUID[VK_UUID_SIZE]; uint8_t deviceLUID[VK_LUID_SIZE]; uint32_t deviceNodeMask; VkBool32 deviceLUIDValid; } VkPhysicalDeviceIDProperties; typedef struct VkExternalMemoryImageCreateInfo { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlags handleTypes; } VkExternalMemoryImageCreateInfo; typedef struct VkExternalMemoryBufferCreateInfo { VkStructureType sType; const void* pNext; 
VkExternalMemoryHandleTypeFlags handleTypes; } VkExternalMemoryBufferCreateInfo; typedef struct VkExportMemoryAllocateInfo { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlags handleTypes; } VkExportMemoryAllocateInfo; typedef struct VkPhysicalDeviceExternalFenceInfo { VkStructureType sType; const void* pNext; VkExternalFenceHandleTypeFlagBits handleType; } VkPhysicalDeviceExternalFenceInfo; typedef struct VkExternalFenceProperties { VkStructureType sType; void* pNext; VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes; VkExternalFenceHandleTypeFlags compatibleHandleTypes; VkExternalFenceFeatureFlags externalFenceFeatures; } VkExternalFenceProperties; typedef struct VkExportFenceCreateInfo { VkStructureType sType; const void* pNext; VkExternalFenceHandleTypeFlags handleTypes; } VkExportFenceCreateInfo; typedef struct VkExportSemaphoreCreateInfo { VkStructureType sType; const void* pNext; VkExternalSemaphoreHandleTypeFlags handleTypes; } VkExportSemaphoreCreateInfo; typedef struct VkPhysicalDeviceExternalSemaphoreInfo { VkStructureType sType; const void* pNext; VkExternalSemaphoreHandleTypeFlagBits handleType; } VkPhysicalDeviceExternalSemaphoreInfo; typedef struct VkExternalSemaphoreProperties { VkStructureType sType; void* pNext; VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes; VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures; } VkExternalSemaphoreProperties; typedef struct VkPhysicalDeviceMaintenance3Properties { VkStructureType sType; void* pNext; uint32_t maxPerSetDescriptors; VkDeviceSize maxMemoryAllocationSize; } VkPhysicalDeviceMaintenance3Properties; typedef struct VkDescriptorSetLayoutSupport { VkStructureType sType; void* pNext; VkBool32 supported; } VkDescriptorSetLayoutSupport; typedef struct VkPhysicalDeviceShaderDrawParametersFeatures { VkStructureType sType; void* pNext; VkBool32 shaderDrawParameters; } 
VkPhysicalDeviceShaderDrawParametersFeatures; typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures; typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion); typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask); typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice 
physicalDevice, VkPhysicalDeviceProperties2* pProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, 
VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion( uint32_t* pApiVersion); VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( VkCommandBuffer commandBuffer, uint32_t deviceMask); VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t 
groupCountY, uint32_t groupCountZ); VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* 
pProperties); VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( VkDevice device, const 
VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); #endif #define VK_VERSION_1_2 1 // Vulkan 1.2 version number #define VK_API_VERSION_1_2 VK_MAKE_API_VERSION(0, 1, 2, 0)// Patch version should always be set to 0 #define VK_MAX_DRIVER_NAME_SIZE 256U #define VK_MAX_DRIVER_INFO_SIZE 256U typedef enum VkDriverId { VK_DRIVER_ID_AMD_PROPRIETARY = 1, VK_DRIVER_ID_AMD_OPEN_SOURCE = 2, VK_DRIVER_ID_MESA_RADV = 3, VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4, VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5, VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6, VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7, VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8, VK_DRIVER_ID_ARM_PROPRIETARY = 9, VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10, VK_DRIVER_ID_GGP_PROPRIETARY = 11, VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12, VK_DRIVER_ID_MESA_LLVMPIPE = 13, VK_DRIVER_ID_MOLTENVK = 14, VK_DRIVER_ID_COREAVI_PROPRIETARY = 15, VK_DRIVER_ID_JUICE_PROPRIETARY = 16, VK_DRIVER_ID_VERISILICON_PROPRIETARY = 17, VK_DRIVER_ID_MESA_TURNIP = 18, VK_DRIVER_ID_MESA_V3DV = 19, VK_DRIVER_ID_MESA_PANVK = 20, VK_DRIVER_ID_SAMSUNG_PROPRIETARY = 21, VK_DRIVER_ID_MESA_VENUS = 22, VK_DRIVER_ID_MESA_DOZEN = 23, VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY, VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE, VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV, VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY, VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS, VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA, VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY, VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY, VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY, VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER, VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY, VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = 
VK_DRIVER_ID_BROADCOM_PROPRIETARY, VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF } VkDriverId; typedef enum VkShaderFloatControlsIndependence { VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE, VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF } VkShaderFloatControlsIndependence; typedef enum VkSamplerReductionMode { VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0, VK_SAMPLER_REDUCTION_MODE_MIN = 1, VK_SAMPLER_REDUCTION_MODE_MAX = 2, VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN, VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX, VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF } VkSamplerReductionMode; typedef enum VkSemaphoreType { VK_SEMAPHORE_TYPE_BINARY = 0, VK_SEMAPHORE_TYPE_TIMELINE = 1, VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY, VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE, VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkSemaphoreType; typedef enum VkResolveModeFlagBits { VK_RESOLVE_MODE_NONE = 0, VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001, VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002, VK_RESOLVE_MODE_MIN_BIT = 0x00000004, VK_RESOLVE_MODE_MAX_BIT = 0x00000008, VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE, VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT, VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT, VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT, VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkResolveModeFlagBits; typedef 
VkFlags VkResolveModeFlags; typedef enum VkDescriptorBindingFlagBits { VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001, VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002, VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004, VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT, VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkDescriptorBindingFlagBits; typedef VkFlags VkDescriptorBindingFlags; typedef enum VkSemaphoreWaitFlagBits { VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001, VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT, VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSemaphoreWaitFlagBits; typedef VkFlags VkSemaphoreWaitFlags; typedef struct VkPhysicalDeviceVulkan11Features { VkStructureType sType; void* pNext; VkBool32 storageBuffer16BitAccess; VkBool32 uniformAndStorageBuffer16BitAccess; VkBool32 storagePushConstant16; VkBool32 storageInputOutput16; VkBool32 multiview; VkBool32 multiviewGeometryShader; VkBool32 multiviewTessellationShader; VkBool32 variablePointersStorageBuffer; VkBool32 variablePointers; VkBool32 protectedMemory; VkBool32 samplerYcbcrConversion; VkBool32 shaderDrawParameters; } VkPhysicalDeviceVulkan11Features; typedef struct VkPhysicalDeviceVulkan11Properties { VkStructureType sType; void* pNext; uint8_t deviceUUID[VK_UUID_SIZE]; uint8_t driverUUID[VK_UUID_SIZE]; uint8_t deviceLUID[VK_LUID_SIZE]; uint32_t deviceNodeMask; VkBool32 deviceLUIDValid; uint32_t subgroupSize; VkShaderStageFlags subgroupSupportedStages; VkSubgroupFeatureFlags subgroupSupportedOperations; VkBool32 
subgroupQuadOperationsInAllStages; VkPointClippingBehavior pointClippingBehavior; uint32_t maxMultiviewViewCount; uint32_t maxMultiviewInstanceIndex; VkBool32 protectedNoFault; uint32_t maxPerSetDescriptors; VkDeviceSize maxMemoryAllocationSize; } VkPhysicalDeviceVulkan11Properties; typedef struct VkPhysicalDeviceVulkan12Features { VkStructureType sType; void* pNext; VkBool32 samplerMirrorClampToEdge; VkBool32 drawIndirectCount; VkBool32 storageBuffer8BitAccess; VkBool32 uniformAndStorageBuffer8BitAccess; VkBool32 storagePushConstant8; VkBool32 shaderBufferInt64Atomics; VkBool32 shaderSharedInt64Atomics; VkBool32 shaderFloat16; VkBool32 shaderInt8; VkBool32 descriptorIndexing; VkBool32 shaderInputAttachmentArrayDynamicIndexing; VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; VkBool32 shaderUniformBufferArrayNonUniformIndexing; VkBool32 shaderSampledImageArrayNonUniformIndexing; VkBool32 shaderStorageBufferArrayNonUniformIndexing; VkBool32 shaderStorageImageArrayNonUniformIndexing; VkBool32 shaderInputAttachmentArrayNonUniformIndexing; VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; VkBool32 descriptorBindingUniformBufferUpdateAfterBind; VkBool32 descriptorBindingSampledImageUpdateAfterBind; VkBool32 descriptorBindingStorageImageUpdateAfterBind; VkBool32 descriptorBindingStorageBufferUpdateAfterBind; VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; VkBool32 descriptorBindingUpdateUnusedWhilePending; VkBool32 descriptorBindingPartiallyBound; VkBool32 descriptorBindingVariableDescriptorCount; VkBool32 runtimeDescriptorArray; VkBool32 samplerFilterMinmax; VkBool32 scalarBlockLayout; VkBool32 imagelessFramebuffer; VkBool32 uniformBufferStandardLayout; VkBool32 shaderSubgroupExtendedTypes; VkBool32 separateDepthStencilLayouts; VkBool32 hostQueryReset; VkBool32 
timelineSemaphore; VkBool32 bufferDeviceAddress; VkBool32 bufferDeviceAddressCaptureReplay; VkBool32 bufferDeviceAddressMultiDevice; VkBool32 vulkanMemoryModel; VkBool32 vulkanMemoryModelDeviceScope; VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; VkBool32 shaderOutputViewportIndex; VkBool32 shaderOutputLayer; VkBool32 subgroupBroadcastDynamicId; } VkPhysicalDeviceVulkan12Features; typedef struct VkConformanceVersion { uint8_t major; uint8_t minor; uint8_t subminor; uint8_t patch; } VkConformanceVersion; typedef struct VkPhysicalDeviceVulkan12Properties { VkStructureType sType; void* pNext; VkDriverId driverID; char driverName[VK_MAX_DRIVER_NAME_SIZE]; char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; VkConformanceVersion conformanceVersion; VkShaderFloatControlsIndependence denormBehaviorIndependence; VkShaderFloatControlsIndependence roundingModeIndependence; VkBool32 shaderSignedZeroInfNanPreserveFloat16; VkBool32 shaderSignedZeroInfNanPreserveFloat32; VkBool32 shaderSignedZeroInfNanPreserveFloat64; VkBool32 shaderDenormPreserveFloat16; VkBool32 shaderDenormPreserveFloat32; VkBool32 shaderDenormPreserveFloat64; VkBool32 shaderDenormFlushToZeroFloat16; VkBool32 shaderDenormFlushToZeroFloat32; VkBool32 shaderDenormFlushToZeroFloat64; VkBool32 shaderRoundingModeRTEFloat16; VkBool32 shaderRoundingModeRTEFloat32; VkBool32 shaderRoundingModeRTEFloat64; VkBool32 shaderRoundingModeRTZFloat16; VkBool32 shaderRoundingModeRTZFloat32; VkBool32 shaderRoundingModeRTZFloat64; uint32_t maxUpdateAfterBindDescriptorsInAllPools; VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; VkBool32 shaderSampledImageArrayNonUniformIndexingNative; VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; VkBool32 shaderStorageImageArrayNonUniformIndexingNative; VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; VkBool32 robustBufferAccessUpdateAfterBind; VkBool32 quadDivergentImplicitLod; uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; uint32_t 
maxPerStageDescriptorUpdateAfterBindUniformBuffers; uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; uint32_t maxPerStageUpdateAfterBindResources; uint32_t maxDescriptorSetUpdateAfterBindSamplers; uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; uint32_t maxDescriptorSetUpdateAfterBindSampledImages; uint32_t maxDescriptorSetUpdateAfterBindStorageImages; uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; VkResolveModeFlags supportedDepthResolveModes; VkResolveModeFlags supportedStencilResolveModes; VkBool32 independentResolveNone; VkBool32 independentResolve; VkBool32 filterMinmaxSingleComponentFormats; VkBool32 filterMinmaxImageComponentMapping; uint64_t maxTimelineSemaphoreValueDifference; VkSampleCountFlags framebufferIntegerColorSampleCounts; } VkPhysicalDeviceVulkan12Properties; typedef struct VkImageFormatListCreateInfo { VkStructureType sType; const void* pNext; uint32_t viewFormatCount; const VkFormat* pViewFormats; } VkImageFormatListCreateInfo; typedef struct VkAttachmentDescription2 { VkStructureType sType; const void* pNext; VkAttachmentDescriptionFlags flags; VkFormat format; VkSampleCountFlagBits samples; VkAttachmentLoadOp loadOp; VkAttachmentStoreOp storeOp; VkAttachmentLoadOp stencilLoadOp; VkAttachmentStoreOp stencilStoreOp; VkImageLayout initialLayout; VkImageLayout finalLayout; } VkAttachmentDescription2; typedef struct VkAttachmentReference2 { VkStructureType sType; const void* pNext; uint32_t attachment; VkImageLayout layout; VkImageAspectFlags aspectMask; } VkAttachmentReference2; typedef struct VkSubpassDescription2 { VkStructureType sType; const void* pNext; 
VkSubpassDescriptionFlags flags; VkPipelineBindPoint pipelineBindPoint; uint32_t viewMask; uint32_t inputAttachmentCount; const VkAttachmentReference2* pInputAttachments; uint32_t colorAttachmentCount; const VkAttachmentReference2* pColorAttachments; const VkAttachmentReference2* pResolveAttachments; const VkAttachmentReference2* pDepthStencilAttachment; uint32_t preserveAttachmentCount; const uint32_t* pPreserveAttachments; } VkSubpassDescription2; typedef struct VkSubpassDependency2 { VkStructureType sType; const void* pNext; uint32_t srcSubpass; uint32_t dstSubpass; VkPipelineStageFlags srcStageMask; VkPipelineStageFlags dstStageMask; VkAccessFlags srcAccessMask; VkAccessFlags dstAccessMask; VkDependencyFlags dependencyFlags; int32_t viewOffset; } VkSubpassDependency2; typedef struct VkRenderPassCreateInfo2 { VkStructureType sType; const void* pNext; VkRenderPassCreateFlags flags; uint32_t attachmentCount; const VkAttachmentDescription2* pAttachments; uint32_t subpassCount; const VkSubpassDescription2* pSubpasses; uint32_t dependencyCount; const VkSubpassDependency2* pDependencies; uint32_t correlatedViewMaskCount; const uint32_t* pCorrelatedViewMasks; } VkRenderPassCreateInfo2; typedef struct VkSubpassBeginInfo { VkStructureType sType; const void* pNext; VkSubpassContents contents; } VkSubpassBeginInfo; typedef struct VkSubpassEndInfo { VkStructureType sType; const void* pNext; } VkSubpassEndInfo; typedef struct VkPhysicalDevice8BitStorageFeatures { VkStructureType sType; void* pNext; VkBool32 storageBuffer8BitAccess; VkBool32 uniformAndStorageBuffer8BitAccess; VkBool32 storagePushConstant8; } VkPhysicalDevice8BitStorageFeatures; typedef struct VkPhysicalDeviceDriverProperties { VkStructureType sType; void* pNext; VkDriverId driverID; char driverName[VK_MAX_DRIVER_NAME_SIZE]; char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; VkConformanceVersion conformanceVersion; } VkPhysicalDeviceDriverProperties; typedef struct VkPhysicalDeviceShaderAtomicInt64Features { 
VkStructureType sType; void* pNext; VkBool32 shaderBufferInt64Atomics; VkBool32 shaderSharedInt64Atomics; } VkPhysicalDeviceShaderAtomicInt64Features; typedef struct VkPhysicalDeviceShaderFloat16Int8Features { VkStructureType sType; void* pNext; VkBool32 shaderFloat16; VkBool32 shaderInt8; } VkPhysicalDeviceShaderFloat16Int8Features; typedef struct VkPhysicalDeviceFloatControlsProperties { VkStructureType sType; void* pNext; VkShaderFloatControlsIndependence denormBehaviorIndependence; VkShaderFloatControlsIndependence roundingModeIndependence; VkBool32 shaderSignedZeroInfNanPreserveFloat16; VkBool32 shaderSignedZeroInfNanPreserveFloat32; VkBool32 shaderSignedZeroInfNanPreserveFloat64; VkBool32 shaderDenormPreserveFloat16; VkBool32 shaderDenormPreserveFloat32; VkBool32 shaderDenormPreserveFloat64; VkBool32 shaderDenormFlushToZeroFloat16; VkBool32 shaderDenormFlushToZeroFloat32; VkBool32 shaderDenormFlushToZeroFloat64; VkBool32 shaderRoundingModeRTEFloat16; VkBool32 shaderRoundingModeRTEFloat32; VkBool32 shaderRoundingModeRTEFloat64; VkBool32 shaderRoundingModeRTZFloat16; VkBool32 shaderRoundingModeRTZFloat32; VkBool32 shaderRoundingModeRTZFloat64; } VkPhysicalDeviceFloatControlsProperties; typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo { VkStructureType sType; const void* pNext; uint32_t bindingCount; const VkDescriptorBindingFlags* pBindingFlags; } VkDescriptorSetLayoutBindingFlagsCreateInfo; typedef struct VkPhysicalDeviceDescriptorIndexingFeatures { VkStructureType sType; void* pNext; VkBool32 shaderInputAttachmentArrayDynamicIndexing; VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; VkBool32 shaderUniformBufferArrayNonUniformIndexing; VkBool32 shaderSampledImageArrayNonUniformIndexing; VkBool32 shaderStorageBufferArrayNonUniformIndexing; VkBool32 shaderStorageImageArrayNonUniformIndexing; VkBool32 shaderInputAttachmentArrayNonUniformIndexing; VkBool32 
shaderUniformTexelBufferArrayNonUniformIndexing; VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; VkBool32 descriptorBindingUniformBufferUpdateAfterBind; VkBool32 descriptorBindingSampledImageUpdateAfterBind; VkBool32 descriptorBindingStorageImageUpdateAfterBind; VkBool32 descriptorBindingStorageBufferUpdateAfterBind; VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; VkBool32 descriptorBindingUpdateUnusedWhilePending; VkBool32 descriptorBindingPartiallyBound; VkBool32 descriptorBindingVariableDescriptorCount; VkBool32 runtimeDescriptorArray; } VkPhysicalDeviceDescriptorIndexingFeatures; typedef struct VkPhysicalDeviceDescriptorIndexingProperties { VkStructureType sType; void* pNext; uint32_t maxUpdateAfterBindDescriptorsInAllPools; VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; VkBool32 shaderSampledImageArrayNonUniformIndexingNative; VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; VkBool32 shaderStorageImageArrayNonUniformIndexingNative; VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; VkBool32 robustBufferAccessUpdateAfterBind; VkBool32 quadDivergentImplicitLod; uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; uint32_t maxPerStageUpdateAfterBindResources; uint32_t maxDescriptorSetUpdateAfterBindSamplers; uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; uint32_t maxDescriptorSetUpdateAfterBindSampledImages; uint32_t 
maxDescriptorSetUpdateAfterBindStorageImages; uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; } VkPhysicalDeviceDescriptorIndexingProperties; typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo { VkStructureType sType; const void* pNext; uint32_t descriptorSetCount; const uint32_t* pDescriptorCounts; } VkDescriptorSetVariableDescriptorCountAllocateInfo; typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport { VkStructureType sType; void* pNext; uint32_t maxVariableDescriptorCount; } VkDescriptorSetVariableDescriptorCountLayoutSupport; typedef struct VkSubpassDescriptionDepthStencilResolve { VkStructureType sType; const void* pNext; VkResolveModeFlagBits depthResolveMode; VkResolveModeFlagBits stencilResolveMode; const VkAttachmentReference2* pDepthStencilResolveAttachment; } VkSubpassDescriptionDepthStencilResolve; typedef struct VkPhysicalDeviceDepthStencilResolveProperties { VkStructureType sType; void* pNext; VkResolveModeFlags supportedDepthResolveModes; VkResolveModeFlags supportedStencilResolveModes; VkBool32 independentResolveNone; VkBool32 independentResolve; } VkPhysicalDeviceDepthStencilResolveProperties; typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures { VkStructureType sType; void* pNext; VkBool32 scalarBlockLayout; } VkPhysicalDeviceScalarBlockLayoutFeatures; typedef struct VkImageStencilUsageCreateInfo { VkStructureType sType; const void* pNext; VkImageUsageFlags stencilUsage; } VkImageStencilUsageCreateInfo; typedef struct VkSamplerReductionModeCreateInfo { VkStructureType sType; const void* pNext; VkSamplerReductionMode reductionMode; } VkSamplerReductionModeCreateInfo; typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties { VkStructureType sType; void* pNext; VkBool32 filterMinmaxSingleComponentFormats; VkBool32 filterMinmaxImageComponentMapping; } VkPhysicalDeviceSamplerFilterMinmaxProperties; typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures { VkStructureType sType; void* pNext; VkBool32 
vulkanMemoryModel; VkBool32 vulkanMemoryModelDeviceScope; VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; } VkPhysicalDeviceVulkanMemoryModelFeatures; typedef struct VkPhysicalDeviceImagelessFramebufferFeatures { VkStructureType sType; void* pNext; VkBool32 imagelessFramebuffer; } VkPhysicalDeviceImagelessFramebufferFeatures; typedef struct VkFramebufferAttachmentImageInfo { VkStructureType sType; const void* pNext; VkImageCreateFlags flags; VkImageUsageFlags usage; uint32_t width; uint32_t height; uint32_t layerCount; uint32_t viewFormatCount; const VkFormat* pViewFormats; } VkFramebufferAttachmentImageInfo; typedef struct VkFramebufferAttachmentsCreateInfo { VkStructureType sType; const void* pNext; uint32_t attachmentImageInfoCount; const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos; } VkFramebufferAttachmentsCreateInfo; typedef struct VkRenderPassAttachmentBeginInfo { VkStructureType sType; const void* pNext; uint32_t attachmentCount; const VkImageView* pAttachments; } VkRenderPassAttachmentBeginInfo; typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures { VkStructureType sType; void* pNext; VkBool32 uniformBufferStandardLayout; } VkPhysicalDeviceUniformBufferStandardLayoutFeatures; typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures { VkStructureType sType; void* pNext; VkBool32 shaderSubgroupExtendedTypes; } VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures; typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures { VkStructureType sType; void* pNext; VkBool32 separateDepthStencilLayouts; } VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures; typedef struct VkAttachmentReferenceStencilLayout { VkStructureType sType; void* pNext; VkImageLayout stencilLayout; } VkAttachmentReferenceStencilLayout; typedef struct VkAttachmentDescriptionStencilLayout { VkStructureType sType; void* pNext; VkImageLayout stencilInitialLayout; VkImageLayout stencilFinalLayout; } VkAttachmentDescriptionStencilLayout; typedef 
struct VkPhysicalDeviceHostQueryResetFeatures { VkStructureType sType; void* pNext; VkBool32 hostQueryReset; } VkPhysicalDeviceHostQueryResetFeatures; typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures { VkStructureType sType; void* pNext; VkBool32 timelineSemaphore; } VkPhysicalDeviceTimelineSemaphoreFeatures; typedef struct VkPhysicalDeviceTimelineSemaphoreProperties { VkStructureType sType; void* pNext; uint64_t maxTimelineSemaphoreValueDifference; } VkPhysicalDeviceTimelineSemaphoreProperties; typedef struct VkSemaphoreTypeCreateInfo { VkStructureType sType; const void* pNext; VkSemaphoreType semaphoreType; uint64_t initialValue; } VkSemaphoreTypeCreateInfo; typedef struct VkTimelineSemaphoreSubmitInfo { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreValueCount; const uint64_t* pWaitSemaphoreValues; uint32_t signalSemaphoreValueCount; const uint64_t* pSignalSemaphoreValues; } VkTimelineSemaphoreSubmitInfo; typedef struct VkSemaphoreWaitInfo { VkStructureType sType; const void* pNext; VkSemaphoreWaitFlags flags; uint32_t semaphoreCount; const VkSemaphore* pSemaphores; const uint64_t* pValues; } VkSemaphoreWaitInfo; typedef struct VkSemaphoreSignalInfo { VkStructureType sType; const void* pNext; VkSemaphore semaphore; uint64_t value; } VkSemaphoreSignalInfo; typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures { VkStructureType sType; void* pNext; VkBool32 bufferDeviceAddress; VkBool32 bufferDeviceAddressCaptureReplay; VkBool32 bufferDeviceAddressMultiDevice; } VkPhysicalDeviceBufferDeviceAddressFeatures; typedef struct VkBufferDeviceAddressInfo { VkStructureType sType; const void* pNext; VkBuffer buffer; } VkBufferDeviceAddressInfo; typedef struct VkBufferOpaqueCaptureAddressCreateInfo { VkStructureType sType; const void* pNext; uint64_t opaqueCaptureAddress; } VkBufferOpaqueCaptureAddressCreateInfo; typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo { VkStructureType sType; const void* pNext; uint64_t opaqueCaptureAddress; } 
VkMemoryOpaqueCaptureAddressAllocateInfo; typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo { VkStructureType sType; const void* pNext; VkDeviceMemory memory; } VkDeviceMemoryOpaqueCaptureAddressInfo; typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const 
VkBufferDeviceAddressInfo* pInfo); typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2( VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2( VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2( VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); VKAPI_ATTR void VKAPI_CALL vkResetQueryPool( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue( VkDevice device, VkSemaphore semaphore, uint64_t* pValue); VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores( VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore( VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress( VkDevice device, const VkBufferDeviceAddressInfo* pInfo); VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress( VkDevice device, const VkBufferDeviceAddressInfo* 
pInfo); VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress( VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); #endif #define VK_VERSION_1_3 1 // Vulkan 1.3 version number #define VK_API_VERSION_1_3 VK_MAKE_API_VERSION(0, 1, 3, 0)// Patch version should always be set to 0 typedef uint64_t VkFlags64; VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot) typedef enum VkPipelineCreationFeedbackFlagBits { VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT = 0x00000001, VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT = 0x00000002, VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT = 0x00000004, VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT, VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT, VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT, VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineCreationFeedbackFlagBits; typedef VkFlags VkPipelineCreationFeedbackFlags; typedef enum VkToolPurposeFlagBits { VK_TOOL_PURPOSE_VALIDATION_BIT = 0x00000001, VK_TOOL_PURPOSE_PROFILING_BIT = 0x00000002, VK_TOOL_PURPOSE_TRACING_BIT = 0x00000004, VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT = 0x00000008, VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT = 0x00000010, VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020, VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040, VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = VK_TOOL_PURPOSE_VALIDATION_BIT, VK_TOOL_PURPOSE_PROFILING_BIT_EXT = VK_TOOL_PURPOSE_PROFILING_BIT, VK_TOOL_PURPOSE_TRACING_BIT_EXT = VK_TOOL_PURPOSE_TRACING_BIT, VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT, VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT, VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkToolPurposeFlagBits; typedef VkFlags 
VkToolPurposeFlags; typedef VkFlags VkPrivateDataSlotCreateFlags; typedef VkFlags64 VkPipelineStageFlags2; // Flag bits for VkPipelineStageFlagBits2 typedef VkFlags64 VkPipelineStageFlagBits2; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE = 0ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT = 0x00000001ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT = 0x00000002ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT = 0x00000004ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT = 0x00000008ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT = 0x00000040ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT = 0x00000080ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL; static const VkPipelineStageFlagBits2 
VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT = 0x00000100ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT = 0x00000200ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT = 0x00000800ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT = 0x00002000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT = 0x00004000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT = 0x00008000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT = 0x00010000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT = 0x100000000ULL; static const 
VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT = 0x200000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT = 0x400000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT = 0x800000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT = 0x1000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT = 0x2000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT = 0x4000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL; #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL; #endif static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL; static const VkPipelineStageFlagBits2 
VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI = 0x10000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR = 0x10000000ULL; typedef VkFlags64 VkAccessFlags2; // Flag bits for VkAccessFlagBits2 typedef VkFlags64 VkAccessFlagBits2; static const VkAccessFlagBits2 VK_ACCESS_2_NONE = 0ULL; static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT = 0x00000001ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT = 0x00000002ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL; static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT = 0x00000008ULL; static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL; static 
const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT = 0x00000010ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT = 0x00000020ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT = 0x00000040ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT = 0x00000080ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT = 0x00000800ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT = 0x00001000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT = 0x00002000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT = 0x00004000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT = 0x00008000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 
0x00008000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT = 0x00010000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT = 0x100000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT = 0x200000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT = 0x400000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL; #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL; #endif static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000ULL; static const 
VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI = 0x8000000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR = 0x10000000000ULL; typedef enum VkSubmitFlagBits { VK_SUBMIT_PROTECTED_BIT = 0x00000001, VK_SUBMIT_PROTECTED_BIT_KHR = VK_SUBMIT_PROTECTED_BIT, VK_SUBMIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSubmitFlagBits; typedef VkFlags VkSubmitFlags; typedef enum VkRenderingFlagBits { VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT = 0x00000001, VK_RENDERING_SUSPENDING_BIT = 0x00000002, VK_RENDERING_RESUMING_BIT = 0x00000004, VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT, VK_RENDERING_SUSPENDING_BIT_KHR = VK_RENDERING_SUSPENDING_BIT, VK_RENDERING_RESUMING_BIT_KHR = VK_RENDERING_RESUMING_BIT, VK_RENDERING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkRenderingFlagBits; typedef VkFlags VkRenderingFlags; typedef VkFlags64 VkFormatFeatureFlags2; // Flag bits for VkFormatFeatureFlagBits2 typedef VkFlags64 VkFormatFeatureFlagBits2; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT = 0x00000001ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT = 0x00000002ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 
0x00000002ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000010ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT = 0x00000040ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT = 0x00000080ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT = 0x00000400ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT = 0x00000800ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 
0x00000800ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT = 0x00004000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT = 0x00008000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000ULL; static const VkFormatFeatureFlagBits2 
VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT = 0x00400000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT = 0x00800000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT = 0x80000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT = 0x100000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT = 0x200000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL; #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000ULL; #endif static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000ULL; static const 
VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000ULL; #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000ULL; #endif #ifdef VK_ENABLE_BETA_EXTENSIONS static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000ULL; #endif static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV = 0x4000000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM = 0x400000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM = 0x800000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLOCK_MATCHING_BIT_QCOM = 0x1000000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BOX_FILTER_SAMPLED_BIT_QCOM = 0x2000000000ULL; typedef struct VkPhysicalDeviceVulkan13Features { VkStructureType sType; void* pNext; VkBool32 robustImageAccess; VkBool32 inlineUniformBlock; VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; VkBool32 pipelineCreationCacheControl; VkBool32 privateData; VkBool32 shaderDemoteToHelperInvocation; VkBool32 shaderTerminateInvocation; VkBool32 subgroupSizeControl; VkBool32 computeFullSubgroups; VkBool32 synchronization2; VkBool32 textureCompressionASTC_HDR; VkBool32 shaderZeroInitializeWorkgroupMemory; VkBool32 dynamicRendering; VkBool32 shaderIntegerDotProduct; VkBool32 maintenance4; } VkPhysicalDeviceVulkan13Features; typedef struct VkPhysicalDeviceVulkan13Properties { VkStructureType sType; void* pNext; uint32_t minSubgroupSize; uint32_t maxSubgroupSize; uint32_t maxComputeWorkgroupSubgroups; VkShaderStageFlags requiredSubgroupSizeStages; uint32_t maxInlineUniformBlockSize; uint32_t maxPerStageDescriptorInlineUniformBlocks; uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; uint32_t maxDescriptorSetInlineUniformBlocks; uint32_t 
maxDescriptorSetUpdateAfterBindInlineUniformBlocks; uint32_t maxInlineUniformTotalSize; VkBool32 integerDotProduct8BitUnsignedAccelerated; VkBool32 integerDotProduct8BitSignedAccelerated; VkBool32 integerDotProduct8BitMixedSignednessAccelerated; VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated; VkBool32 integerDotProduct4x8BitPackedSignedAccelerated; VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated; VkBool32 integerDotProduct16BitUnsignedAccelerated; VkBool32 integerDotProduct16BitSignedAccelerated; VkBool32 integerDotProduct16BitMixedSignednessAccelerated; VkBool32 integerDotProduct32BitUnsignedAccelerated; VkBool32 integerDotProduct32BitSignedAccelerated; VkBool32 integerDotProduct32BitMixedSignednessAccelerated; VkBool32 integerDotProduct64BitUnsignedAccelerated; VkBool32 integerDotProduct64BitSignedAccelerated; VkBool32 integerDotProduct64BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated; VkBool32 
integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated; VkDeviceSize storageTexelBufferOffsetAlignmentBytes; VkBool32 storageTexelBufferOffsetSingleTexelAlignment; VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; VkDeviceSize maxBufferSize; } VkPhysicalDeviceVulkan13Properties; typedef struct VkPipelineCreationFeedback { VkPipelineCreationFeedbackFlags flags; uint64_t duration; } VkPipelineCreationFeedback; typedef struct VkPipelineCreationFeedbackCreateInfo { VkStructureType sType; const void* pNext; VkPipelineCreationFeedback* pPipelineCreationFeedback; uint32_t pipelineStageCreationFeedbackCount; VkPipelineCreationFeedback* pPipelineStageCreationFeedbacks; } VkPipelineCreationFeedbackCreateInfo; typedef struct VkPhysicalDeviceShaderTerminateInvocationFeatures { VkStructureType sType; void* pNext; VkBool32 shaderTerminateInvocation; } VkPhysicalDeviceShaderTerminateInvocationFeatures; typedef struct VkPhysicalDeviceToolProperties { VkStructureType sType; void* pNext; char name[VK_MAX_EXTENSION_NAME_SIZE]; char version[VK_MAX_EXTENSION_NAME_SIZE]; VkToolPurposeFlags purposes; char description[VK_MAX_DESCRIPTION_SIZE]; char layer[VK_MAX_EXTENSION_NAME_SIZE]; } VkPhysicalDeviceToolProperties; typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures { VkStructureType sType; void* pNext; VkBool32 shaderDemoteToHelperInvocation; } VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures; typedef struct VkPhysicalDevicePrivateDataFeatures { VkStructureType sType; void* pNext; VkBool32 privateData; } VkPhysicalDevicePrivateDataFeatures; typedef struct VkDevicePrivateDataCreateInfo { VkStructureType sType; const void* pNext; uint32_t privateDataSlotRequestCount; } VkDevicePrivateDataCreateInfo; typedef struct VkPrivateDataSlotCreateInfo { VkStructureType sType; const void* pNext; VkPrivateDataSlotCreateFlags flags; } VkPrivateDataSlotCreateInfo; typedef struct 
VkPhysicalDevicePipelineCreationCacheControlFeatures { VkStructureType sType; void* pNext; VkBool32 pipelineCreationCacheControl; } VkPhysicalDevicePipelineCreationCacheControlFeatures; typedef struct VkMemoryBarrier2 { VkStructureType sType; const void* pNext; VkPipelineStageFlags2 srcStageMask; VkAccessFlags2 srcAccessMask; VkPipelineStageFlags2 dstStageMask; VkAccessFlags2 dstAccessMask; } VkMemoryBarrier2; typedef struct VkBufferMemoryBarrier2 { VkStructureType sType; const void* pNext; VkPipelineStageFlags2 srcStageMask; VkAccessFlags2 srcAccessMask; VkPipelineStageFlags2 dstStageMask; VkAccessFlags2 dstAccessMask; uint32_t srcQueueFamilyIndex; uint32_t dstQueueFamilyIndex; VkBuffer buffer; VkDeviceSize offset; VkDeviceSize size; } VkBufferMemoryBarrier2; typedef struct VkImageMemoryBarrier2 { VkStructureType sType; const void* pNext; VkPipelineStageFlags2 srcStageMask; VkAccessFlags2 srcAccessMask; VkPipelineStageFlags2 dstStageMask; VkAccessFlags2 dstAccessMask; VkImageLayout oldLayout; VkImageLayout newLayout; uint32_t srcQueueFamilyIndex; uint32_t dstQueueFamilyIndex; VkImage image; VkImageSubresourceRange subresourceRange; } VkImageMemoryBarrier2; typedef struct VkDependencyInfo { VkStructureType sType; const void* pNext; VkDependencyFlags dependencyFlags; uint32_t memoryBarrierCount; const VkMemoryBarrier2* pMemoryBarriers; uint32_t bufferMemoryBarrierCount; const VkBufferMemoryBarrier2* pBufferMemoryBarriers; uint32_t imageMemoryBarrierCount; const VkImageMemoryBarrier2* pImageMemoryBarriers; } VkDependencyInfo; typedef struct VkSemaphoreSubmitInfo { VkStructureType sType; const void* pNext; VkSemaphore semaphore; uint64_t value; VkPipelineStageFlags2 stageMask; uint32_t deviceIndex; } VkSemaphoreSubmitInfo; typedef struct VkCommandBufferSubmitInfo { VkStructureType sType; const void* pNext; VkCommandBuffer commandBuffer; uint32_t deviceMask; } VkCommandBufferSubmitInfo; typedef struct VkSubmitInfo2 { VkStructureType sType; const void* pNext; 
VkSubmitFlags flags; uint32_t waitSemaphoreInfoCount; const VkSemaphoreSubmitInfo* pWaitSemaphoreInfos; uint32_t commandBufferInfoCount; const VkCommandBufferSubmitInfo* pCommandBufferInfos; uint32_t signalSemaphoreInfoCount; const VkSemaphoreSubmitInfo* pSignalSemaphoreInfos; } VkSubmitInfo2; typedef struct VkPhysicalDeviceSynchronization2Features { VkStructureType sType; void* pNext; VkBool32 synchronization2; } VkPhysicalDeviceSynchronization2Features; typedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures { VkStructureType sType; void* pNext; VkBool32 shaderZeroInitializeWorkgroupMemory; } VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures; typedef struct VkPhysicalDeviceImageRobustnessFeatures { VkStructureType sType; void* pNext; VkBool32 robustImageAccess; } VkPhysicalDeviceImageRobustnessFeatures; typedef struct VkBufferCopy2 { VkStructureType sType; const void* pNext; VkDeviceSize srcOffset; VkDeviceSize dstOffset; VkDeviceSize size; } VkBufferCopy2; typedef struct VkCopyBufferInfo2 { VkStructureType sType; const void* pNext; VkBuffer srcBuffer; VkBuffer dstBuffer; uint32_t regionCount; const VkBufferCopy2* pRegions; } VkCopyBufferInfo2; typedef struct VkImageCopy2 { VkStructureType sType; const void* pNext; VkImageSubresourceLayers srcSubresource; VkOffset3D srcOffset; VkImageSubresourceLayers dstSubresource; VkOffset3D dstOffset; VkExtent3D extent; } VkImageCopy2; typedef struct VkCopyImageInfo2 { VkStructureType sType; const void* pNext; VkImage srcImage; VkImageLayout srcImageLayout; VkImage dstImage; VkImageLayout dstImageLayout; uint32_t regionCount; const VkImageCopy2* pRegions; } VkCopyImageInfo2; typedef struct VkBufferImageCopy2 { VkStructureType sType; const void* pNext; VkDeviceSize bufferOffset; uint32_t bufferRowLength; uint32_t bufferImageHeight; VkImageSubresourceLayers imageSubresource; VkOffset3D imageOffset; VkExtent3D imageExtent; } VkBufferImageCopy2; typedef struct VkCopyBufferToImageInfo2 { VkStructureType sType; 
const void* pNext; VkBuffer srcBuffer; VkImage dstImage; VkImageLayout dstImageLayout; uint32_t regionCount; const VkBufferImageCopy2* pRegions; } VkCopyBufferToImageInfo2; typedef struct VkCopyImageToBufferInfo2 { VkStructureType sType; const void* pNext; VkImage srcImage; VkImageLayout srcImageLayout; VkBuffer dstBuffer; uint32_t regionCount; const VkBufferImageCopy2* pRegions; } VkCopyImageToBufferInfo2; typedef struct VkImageBlit2 { VkStructureType sType; const void* pNext; VkImageSubresourceLayers srcSubresource; VkOffset3D srcOffsets[2]; VkImageSubresourceLayers dstSubresource; VkOffset3D dstOffsets[2]; } VkImageBlit2; typedef struct VkBlitImageInfo2 { VkStructureType sType; const void* pNext; VkImage srcImage; VkImageLayout srcImageLayout; VkImage dstImage; VkImageLayout dstImageLayout; uint32_t regionCount; const VkImageBlit2* pRegions; VkFilter filter; } VkBlitImageInfo2; typedef struct VkImageResolve2 { VkStructureType sType; const void* pNext; VkImageSubresourceLayers srcSubresource; VkOffset3D srcOffset; VkImageSubresourceLayers dstSubresource; VkOffset3D dstOffset; VkExtent3D extent; } VkImageResolve2; typedef struct VkResolveImageInfo2 { VkStructureType sType; const void* pNext; VkImage srcImage; VkImageLayout srcImageLayout; VkImage dstImage; VkImageLayout dstImageLayout; uint32_t regionCount; const VkImageResolve2* pRegions; } VkResolveImageInfo2; typedef struct VkPhysicalDeviceSubgroupSizeControlFeatures { VkStructureType sType; void* pNext; VkBool32 subgroupSizeControl; VkBool32 computeFullSubgroups; } VkPhysicalDeviceSubgroupSizeControlFeatures; typedef struct VkPhysicalDeviceSubgroupSizeControlProperties { VkStructureType sType; void* pNext; uint32_t minSubgroupSize; uint32_t maxSubgroupSize; uint32_t maxComputeWorkgroupSubgroups; VkShaderStageFlags requiredSubgroupSizeStages; } VkPhysicalDeviceSubgroupSizeControlProperties; typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfo { VkStructureType sType; void* pNext; uint32_t 
requiredSubgroupSize; } VkPipelineShaderStageRequiredSubgroupSizeCreateInfo; typedef struct VkPhysicalDeviceInlineUniformBlockFeatures { VkStructureType sType; void* pNext; VkBool32 inlineUniformBlock; VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; } VkPhysicalDeviceInlineUniformBlockFeatures; typedef struct VkPhysicalDeviceInlineUniformBlockProperties { VkStructureType sType; void* pNext; uint32_t maxInlineUniformBlockSize; uint32_t maxPerStageDescriptorInlineUniformBlocks; uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; uint32_t maxDescriptorSetInlineUniformBlocks; uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; } VkPhysicalDeviceInlineUniformBlockProperties; typedef struct VkWriteDescriptorSetInlineUniformBlock { VkStructureType sType; const void* pNext; uint32_t dataSize; const void* pData; } VkWriteDescriptorSetInlineUniformBlock; typedef struct VkDescriptorPoolInlineUniformBlockCreateInfo { VkStructureType sType; const void* pNext; uint32_t maxInlineUniformBlockBindings; } VkDescriptorPoolInlineUniformBlockCreateInfo; typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeatures { VkStructureType sType; void* pNext; VkBool32 textureCompressionASTC_HDR; } VkPhysicalDeviceTextureCompressionASTCHDRFeatures; typedef struct VkRenderingAttachmentInfo { VkStructureType sType; const void* pNext; VkImageView imageView; VkImageLayout imageLayout; VkResolveModeFlagBits resolveMode; VkImageView resolveImageView; VkImageLayout resolveImageLayout; VkAttachmentLoadOp loadOp; VkAttachmentStoreOp storeOp; VkClearValue clearValue; } VkRenderingAttachmentInfo; typedef struct VkRenderingInfo { VkStructureType sType; const void* pNext; VkRenderingFlags flags; VkRect2D renderArea; uint32_t layerCount; uint32_t viewMask; uint32_t colorAttachmentCount; const VkRenderingAttachmentInfo* pColorAttachments; const VkRenderingAttachmentInfo* pDepthAttachment; const VkRenderingAttachmentInfo* pStencilAttachment; } VkRenderingInfo; typedef 
struct VkPipelineRenderingCreateInfo { VkStructureType sType; const void* pNext; uint32_t viewMask; uint32_t colorAttachmentCount; const VkFormat* pColorAttachmentFormats; VkFormat depthAttachmentFormat; VkFormat stencilAttachmentFormat; } VkPipelineRenderingCreateInfo; typedef struct VkPhysicalDeviceDynamicRenderingFeatures { VkStructureType sType; void* pNext; VkBool32 dynamicRendering; } VkPhysicalDeviceDynamicRenderingFeatures; typedef struct VkCommandBufferInheritanceRenderingInfo { VkStructureType sType; const void* pNext; VkRenderingFlags flags; uint32_t viewMask; uint32_t colorAttachmentCount; const VkFormat* pColorAttachmentFormats; VkFormat depthAttachmentFormat; VkFormat stencilAttachmentFormat; VkSampleCountFlagBits rasterizationSamples; } VkCommandBufferInheritanceRenderingInfo; typedef struct VkPhysicalDeviceShaderIntegerDotProductFeatures { VkStructureType sType; void* pNext; VkBool32 shaderIntegerDotProduct; } VkPhysicalDeviceShaderIntegerDotProductFeatures; typedef struct VkPhysicalDeviceShaderIntegerDotProductProperties { VkStructureType sType; void* pNext; VkBool32 integerDotProduct8BitUnsignedAccelerated; VkBool32 integerDotProduct8BitSignedAccelerated; VkBool32 integerDotProduct8BitMixedSignednessAccelerated; VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated; VkBool32 integerDotProduct4x8BitPackedSignedAccelerated; VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated; VkBool32 integerDotProduct16BitUnsignedAccelerated; VkBool32 integerDotProduct16BitSignedAccelerated; VkBool32 integerDotProduct16BitMixedSignednessAccelerated; VkBool32 integerDotProduct32BitUnsignedAccelerated; VkBool32 integerDotProduct32BitSignedAccelerated; VkBool32 integerDotProduct32BitMixedSignednessAccelerated; VkBool32 integerDotProduct64BitUnsignedAccelerated; VkBool32 integerDotProduct64BitSignedAccelerated; VkBool32 integerDotProduct64BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated; VkBool32 
integerDotProductAccumulatingSaturating8BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated; VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated; VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated; } VkPhysicalDeviceShaderIntegerDotProductProperties; typedef struct VkPhysicalDeviceTexelBufferAlignmentProperties { VkStructureType sType; void* pNext; VkDeviceSize storageTexelBufferOffsetAlignmentBytes; VkBool32 storageTexelBufferOffsetSingleTexelAlignment; VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; } VkPhysicalDeviceTexelBufferAlignmentProperties; typedef struct VkFormatProperties3 { VkStructureType sType; void* pNext; VkFormatFeatureFlags2 linearTilingFeatures; VkFormatFeatureFlags2 optimalTilingFeatures; VkFormatFeatureFlags2 bufferFeatures; } VkFormatProperties3; typedef struct VkPhysicalDeviceMaintenance4Features { VkStructureType sType; void* pNext; VkBool32 maintenance4; } VkPhysicalDeviceMaintenance4Features; typedef struct VkPhysicalDeviceMaintenance4Properties { VkStructureType sType; void* pNext; VkDeviceSize maxBufferSize; } 
VkPhysicalDeviceMaintenance4Properties; typedef struct VkDeviceBufferMemoryRequirements { VkStructureType sType; const void* pNext; const VkBufferCreateInfo* pCreateInfo; } VkDeviceBufferMemoryRequirements; typedef struct VkDeviceImageMemoryRequirements { VkStructureType sType; const void* pNext; const VkImageCreateInfo* pCreateInfo; VkImageAspectFlagBits planeAspect; } VkDeviceImageMemoryRequirements; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolProperties)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties); typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlot)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot); typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlot)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data); typedef void (VKAPI_PTR *PFN_vkGetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData); typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo); typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask); typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos); typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2)(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo); typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query); typedef VkResult (VKAPI_PTR 
*PFN_vkQueueSubmit2)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence); typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdBeginRendering)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo); typedef void (VKAPI_PTR *PFN_vkCmdEndRendering)(VkCommandBuffer commandBuffer); typedef void (VKAPI_PTR *PFN_vkCmdSetCullMode)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFace)(VkCommandBuffer commandBuffer, VkFrontFace frontFace); typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopology)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCount)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCount)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); typedef void (VKAPI_PTR 
*PFN_vkCmdSetDepthTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnable)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOp)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnable)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOp)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp); typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnable)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnable)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirements)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolProperties( VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties); VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlot( VkDevice device, const 
VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot); VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlot( VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateData( VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data); VKAPI_ATTR void VKAPI_CALL vkGetPrivateData( VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData); VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2( VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo); VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask); VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2( VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos); VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2( VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo); VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2( VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query); VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2( VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence); VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2( VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2( VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2( VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2( VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2( VkCommandBuffer 
commandBuffer, const VkBlitImageInfo2* pBlitImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2( VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdBeginRendering( VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo); VKAPI_ATTR void VKAPI_CALL vkCmdEndRendering( VkCommandBuffer commandBuffer); VKAPI_ATTR void VKAPI_CALL vkCmdSetCullMode( VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFace( VkCommandBuffer commandBuffer, VkFrontFace frontFace); VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopology( VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCount( VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCount( VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnable( VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnable( VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOp( VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnable( VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnable( VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOp( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp); 
VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnable( VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnable( VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnable( VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirements( VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirements( VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirements( VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); #endif #define VK_KHR_surface 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) #define VK_KHR_SURFACE_SPEC_VERSION 25 #define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" typedef enum VkPresentModeKHR { VK_PRESENT_MODE_IMMEDIATE_KHR = 0, VK_PRESENT_MODE_MAILBOX_KHR = 1, VK_PRESENT_MODE_FIFO_KHR = 2, VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF } VkPresentModeKHR; typedef enum VkColorSpaceKHR { VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003, VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, VK_COLOR_SPACE_HDR10_HLG_EXT 
= 1000104010, VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF } VkColorSpaceKHR; typedef enum VkSurfaceTransformFlagBitsKHR { VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkSurfaceTransformFlagBitsKHR; typedef enum VkCompositeAlphaFlagBitsKHR { VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkCompositeAlphaFlagBitsKHR; typedef VkFlags VkCompositeAlphaFlagsKHR; typedef VkFlags VkSurfaceTransformFlagsKHR; typedef struct VkSurfaceCapabilitiesKHR { uint32_t minImageCount; uint32_t maxImageCount; VkExtent2D currentExtent; VkExtent2D minImageExtent; VkExtent2D maxImageExtent; uint32_t maxImageArrayLayers; VkSurfaceTransformFlagsKHR supportedTransforms; VkSurfaceTransformFlagBitsKHR currentTransform; VkCompositeAlphaFlagsKHR supportedCompositeAlpha; VkImageUsageFlags supportedUsageFlags; } VkSurfaceCapabilitiesKHR; typedef struct 
VkSurfaceFormatKHR { VkFormat format; VkColorSpaceKHR colorSpace; } VkSurfaceFormatKHR; typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); #endif #define VK_KHR_swapchain 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) #define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 #define 
VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" typedef enum VkSwapchainCreateFlagBitsKHR { VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkSwapchainCreateFlagBitsKHR; typedef VkFlags VkSwapchainCreateFlagsKHR; typedef enum VkDeviceGroupPresentModeFlagBitsKHR { VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkDeviceGroupPresentModeFlagBitsKHR; typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; typedef struct VkSwapchainCreateInfoKHR { VkStructureType sType; const void* pNext; VkSwapchainCreateFlagsKHR flags; VkSurfaceKHR surface; uint32_t minImageCount; VkFormat imageFormat; VkColorSpaceKHR imageColorSpace; VkExtent2D imageExtent; uint32_t imageArrayLayers; VkImageUsageFlags imageUsage; VkSharingMode imageSharingMode; uint32_t queueFamilyIndexCount; const uint32_t* pQueueFamilyIndices; VkSurfaceTransformFlagBitsKHR preTransform; VkCompositeAlphaFlagBitsKHR compositeAlpha; VkPresentModeKHR presentMode; VkBool32 clipped; VkSwapchainKHR oldSwapchain; } VkSwapchainCreateInfoKHR; typedef struct VkPresentInfoKHR { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreCount; const VkSemaphore* pWaitSemaphores; uint32_t swapchainCount; const VkSwapchainKHR* pSwapchains; const uint32_t* pImageIndices; VkResult* pResults; } VkPresentInfoKHR; typedef struct VkImageSwapchainCreateInfoKHR { VkStructureType sType; const void* pNext; VkSwapchainKHR swapchain; } VkImageSwapchainCreateInfoKHR; typedef struct VkBindImageMemorySwapchainInfoKHR { VkStructureType sType; const void* pNext; VkSwapchainKHR swapchain; uint32_t 
imageIndex; } VkBindImageMemorySwapchainInfoKHR; typedef struct VkAcquireNextImageInfoKHR { VkStructureType sType; const void* pNext; VkSwapchainKHR swapchain; uint64_t timeout; VkSemaphore semaphore; VkFence fence; uint32_t deviceMask; } VkAcquireNextImageInfoKHR; typedef struct VkDeviceGroupPresentCapabilitiesKHR { VkStructureType sType; void* pNext; uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; VkDeviceGroupPresentModeFlagsKHR modes; } VkDeviceGroupPresentCapabilitiesKHR; typedef struct VkDeviceGroupPresentInfoKHR { VkStructureType sType; const void* pNext; uint32_t swapchainCount; const uint32_t* pDeviceMasks; VkDeviceGroupPresentModeFlagBitsKHR mode; } VkDeviceGroupPresentInfoKHR; typedef struct VkDeviceGroupSwapchainCreateInfoKHR { VkStructureType sType; const void* pNext; VkDeviceGroupPresentModeFlagsKHR modes; } VkDeviceGroupSwapchainCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); typedef VkResult (VKAPI_PTR 
*PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( VkQueue queue, const VkPresentInfoKHR* pPresentInfo); VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); #endif #define VK_KHR_display 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) #define VK_KHR_DISPLAY_SPEC_VERSION 23 #define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" typedef VkFlags VkDisplayModeCreateFlagsKHR; typedef enum 
VkDisplayPlaneAlphaFlagBitsKHR { VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkDisplayPlaneAlphaFlagBitsKHR; typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; typedef struct VkDisplayModeParametersKHR { VkExtent2D visibleRegion; uint32_t refreshRate; } VkDisplayModeParametersKHR; typedef struct VkDisplayModeCreateInfoKHR { VkStructureType sType; const void* pNext; VkDisplayModeCreateFlagsKHR flags; VkDisplayModeParametersKHR parameters; } VkDisplayModeCreateInfoKHR; typedef struct VkDisplayModePropertiesKHR { VkDisplayModeKHR displayMode; VkDisplayModeParametersKHR parameters; } VkDisplayModePropertiesKHR; typedef struct VkDisplayPlaneCapabilitiesKHR { VkDisplayPlaneAlphaFlagsKHR supportedAlpha; VkOffset2D minSrcPosition; VkOffset2D maxSrcPosition; VkExtent2D minSrcExtent; VkExtent2D maxSrcExtent; VkOffset2D minDstPosition; VkOffset2D maxDstPosition; VkExtent2D minDstExtent; VkExtent2D maxDstExtent; } VkDisplayPlaneCapabilitiesKHR; typedef struct VkDisplayPlanePropertiesKHR { VkDisplayKHR currentDisplay; uint32_t currentStackIndex; } VkDisplayPlanePropertiesKHR; typedef struct VkDisplayPropertiesKHR { VkDisplayKHR display; const char* displayName; VkExtent2D physicalDimensions; VkExtent2D physicalResolution; VkSurfaceTransformFlagsKHR supportedTransforms; VkBool32 planeReorderPossible; VkBool32 persistentContent; } VkDisplayPropertiesKHR; typedef struct VkDisplaySurfaceCreateInfoKHR { VkStructureType sType; const void* pNext; VkDisplaySurfaceCreateFlagsKHR flags; VkDisplayModeKHR displayMode; uint32_t planeIndex; uint32_t planeStackIndex; VkSurfaceTransformFlagBitsKHR transform; float globalAlpha; VkDisplayPlaneAlphaFlagBitsKHR alphaMode; VkExtent2D imageExtent; } 
VkDisplaySurfaceCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( VkPhysicalDevice 
physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #endif #define VK_KHR_display_swapchain 1 #define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 #define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" typedef struct VkDisplayPresentInfoKHR { VkStructureType sType; const void* pNext; VkRect2D srcRect; VkRect2D dstRect; VkBool32 persistent; } VkDisplayPresentInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); #endif #define VK_KHR_sampler_mirror_clamp_to_edge 1 #define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 #define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" #define VK_KHR_dynamic_rendering 1 #define VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION 1 #define VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME "VK_KHR_dynamic_rendering" typedef VkRenderingFlags VkRenderingFlagsKHR; typedef VkRenderingFlagBits VkRenderingFlagBitsKHR; typedef 
VkRenderingInfo VkRenderingInfoKHR; typedef VkRenderingAttachmentInfo VkRenderingAttachmentInfoKHR; typedef VkPipelineRenderingCreateInfo VkPipelineRenderingCreateInfoKHR; typedef VkPhysicalDeviceDynamicRenderingFeatures VkPhysicalDeviceDynamicRenderingFeaturesKHR; typedef VkCommandBufferInheritanceRenderingInfo VkCommandBufferInheritanceRenderingInfoKHR; typedef struct VkRenderingFragmentShadingRateAttachmentInfoKHR { VkStructureType sType; const void* pNext; VkImageView imageView; VkImageLayout imageLayout; VkExtent2D shadingRateAttachmentTexelSize; } VkRenderingFragmentShadingRateAttachmentInfoKHR; typedef struct VkRenderingFragmentDensityMapAttachmentInfoEXT { VkStructureType sType; const void* pNext; VkImageView imageView; VkImageLayout imageLayout; } VkRenderingFragmentDensityMapAttachmentInfoEXT; typedef struct VkAttachmentSampleCountInfoAMD { VkStructureType sType; const void* pNext; uint32_t colorAttachmentCount; const VkSampleCountFlagBits* pColorAttachmentSamples; VkSampleCountFlagBits depthStencilAttachmentSamples; } VkAttachmentSampleCountInfoAMD; typedef VkAttachmentSampleCountInfoAMD VkAttachmentSampleCountInfoNV; typedef struct VkMultiviewPerViewAttributesInfoNVX { VkStructureType sType; const void* pNext; VkBool32 perViewAttributes; VkBool32 perViewAttributesPositionXOnly; } VkMultiviewPerViewAttributesInfoNVX; typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderingKHR)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo); typedef void (VKAPI_PTR *PFN_vkCmdEndRenderingKHR)(VkCommandBuffer commandBuffer); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderingKHR( VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo); VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderingKHR( VkCommandBuffer commandBuffer); #endif #define VK_KHR_multiview 1 #define VK_KHR_MULTIVIEW_SPEC_VERSION 1 #define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; 
typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; #define VK_KHR_get_physical_device_properties2 1 #define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 #define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; typedef VkFormatProperties2 VkFormatProperties2KHR; typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* 
pMemoryProperties); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); #endif #define VK_KHR_device_group 1 #define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 #define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; typedef 
VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( VkCommandBuffer commandBuffer, uint32_t deviceMask); VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); #endif #define VK_KHR_shader_draw_parameters 1 #define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 #define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" #define VK_KHR_maintenance1 1 #define VK_KHR_MAINTENANCE_1_SPEC_VERSION 2 #define VK_KHR_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_maintenance1" #define VK_KHR_MAINTENANCE1_SPEC_VERSION VK_KHR_MAINTENANCE_1_SPEC_VERSION #define VK_KHR_MAINTENANCE1_EXTENSION_NAME VK_KHR_MAINTENANCE_1_EXTENSION_NAME typedef 
VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); #endif #define VK_KHR_device_group_creation 1 #define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 #define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" #define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); #endif #define VK_KHR_external_memory_capabilities 1 #define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" #define VK_LUID_SIZE_KHR VK_LUID_SIZE typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; typedef 
VkExternalBufferProperties VkExternalBufferPropertiesKHR; typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); #endif #define VK_KHR_external_memory 1 #define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" #define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; #define VK_KHR_external_memory_fd 1 #define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" typedef struct VkImportMemoryFdInfoKHR { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagBits handleType; int fd; } VkImportMemoryFdInfoKHR; typedef struct VkMemoryFdPropertiesKHR { VkStructureType sType; void* pNext; uint32_t memoryTypeBits; } VkMemoryFdPropertiesKHR; typedef struct VkMemoryGetFdInfoKHR { VkStructureType sType; const void* pNext; VkDeviceMemory memory; VkExternalMemoryHandleTypeFlagBits handleType; } VkMemoryGetFdInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); #ifndef 
VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); #endif #define VK_KHR_external_semaphore_capabilities 1 #define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); #endif #define VK_KHR_external_semaphore 1 #define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; #define VK_KHR_external_semaphore_fd 1 #define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 #define 
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" typedef struct VkImportSemaphoreFdInfoKHR { VkStructureType sType; const void* pNext; VkSemaphore semaphore; VkSemaphoreImportFlags flags; VkExternalSemaphoreHandleTypeFlagBits handleType; int fd; } VkImportSemaphoreFdInfoKHR; typedef struct VkSemaphoreGetFdInfoKHR { VkStructureType sType; const void* pNext; VkSemaphore semaphore; VkExternalSemaphoreHandleTypeFlagBits handleType; } VkSemaphoreGetFdInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); #endif #define VK_KHR_push_descriptor 1 #define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 #define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { VkStructureType sType; void* pNext; uint32_t maxPushDescriptors; } VkPhysicalDevicePushDescriptorPropertiesKHR; typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, 
uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); #endif #define VK_KHR_shader_float16_int8 1 #define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 #define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; #define VK_KHR_16bit_storage 1 #define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 #define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; #define VK_KHR_incremental_present 1 #define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 2 #define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" typedef struct VkRectLayerKHR { VkOffset2D offset; VkExtent2D extent; uint32_t layer; } VkRectLayerKHR; typedef struct VkPresentRegionKHR { uint32_t rectangleCount; const VkRectLayerKHR* pRectangles; } VkPresentRegionKHR; typedef struct VkPresentRegionsKHR { VkStructureType sType; const void* pNext; uint32_t swapchainCount; const VkPresentRegionKHR* pRegions; } VkPresentRegionsKHR; #define VK_KHR_descriptor_update_template 1 typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; #define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 #define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; typedef VkDescriptorUpdateTemplateCreateInfo 
VkDescriptorUpdateTemplateCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); #endif #define VK_KHR_imageless_framebuffer 1 #define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 #define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; #define VK_KHR_create_renderpass2 1 #define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 #define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" typedef VkRenderPassCreateInfo2 
VkRenderPassCreateInfo2KHR; typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; typedef VkAttachmentReference2 VkAttachmentReference2KHR; typedef VkSubpassDescription2 VkSubpassDescription2KHR; typedef VkSubpassDependency2 VkSubpassDependency2KHR; typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; typedef VkSubpassEndInfo VkSubpassEndInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); #endif #define VK_KHR_shared_presentable_image 1 #define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 #define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" typedef struct VkSharedPresentSurfaceCapabilitiesKHR { VkStructureType sType; void* pNext; VkImageUsageFlags 
sharedPresentSupportedUsageFlags; } VkSharedPresentSurfaceCapabilitiesKHR; typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( VkDevice device, VkSwapchainKHR swapchain); #endif #define VK_KHR_external_fence_capabilities 1 #define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); #endif #define VK_KHR_external_fence 1 #define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" typedef VkFenceImportFlags VkFenceImportFlagsKHR; typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; #define VK_KHR_external_fence_fd 1 #define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" typedef struct VkImportFenceFdInfoKHR { VkStructureType sType; const void* pNext; VkFence fence; 
/*
 * NOTE(review): this region is part of the auto-generated Vulkan API header
 * (Khronos registry output, vendored third-party code). Every declaration
 * below — macro values, enum constants, struct member order — is fixed by
 * the Vulkan specification. Do not hand-edit; regenerate from vk.xml.
 * Comments added here are navigational only.
 */
/* Tail of VK_KHR_external_fence_fd: remaining members of VkImportFenceFdInfoKHR
 * (the struct's opening lines precede this chunk), FD-export info, and the
 * import/export entry points. */
VkFenceImportFlags flags;
VkExternalFenceHandleTypeFlagBits handleType;
int fd;
} VkImportFenceFdInfoKHR;
typedef struct VkFenceGetFdInfoKHR { VkStructureType sType; const void* pNext; VkFence fence; VkExternalFenceHandleTypeFlagBits handleType; } VkFenceGetFdInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);
#endif

/* VK_KHR_performance_query */
#define VK_KHR_performance_query 1
#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1
#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query"
typedef enum VkPerformanceCounterUnitKHR { VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterUnitKHR;
typedef enum VkPerformanceCounterScopeKHR { VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterScopeKHR;
typedef enum VkPerformanceCounterStorageKHR { VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterStorageKHR;
typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterDescriptionFlagBitsKHR;
typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR;
typedef enum VkAcquireProfilingLockFlagBitsKHR { VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkAcquireProfilingLockFlagBitsKHR;
typedef VkFlags VkAcquireProfilingLockFlagsKHR;
typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 performanceCounterQueryPools; VkBool32 performanceCounterMultipleQueryPools; } VkPhysicalDevicePerformanceQueryFeaturesKHR;
typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { VkStructureType sType; void* pNext; VkBool32 allowCommandBufferQueryCopies; } VkPhysicalDevicePerformanceQueryPropertiesKHR;
typedef struct VkPerformanceCounterKHR { VkStructureType sType; void* pNext; VkPerformanceCounterUnitKHR unit; VkPerformanceCounterScopeKHR scope; VkPerformanceCounterStorageKHR storage; uint8_t uuid[VK_UUID_SIZE]; } VkPerformanceCounterKHR;
typedef struct VkPerformanceCounterDescriptionKHR { VkStructureType sType; void* pNext; VkPerformanceCounterDescriptionFlagsKHR flags; char name[VK_MAX_DESCRIPTION_SIZE]; char category[VK_MAX_DESCRIPTION_SIZE]; char description[VK_MAX_DESCRIPTION_SIZE]; } VkPerformanceCounterDescriptionKHR;
typedef struct VkQueryPoolPerformanceCreateInfoKHR { VkStructureType sType; const void* pNext; uint32_t queueFamilyIndex; uint32_t counterIndexCount; const uint32_t* pCounterIndices; } VkQueryPoolPerformanceCreateInfoKHR;
typedef union VkPerformanceCounterResultKHR { int32_t int32; int64_t int64; uint32_t uint32; uint64_t uint64; float float32; double float64; } VkPerformanceCounterResultKHR;
typedef struct VkAcquireProfilingLockInfoKHR { VkStructureType sType; const void* pNext; VkAcquireProfilingLockFlagsKHR flags; uint64_t timeout; } VkAcquireProfilingLockInfoKHR;
typedef struct VkPerformanceQuerySubmitInfoKHR { VkStructureType sType; const void* pNext; uint32_t counterPassIndex; } VkPerformanceQuerySubmitInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions);
typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses);
typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions);
VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses);
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( VkDevice device);
#endif

/* VK_KHR_maintenance2 (promoted to core 1.1; KHR aliases kept for compatibility) */
#define VK_KHR_maintenance2 1
#define VK_KHR_MAINTENANCE_2_SPEC_VERSION 1
#define VK_KHR_MAINTENANCE_2_EXTENSION_NAME "VK_KHR_maintenance2"
#define VK_KHR_MAINTENANCE2_SPEC_VERSION VK_KHR_MAINTENANCE_2_SPEC_VERSION
#define VK_KHR_MAINTENANCE2_EXTENSION_NAME VK_KHR_MAINTENANCE_2_EXTENSION_NAME
typedef VkPointClippingBehavior VkPointClippingBehaviorKHR;
typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR;
typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR;
typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR;
typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR;
typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR;
typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR;

/* VK_KHR_get_surface_capabilities2 */
#define VK_KHR_get_surface_capabilities2 1
#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1
#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2"
typedef struct VkPhysicalDeviceSurfaceInfo2KHR { VkStructureType sType; const void* pNext; VkSurfaceKHR surface; } VkPhysicalDeviceSurfaceInfo2KHR;
typedef struct VkSurfaceCapabilities2KHR { VkStructureType sType; void* pNext; VkSurfaceCapabilitiesKHR surfaceCapabilities; } VkSurfaceCapabilities2KHR;
typedef struct VkSurfaceFormat2KHR { VkStructureType sType; void* pNext; VkSurfaceFormatKHR surfaceFormat; } VkSurfaceFormat2KHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);
#endif

/* VK_KHR_variable_pointers */
#define VK_KHR_variable_pointers 1
#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1
#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers"
typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR;
typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR;

/* VK_KHR_get_display_properties2 */
#define VK_KHR_get_display_properties2 1
#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1
#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2"
typedef struct VkDisplayProperties2KHR { VkStructureType sType; void* pNext; VkDisplayPropertiesKHR displayProperties; } VkDisplayProperties2KHR;
typedef struct VkDisplayPlaneProperties2KHR { VkStructureType sType; void* pNext; VkDisplayPlanePropertiesKHR displayPlaneProperties; } VkDisplayPlaneProperties2KHR;
typedef struct VkDisplayModeProperties2KHR { VkStructureType sType; void* pNext; VkDisplayModePropertiesKHR displayModeProperties; } VkDisplayModeProperties2KHR;
typedef struct VkDisplayPlaneInfo2KHR { VkStructureType sType; const void* pNext; VkDisplayModeKHR mode; uint32_t planeIndex; } VkDisplayPlaneInfo2KHR;
typedef struct VkDisplayPlaneCapabilities2KHR { VkStructureType sType; void* pNext; VkDisplayPlaneCapabilitiesKHR capabilities; } VkDisplayPlaneCapabilities2KHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities);
#endif

/* VK_KHR_dedicated_allocation */
#define VK_KHR_dedicated_allocation 1
#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3
#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation"
typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR;
typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR;

/* VK_KHR_storage_buffer_storage_class */
#define VK_KHR_storage_buffer_storage_class 1
#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1
#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class"

/* VK_KHR_relaxed_block_layout */
#define VK_KHR_relaxed_block_layout 1
#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1
#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout"

/* VK_KHR_get_memory_requirements2 */
#define VK_KHR_get_memory_requirements2 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2"
typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR;
typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR;
typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR;
typedef VkMemoryRequirements2 VkMemoryRequirements2KHR;
typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR;
typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
#endif

/* VK_KHR_image_format_list */
#define VK_KHR_image_format_list 1
#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1
#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list"
typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR;

/* VK_KHR_sampler_ycbcr_conversion */
#define VK_KHR_sampler_ycbcr_conversion 1
typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR;
#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14
#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion"
typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR;
typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR;
typedef VkChromaLocation VkChromaLocationKHR;
typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR;
typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR;
typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR;
typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR;
typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR;
typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
#endif

/* VK_KHR_bind_memory2 */
#define VK_KHR_bind_memory2 1
#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1
#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2"
typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR;
typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
#endif

/* VK_KHR_maintenance3 (KHR aliases kept for compatibility) */
#define VK_KHR_maintenance3 1
#define VK_KHR_MAINTENANCE_3_SPEC_VERSION 1
#define VK_KHR_MAINTENANCE_3_EXTENSION_NAME "VK_KHR_maintenance3"
#define VK_KHR_MAINTENANCE3_SPEC_VERSION VK_KHR_MAINTENANCE_3_SPEC_VERSION
#define VK_KHR_MAINTENANCE3_EXTENSION_NAME VK_KHR_MAINTENANCE_3_EXTENSION_NAME
typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR;
typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR;
typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
#endif

/* VK_KHR_draw_indirect_count */
#define VK_KHR_draw_indirect_count 1
#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count"
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
#endif

/* VK_KHR_shader_subgroup_extended_types */
#define VK_KHR_shader_subgroup_extended_types 1
#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1
#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types"
typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR;

/* VK_KHR_8bit_storage */
#define VK_KHR_8bit_storage 1
#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1
#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage"
typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR;

/* VK_KHR_shader_atomic_int64 */
#define VK_KHR_shader_atomic_int64 1
#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1
#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64"
typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR;

/* VK_KHR_shader_clock */
#define VK_KHR_shader_clock 1
#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1
#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock"
typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 shaderSubgroupClock; VkBool32 shaderDeviceClock; } VkPhysicalDeviceShaderClockFeaturesKHR;

/* VK_KHR_global_priority */
#define VK_KHR_global_priority 1
#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR 16U
#define VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION 1
#define VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME "VK_KHR_global_priority"
typedef enum VkQueueGlobalPriorityKHR { VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = 128, VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = 256, VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = 512, VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = 1024, VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR, VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR, VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR, VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR, VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_KHR = 0x7FFFFFFF } VkQueueGlobalPriorityKHR;
typedef struct VkDeviceQueueGlobalPriorityCreateInfoKHR { VkStructureType sType; const void* pNext; VkQueueGlobalPriorityKHR globalPriority; } VkDeviceQueueGlobalPriorityCreateInfoKHR;
typedef struct VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 globalPriorityQuery; } VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR;
typedef struct VkQueueFamilyGlobalPriorityPropertiesKHR { VkStructureType sType; void* pNext; uint32_t priorityCount; VkQueueGlobalPriorityKHR priorities[VK_MAX_GLOBAL_PRIORITY_SIZE_KHR]; } VkQueueFamilyGlobalPriorityPropertiesKHR;

/* VK_KHR_driver_properties */
#define VK_KHR_driver_properties 1
#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1
#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties"
#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE
#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE
typedef VkDriverId VkDriverIdKHR;
typedef VkConformanceVersion VkConformanceVersionKHR;
typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR;

/* VK_KHR_shader_float_controls */
#define VK_KHR_shader_float_controls 1
#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4
#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls"
typedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR;
typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR;

/* VK_KHR_depth_stencil_resolve */
#define VK_KHR_depth_stencil_resolve 1
#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1
#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve"
typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR;
typedef VkResolveModeFlags VkResolveModeFlagsKHR;
typedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR;
typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR;

/* VK_KHR_swapchain_mutable_format */
#define VK_KHR_swapchain_mutable_format 1
#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1
#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format"

/* VK_KHR_timeline_semaphore */
#define VK_KHR_timeline_semaphore 1
#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2
#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore"
typedef VkSemaphoreType VkSemaphoreTypeKHR;
typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR;
typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR;
typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR;
typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR;
typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR;
typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR;
typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR;
typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);
typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);
VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);
#endif

/* VK_KHR_vulkan_memory_model */
#define VK_KHR_vulkan_memory_model 1
#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3
#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model"
typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR;

/* VK_KHR_shader_terminate_invocation */
#define VK_KHR_shader_terminate_invocation 1
#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1
#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation"
typedef VkPhysicalDeviceShaderTerminateInvocationFeatures VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR;

/* VK_KHR_fragment_shading_rate */
#define VK_KHR_fragment_shading_rate 1
#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 2
#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate"
typedef enum VkFragmentShadingRateCombinerOpKHR { VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0, VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1, VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2, VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3, VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4, VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF } VkFragmentShadingRateCombinerOpKHR;
typedef struct VkFragmentShadingRateAttachmentInfoKHR { VkStructureType sType; const void* pNext; const VkAttachmentReference2* pFragmentShadingRateAttachment; VkExtent2D shadingRateAttachmentTexelSize; } VkFragmentShadingRateAttachmentInfoKHR;
typedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR { VkStructureType sType; const void* pNext; VkExtent2D fragmentSize; VkFragmentShadingRateCombinerOpKHR combinerOps[2]; } VkPipelineFragmentShadingRateStateCreateInfoKHR;
typedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 pipelineFragmentShadingRate; VkBool32 primitiveFragmentShadingRate; VkBool32 attachmentFragmentShadingRate; } VkPhysicalDeviceFragmentShadingRateFeaturesKHR;
typedef struct VkPhysicalDeviceFragmentShadingRatePropertiesKHR { VkStructureType sType; void* pNext; VkExtent2D minFragmentShadingRateAttachmentTexelSize; VkExtent2D maxFragmentShadingRateAttachmentTexelSize; uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio; VkBool32 primitiveFragmentShadingRateWithMultipleViewports; VkBool32 layeredShadingRateAttachments; VkBool32 fragmentShadingRateNonTrivialCombinerOps; VkExtent2D maxFragmentSize; uint32_t maxFragmentSizeAspectRatio; uint32_t maxFragmentShadingRateCoverageSamples; VkSampleCountFlagBits maxFragmentShadingRateRasterizationSamples; VkBool32 fragmentShadingRateWithShaderDepthStencilWrites; VkBool32 fragmentShadingRateWithSampleMask; VkBool32 fragmentShadingRateWithShaderSampleMask; VkBool32 fragmentShadingRateWithConservativeRasterization; VkBool32 fragmentShadingRateWithFragmentShaderInterlock; VkBool32 fragmentShadingRateWithCustomSampleLocations; VkBool32 fragmentShadingRateStrictMultiplyCombiner; } VkPhysicalDeviceFragmentShadingRatePropertiesKHR;
typedef struct VkPhysicalDeviceFragmentShadingRateKHR { VkStructureType sType; void* pNext; VkSampleCountFlags sampleCounts; VkExtent2D fragmentSize; } VkPhysicalDeviceFragmentShadingRateKHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates);
typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR( VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates);
VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]);
#endif

/* VK_KHR_spirv_1_4 */
#define VK_KHR_spirv_1_4 1
#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1
#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4"

/* VK_KHR_surface_protected_capabilities */
#define VK_KHR_surface_protected_capabilities 1
#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1
#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities"
typedef struct VkSurfaceProtectedCapabilitiesKHR { VkStructureType sType; const void* pNext; VkBool32 supportsProtected; } VkSurfaceProtectedCapabilitiesKHR;

/* VK_KHR_separate_depth_stencil_layouts */
#define VK_KHR_separate_depth_stencil_layouts 1
#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1
#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts"
typedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR;
typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR;
typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR;

/* VK_KHR_present_wait */
#define VK_KHR_present_wait 1
#define VK_KHR_PRESENT_WAIT_SPEC_VERSION 1
#define VK_KHR_PRESENT_WAIT_EXTENSION_NAME "VK_KHR_present_wait"
typedef struct VkPhysicalDevicePresentWaitFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 presentWait; } VkPhysicalDevicePresentWaitFeaturesKHR;
typedef VkResult (VKAPI_PTR *PFN_vkWaitForPresentKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkWaitForPresentKHR( VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout);
#endif

/* VK_KHR_uniform_buffer_standard_layout */
#define VK_KHR_uniform_buffer_standard_layout 1
#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1
#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout"
typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR;

/* VK_KHR_buffer_device_address */
#define VK_KHR_buffer_device_address 1
#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1
#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address"
typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR;
typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR;
typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR;
typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR;
typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR;
typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
#endif

/* VK_KHR_deferred_host_operations (entry points continue on the following lines) */
#define VK_KHR_deferred_host_operations 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR)
#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4
#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations"
/*
 * NOTE(review): auto-generated Vulkan registry output (vendored third-party
 * header). Spec-defined declarations; do not hand-edit — regenerate from
 * vk.xml. Comments below are navigational only.
 */
/* VK_KHR_deferred_host_operations entry points (the extension's #defines and
 * the VkDeferredOperationKHR handle are declared on the preceding lines). */
typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation);
typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator);
typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation);
VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( VkDevice device, VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( VkDevice device, VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( VkDevice device, VkDeferredOperationKHR operation);
#endif

/* VK_KHR_pipeline_executable_properties */
#define VK_KHR_pipeline_executable_properties 1
#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1
#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties"
typedef enum VkPipelineExecutableStatisticFormatKHR { VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF } VkPipelineExecutableStatisticFormatKHR;
typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 pipelineExecutableInfo; } VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR;
typedef struct VkPipelineInfoKHR { VkStructureType sType; const void* pNext; VkPipeline pipeline; } VkPipelineInfoKHR;
typedef struct VkPipelineExecutablePropertiesKHR { VkStructureType sType; void* pNext; VkShaderStageFlags stages; char name[VK_MAX_DESCRIPTION_SIZE]; char description[VK_MAX_DESCRIPTION_SIZE]; uint32_t subgroupSize; } VkPipelineExecutablePropertiesKHR;
typedef struct VkPipelineExecutableInfoKHR { VkStructureType sType; const void* pNext; VkPipeline pipeline; uint32_t executableIndex; } VkPipelineExecutableInfoKHR;
typedef union VkPipelineExecutableStatisticValueKHR { VkBool32 b32; int64_t i64; uint64_t u64; double f64; } VkPipelineExecutableStatisticValueKHR;
typedef struct VkPipelineExecutableStatisticKHR { VkStructureType sType; void* pNext; char name[VK_MAX_DESCRIPTION_SIZE]; char description[VK_MAX_DESCRIPTION_SIZE]; VkPipelineExecutableStatisticFormatKHR format; VkPipelineExecutableStatisticValueKHR value; } VkPipelineExecutableStatisticKHR;
typedef struct VkPipelineExecutableInternalRepresentationKHR { VkStructureType sType; void* pNext; char name[VK_MAX_DESCRIPTION_SIZE]; char description[VK_MAX_DESCRIPTION_SIZE]; VkBool32 isText; size_t dataSize; void* pData; } VkPipelineExecutableInternalRepresentationKHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics);
typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics);
VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);
#endif

/* VK_KHR_shader_integer_dot_product */
#define VK_KHR_shader_integer_dot_product 1
#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION 1
#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME "VK_KHR_shader_integer_dot_product"
typedef VkPhysicalDeviceShaderIntegerDotProductFeatures VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR;
typedef VkPhysicalDeviceShaderIntegerDotProductProperties VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR;

/* VK_KHR_pipeline_library */
#define VK_KHR_pipeline_library 1
#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1
#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library"
typedef struct VkPipelineLibraryCreateInfoKHR { VkStructureType sType; const void* pNext; uint32_t libraryCount; const VkPipeline* pLibraries; } VkPipelineLibraryCreateInfoKHR;

/* VK_KHR_shader_non_semantic_info */
#define VK_KHR_shader_non_semantic_info 1
#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1
#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info"

/* VK_KHR_present_id */
#define VK_KHR_present_id 1
#define VK_KHR_PRESENT_ID_SPEC_VERSION 1
#define VK_KHR_PRESENT_ID_EXTENSION_NAME "VK_KHR_present_id"
typedef struct VkPresentIdKHR { VkStructureType sType; const void* pNext; uint32_t swapchainCount; const uint64_t* pPresentIds; } VkPresentIdKHR;
typedef struct VkPhysicalDevicePresentIdFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 presentId; } VkPhysicalDevicePresentIdFeaturesKHR;

/* VK_KHR_synchronization2 (section continues past this chunk) */
#define VK_KHR_synchronization2 1
#define VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION 1
#define VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME "VK_KHR_synchronization2"
typedef VkPipelineStageFlags2 VkPipelineStageFlags2KHR;
typedef VkPipelineStageFlagBits2 VkPipelineStageFlagBits2KHR;
typedef VkAccessFlags2 VkAccessFlags2KHR;
typedef VkAccessFlagBits2 VkAccessFlagBits2KHR;
typedef VkSubmitFlagBits VkSubmitFlagBitsKHR;
typedef VkSubmitFlags VkSubmitFlagsKHR;
typedef VkMemoryBarrier2 VkMemoryBarrier2KHR;
typedef VkBufferMemoryBarrier2 VkBufferMemoryBarrier2KHR;
typedef VkImageMemoryBarrier2 VkImageMemoryBarrier2KHR;
typedef VkDependencyInfo VkDependencyInfoKHR;
typedef VkSubmitInfo2 VkSubmitInfo2KHR;
typedef VkSemaphoreSubmitInfo VkSemaphoreSubmitInfoKHR;
typedef VkCommandBufferSubmitInfo VkCommandBufferSubmitInfoKHR;
typedef VkPhysicalDeviceSynchronization2Features VkPhysicalDeviceSynchronization2FeaturesKHR;
typedef struct VkQueueFamilyCheckpointProperties2NV { VkStructureType sType; void* pNext; VkPipelineStageFlags2 checkpointExecutionStageMask; } VkQueueFamilyCheckpointProperties2NV;
typedef struct VkCheckpointData2NV { VkStructureType sType; void* pNext; VkPipelineStageFlags2 stage; void* pCheckpointMarker; } VkCheckpointData2NV;
typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo);
typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask);
typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos);
typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer commandBuffer, const VkDependencyInfo*
pDependencyInfo); typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query); typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence); typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointData2NV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2KHR( VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo); VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2KHR( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask); VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2KHR( VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos); VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2KHR( VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo); VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2KHR( VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query); VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2KHR( VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence); VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarker2AMD( VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointData2NV( VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData); #endif #define VK_KHR_fragment_shader_barycentric 1 #define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 #define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME 
"VK_KHR_fragment_shader_barycentric" typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 fragmentShaderBarycentric; } VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR; typedef struct VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR { VkStructureType sType; void* pNext; VkBool32 triStripVertexOrderIndependentOfProvokingVertex; } VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR; #define VK_KHR_shader_subgroup_uniform_control_flow 1 #define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION 1 #define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_EXTENSION_NAME "VK_KHR_shader_subgroup_uniform_control_flow" typedef struct VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 shaderSubgroupUniformControlFlow; } VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR; #define VK_KHR_zero_initialize_workgroup_memory 1 #define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION 1 #define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME "VK_KHR_zero_initialize_workgroup_memory" typedef VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR; #define VK_KHR_workgroup_memory_explicit_layout 1 #define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION 1 #define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME "VK_KHR_workgroup_memory_explicit_layout" typedef struct VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 workgroupMemoryExplicitLayout; VkBool32 workgroupMemoryExplicitLayoutScalarBlockLayout; VkBool32 workgroupMemoryExplicitLayout8BitAccess; VkBool32 workgroupMemoryExplicitLayout16BitAccess; } VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR; #define VK_KHR_copy_commands2 1 #define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1 #define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME "VK_KHR_copy_commands2" typedef 
VkCopyBufferInfo2 VkCopyBufferInfo2KHR; typedef VkCopyImageInfo2 VkCopyImageInfo2KHR; typedef VkCopyBufferToImageInfo2 VkCopyBufferToImageInfo2KHR; typedef VkCopyImageToBufferInfo2 VkCopyImageToBufferInfo2KHR; typedef VkBlitImageInfo2 VkBlitImageInfo2KHR; typedef VkResolveImageInfo2 VkResolveImageInfo2KHR; typedef VkBufferCopy2 VkBufferCopy2KHR; typedef VkImageCopy2 VkImageCopy2KHR; typedef VkImageBlit2 VkImageBlit2KHR; typedef VkBufferImageCopy2 VkBufferImageCopy2KHR; typedef VkImageResolve2 VkImageResolve2KHR; typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo); typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR( VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR( VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR( VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR( VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR( VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo); VKAPI_ATTR void 
VKAPI_CALL vkCmdResolveImage2KHR( VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo); #endif #define VK_KHR_format_feature_flags2 1 #define VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION 1 #define VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME "VK_KHR_format_feature_flags2" typedef VkFormatFeatureFlags2 VkFormatFeatureFlags2KHR; typedef VkFormatFeatureFlagBits2 VkFormatFeatureFlagBits2KHR; typedef VkFormatProperties3 VkFormatProperties3KHR; #define VK_KHR_ray_tracing_maintenance1 1 #define VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION 1 #define VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_ray_tracing_maintenance1" typedef struct VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR { VkStructureType sType; void* pNext; VkBool32 rayTracingMaintenance1; VkBool32 rayTracingPipelineTraceRaysIndirect2; } VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR; typedef struct VkTraceRaysIndirectCommand2KHR { VkDeviceAddress raygenShaderRecordAddress; VkDeviceSize raygenShaderRecordSize; VkDeviceAddress missShaderBindingTableAddress; VkDeviceSize missShaderBindingTableSize; VkDeviceSize missShaderBindingTableStride; VkDeviceAddress hitShaderBindingTableAddress; VkDeviceSize hitShaderBindingTableSize; VkDeviceSize hitShaderBindingTableStride; VkDeviceAddress callableShaderBindingTableAddress; VkDeviceSize callableShaderBindingTableSize; VkDeviceSize callableShaderBindingTableStride; uint32_t width; uint32_t height; uint32_t depth; } VkTraceRaysIndirectCommand2KHR; typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirect2KHR)(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirect2KHR( VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress); #endif #define VK_KHR_portability_enumeration 1 #define VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION 1 #define VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME "VK_KHR_portability_enumeration" #define VK_KHR_maintenance4 1 
#define VK_KHR_MAINTENANCE_4_SPEC_VERSION 2
#define VK_KHR_MAINTENANCE_4_EXTENSION_NAME "VK_KHR_maintenance4"
// KHR-suffixed aliases of the equivalent non-suffixed types declared earlier
// in this header (presumably the promoted core forms — NOTE(review): confirm
// against the Vulkan registry).
typedef VkPhysicalDeviceMaintenance4Features VkPhysicalDeviceMaintenance4FeaturesKHR;
typedef VkPhysicalDeviceMaintenance4Properties VkPhysicalDeviceMaintenance4PropertiesKHR;
typedef VkDeviceBufferMemoryRequirements VkDeviceBufferMemoryRequirementsKHR;
typedef VkDeviceImageMemoryRequirements VkDeviceImageMemoryRequirementsKHR;
// Function-pointer types so applications can load these entry points
// dynamically instead of linking against the static prototypes below.
typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirementsKHR)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
#ifndef VK_NO_PROTOTYPES
// Static prototypes; compiled out when the application defines
// VK_NO_PROTOTYPES and loads all entry points itself.
VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirementsKHR(
    VkDevice device,
    const VkDeviceBufferMemoryRequirements* pInfo,
    VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirementsKHR(
    VkDevice device,
    const VkDeviceImageMemoryRequirements* pInfo,
    VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirementsKHR(
    VkDevice device,
    const VkDeviceImageMemoryRequirements* pInfo,
    uint32_t* pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
#endif

// -------------------------------------------------------------------------
// VK_EXT_debug_report — instance-level extension that delivers diagnostic
// messages to an application-supplied callback (see
// PFN_vkDebugReportCallbackEXT below).
// -------------------------------------------------------------------------
#define VK_EXT_debug_report 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 10
#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
// Identifies which kind of Vulkan object a debug report refers to.
// Values past 33 are registry-assigned extension numbers, not sequential.
typedef enum VkDebugReportObjectTypeEXT {
    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
    VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
    VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
    VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
    VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
    VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
    VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
    VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
    VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
    VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
    VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
    VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28,
    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,
    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,
    VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33,
    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000,
    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000,
    VK_DEBUG_REPORT_OBJECT_TYPE_CU_MODULE_NVX_EXT = 1000029000,
    VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT = 1000029001,
    VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000,
    VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000,
    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT = 1000366000,
    // Backward-compatibility aliases for renamed enumerators.
    VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,
    VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,
    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT,
    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT,
    // Forces the enum to a 32-bit underlying type.
    VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
} VkDebugReportObjectTypeEXT;

// Severity/category bits a callback can be registered for.
typedef enum VkDebugReportFlagBitsEXT {
    VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,
    VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,
    VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,
    VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,
    VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,
    VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
} VkDebugReportFlagBitsEXT;
typedef VkFlags VkDebugReportFlagsEXT;
// Application-supplied callback: receives the message category/severity
// (flags), the object the message refers to, and the message text itself.
typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage,
    void* pUserData);
// Parameters for vkCreateDebugReportCallbackEXT; pUserData is passed back
// verbatim to pfnCallback on every invocation.
typedef struct VkDebugReportCallbackCreateInfoEXT {
    VkStructureType sType;
    const void* pNext;
    VkDebugReportFlagsEXT flags;
    PFN_vkDebugReportCallbackEXT pfnCallback;
    void* pUserData;
} VkDebugReportCallbackCreateInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);
typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);
typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
    VkInstance instance,
    const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDebugReportCallbackEXT* pCallback);
VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
    VkInstance instance,
    VkDebugReportCallbackEXT callback,
    const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
    VkInstance instance,
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage);
#endif

// -------------------------------------------------------------------------
// Marker-only extensions: these define no new types or entry points, just
// the presence macro plus spec-version/name constants.
// -------------------------------------------------------------------------
#define VK_NV_glsl_shader 1
#define VK_NV_GLSL_SHADER_SPEC_VERSION 1
#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader"

#define VK_EXT_depth_range_unrestricted 1
#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1
#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted"

#define VK_IMG_filter_cubic 1
#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1
#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic"

// -------------------------------------------------------------------------
// VK_AMD_rasterization_order — lets a pipeline opt into relaxed
// rasterization ordering via a pNext chain struct.
// -------------------------------------------------------------------------
#define VK_AMD_rasterization_order 1
#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1
#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order"
typedef enum VkRasterizationOrderAMD {
    VK_RASTERIZATION_ORDER_STRICT_AMD = 0,
    VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,
    VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF
} VkRasterizationOrderAMD;
typedef struct VkPipelineRasterizationStateRasterizationOrderAMD {
    VkStructureType sType;
    const void* pNext;
    VkRasterizationOrderAMD rasterizationOrder;
} VkPipelineRasterizationStateRasterizationOrderAMD;

#define VK_AMD_shader_trinary_minmax 1
#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1
#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax"

#define VK_AMD_shader_explicit_vertex_parameter 1
#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1
#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter"

// -------------------------------------------------------------------------
// VK_EXT_debug_marker — attach human-readable names/tags to objects and
// insert named marker regions into command buffers.
// -------------------------------------------------------------------------
#define VK_EXT_debug_marker 1
#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4
#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker"
typedef struct VkDebugMarkerObjectNameInfoEXT {
    VkStructureType sType;
    const void* pNext;
    VkDebugReportObjectTypeEXT objectType;
    uint64_t object;
    const char* pObjectName;
} VkDebugMarkerObjectNameInfoEXT;
typedef struct VkDebugMarkerObjectTagInfoEXT {
    VkStructureType sType;
    const void* pNext;
    VkDebugReportObjectTypeEXT objectType;
    uint64_t object;
    uint64_t tagName;
    size_t tagSize;
    const void* pTag;
} VkDebugMarkerObjectTagInfoEXT;
// color[4] is RGBA; marker name plus an optional display color.
typedef struct VkDebugMarkerMarkerInfoEXT {
    VkStructureType sType;
    const void* pNext;
    const char* pMarkerName;
    float color[4];
} VkDebugMarkerMarkerInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer);
typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
#ifndef VK_NO_PROTOTYPES
// NOTE: this prototype block continues past this span; the matching #endif
// (and the vkCmdDebugMarkerInsertEXT prototype) follow immediately after.
VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT(
    VkDevice device,
    const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT(
    VkDevice device,
    const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT(
    VkCommandBuffer commandBuffer,
    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT(
    VkCommandBuffer commandBuffer);
VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT(
    VkCommandBuffer commandBuffer,
    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
#endif

// Marker-only extension: presence macro plus version/name constants only.
#define VK_AMD_gcn_shader 1
#define VK_AMD_GCN_SHADER_SPEC_VERSION 1
#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader"

// -------------------------------------------------------------------------
// VK_NV_dedicated_allocation — opt an image/buffer into a dedicated memory
// allocation via pNext chain structs.
// -------------------------------------------------------------------------
#define VK_NV_dedicated_allocation 1
#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1
#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation"
typedef struct VkDedicatedAllocationImageCreateInfoNV {
    VkStructureType sType;
    const void* pNext;
    VkBool32 dedicatedAllocation;
} VkDedicatedAllocationImageCreateInfoNV;
typedef struct VkDedicatedAllocationBufferCreateInfoNV {
    VkStructureType sType;
    const void* pNext;
    VkBool32 dedicatedAllocation;
} VkDedicatedAllocationBufferCreateInfoNV;
typedef struct VkDedicatedAllocationMemoryAllocateInfoNV {
    VkStructureType sType;
    const void* pNext;
    VkImage image;
    VkBuffer buffer;
} VkDedicatedAllocationMemoryAllocateInfoNV;

// -------------------------------------------------------------------------
// VK_EXT_transform_feedback — capture vertex/geometry outputs into buffers
// and draw from the captured byte count.
// -------------------------------------------------------------------------
#define VK_EXT_transform_feedback 1
#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1
#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback"
typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT;
typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT {
    VkStructureType sType;
    void* pNext;
    VkBool32 transformFeedback;
    VkBool32 geometryStreams;
} VkPhysicalDeviceTransformFeedbackFeaturesEXT;
// Implementation limits/capabilities queried from the physical device.
typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT {
    VkStructureType sType;
    void* pNext;
    uint32_t maxTransformFeedbackStreams;
    uint32_t maxTransformFeedbackBuffers;
    VkDeviceSize maxTransformFeedbackBufferSize;
    uint32_t maxTransformFeedbackStreamDataSize;
    uint32_t maxTransformFeedbackBufferDataSize;
    uint32_t maxTransformFeedbackBufferDataStride;
    VkBool32 transformFeedbackQueries;
    VkBool32 transformFeedbackStreamsLinesTriangles;
    VkBool32 transformFeedbackRasterizationStreamSelect;
    VkBool32 transformFeedbackDraw;
} VkPhysicalDeviceTransformFeedbackPropertiesEXT;
typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT {
    VkStructureType sType;
    const void* pNext;
    VkPipelineRasterizationStateStreamCreateFlagsEXT flags;
    uint32_t rasterizationStream;
} VkPipelineRasterizationStateStreamCreateInfoEXT;
// Function-pointer types for loading the extension entry points dynamically.
typedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes);
typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets);
typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets);
typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index);
typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index);
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer commandBuffer,
    uint32_t firstBinding,
    uint32_t bindingCount,
    const VkBuffer* pBuffers,
    const VkDeviceSize* pOffsets,
    const VkDeviceSize* pSizes);
VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT(
    VkCommandBuffer commandBuffer,
    uint32_t firstCounterBuffer,
    uint32_t counterBufferCount,
    const VkBuffer* pCounterBuffers,
    const VkDeviceSize* pCounterBufferOffsets);
VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT(
    VkCommandBuffer commandBuffer,
    uint32_t firstCounterBuffer,
    uint32_t counterBufferCount,
    const VkBuffer* pCounterBuffers,
    const VkDeviceSize* pCounterBufferOffsets);
VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT(
    VkCommandBuffer commandBuffer,
    VkQueryPool queryPool,
    uint32_t query,
    VkQueryControlFlags flags,
    uint32_t index);
VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT(
    VkCommandBuffer commandBuffer,
    VkQueryPool queryPool,
    uint32_t query,
    uint32_t index);
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT(
    VkCommandBuffer commandBuffer,
    uint32_t instanceCount,
    uint32_t firstInstance,
    VkBuffer counterBuffer,
    VkDeviceSize counterBufferOffset,
    uint32_t counterOffset,
    uint32_t vertexStride);
#endif

// -------------------------------------------------------------------------
// VK_NVX_binary_import — import precompiled CUDA-style modules/functions
// and launch them from a command buffer.
// -------------------------------------------------------------------------
#define VK_NVX_binary_import 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuModuleNVX)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuFunctionNVX)
#define VK_NVX_BINARY_IMPORT_SPEC_VERSION 1
#define VK_NVX_BINARY_IMPORT_EXTENSION_NAME "VK_NVX_binary_import"
typedef struct VkCuModuleCreateInfoNVX {
    VkStructureType sType;
    const void* pNext;
    size_t dataSize;
    const void* pData;
} VkCuModuleCreateInfoNVX;
typedef struct VkCuFunctionCreateInfoNVX {
    VkStructureType sType;
    const void* pNext;
    VkCuModuleNVX module;
    const char* pName;
} VkCuFunctionCreateInfoNVX;
// Launch parameters: grid/block dimensions, shared memory, and two opaque
// parameter arrays passed through to the imported function.
typedef struct VkCuLaunchInfoNVX {
    VkStructureType sType;
    const void* pNext;
    VkCuFunctionNVX function;
    uint32_t gridDimX;
    uint32_t gridDimY;
    uint32_t gridDimZ;
    uint32_t blockDimX;
    uint32_t blockDimY;
    uint32_t blockDimZ;
    uint32_t sharedMemBytes;
    size_t paramCount;
    const void* const * pParams;
    size_t extraCount;
    const void* const * pExtras;
} VkCuLaunchInfoNVX;
typedef VkResult (VKAPI_PTR *PFN_vkCreateCuModuleNVX)(VkDevice device, const VkCuModuleCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuModuleNVX* pModule);
typedef VkResult (VKAPI_PTR *PFN_vkCreateCuFunctionNVX)(VkDevice device, const VkCuFunctionCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuFunctionNVX* pFunction);
typedef void (VKAPI_PTR *PFN_vkDestroyCuModuleNVX)(VkDevice device, VkCuModuleNVX module, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkDestroyCuFunctionNVX)(VkDevice device, VkCuFunctionNVX function, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkCmdCuLaunchKernelNVX)(VkCommandBuffer commandBuffer, const VkCuLaunchInfoNVX* pLaunchInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateCuModuleNVX( VkDevice device, const VkCuModuleCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuModuleNVX* pModule); VKAPI_ATTR VkResult VKAPI_CALL vkCreateCuFunctionNVX( VkDevice device, const VkCuFunctionCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuFunctionNVX* pFunction); VKAPI_ATTR void VKAPI_CALL vkDestroyCuModuleNVX( VkDevice device, VkCuModuleNVX module, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkDestroyCuFunctionNVX( VkDevice device, VkCuFunctionNVX function, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkCmdCuLaunchKernelNVX( VkCommandBuffer commandBuffer, const VkCuLaunchInfoNVX* pLaunchInfo); #endif #define VK_NVX_image_view_handle 1 #define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 2 #define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" typedef struct VkImageViewHandleInfoNVX { VkStructureType sType; const void* pNext; VkImageView imageView; VkDescriptorType descriptorType; VkSampler sampler; } VkImageViewHandleInfoNVX; typedef struct VkImageViewAddressPropertiesNVX { VkStructureType sType; void* pNext; VkDeviceAddress deviceAddress; VkDeviceSize size; } VkImageViewAddressPropertiesNVX; typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR uint32_t 
VKAPI_CALL vkGetImageViewHandleNVX( VkDevice device, const VkImageViewHandleInfoNVX* pInfo); VKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewAddressNVX( VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); #endif #define VK_AMD_draw_indirect_count 1 #define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 #define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); #endif #define VK_AMD_negative_viewport_height 1 #define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 #define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" #define VK_AMD_gpu_shader_half_float 1 #define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 #define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" #define VK_AMD_shader_ballot 1 #define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 #define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" #define VK_AMD_texture_gather_bias_lod 1 #define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 #define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME 
"VK_AMD_texture_gather_bias_lod" typedef struct VkTextureLODGatherFormatPropertiesAMD { VkStructureType sType; void* pNext; VkBool32 supportsTextureGatherLODBiasAMD; } VkTextureLODGatherFormatPropertiesAMD; #define VK_AMD_shader_info 1 #define VK_AMD_SHADER_INFO_SPEC_VERSION 1 #define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" typedef enum VkShaderInfoTypeAMD { VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, VK_SHADER_INFO_TYPE_BINARY_AMD = 1, VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF } VkShaderInfoTypeAMD; typedef struct VkShaderResourceUsageAMD { uint32_t numUsedVgprs; uint32_t numUsedSgprs; uint32_t ldsSizePerLocalWorkGroup; size_t ldsUsageSizeInBytes; size_t scratchMemUsageInBytes; } VkShaderResourceUsageAMD; typedef struct VkShaderStatisticsInfoAMD { VkShaderStageFlags shaderStageMask; VkShaderResourceUsageAMD resourceUsage; uint32_t numPhysicalVgprs; uint32_t numPhysicalSgprs; uint32_t numAvailableVgprs; uint32_t numAvailableSgprs; uint32_t computeWorkGroupSize[3]; } VkShaderStatisticsInfoAMD; typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); #endif #define VK_AMD_shader_image_load_store_lod 1 #define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 #define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" #define VK_NV_corner_sampled_image 1 #define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 #define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { VkStructureType sType; void* pNext; VkBool32 cornerSampledImage; } 
VkPhysicalDeviceCornerSampledImageFeaturesNV; #define VK_IMG_format_pvrtc 1 #define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 #define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" #define VK_NV_external_memory_capabilities 1 #define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 #define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" typedef enum VkExternalMemoryHandleTypeFlagBitsNV { VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF } VkExternalMemoryHandleTypeFlagBitsNV; typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; typedef enum VkExternalMemoryFeatureFlagBitsNV { VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF } VkExternalMemoryFeatureFlagBitsNV; typedef VkFlags VkExternalMemoryFeatureFlagsNV; typedef struct VkExternalImageFormatPropertiesNV { VkImageFormatProperties imageFormatProperties; VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; } VkExternalImageFormatPropertiesNV; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL 
vkGetPhysicalDeviceExternalImageFormatPropertiesNV( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); #endif #define VK_NV_external_memory 1 #define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 #define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" typedef struct VkExternalMemoryImageCreateInfoNV { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagsNV handleTypes; } VkExternalMemoryImageCreateInfoNV; typedef struct VkExportMemoryAllocateInfoNV { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagsNV handleTypes; } VkExportMemoryAllocateInfoNV; #define VK_EXT_validation_flags 1 #define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2 #define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" typedef enum VkValidationCheckEXT { VK_VALIDATION_CHECK_ALL_EXT = 0, VK_VALIDATION_CHECK_SHADERS_EXT = 1, VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF } VkValidationCheckEXT; typedef struct VkValidationFlagsEXT { VkStructureType sType; const void* pNext; uint32_t disabledValidationCheckCount; const VkValidationCheckEXT* pDisabledValidationChecks; } VkValidationFlagsEXT; #define VK_EXT_shader_subgroup_ballot 1 #define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 #define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" #define VK_EXT_shader_subgroup_vote 1 #define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 #define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" #define VK_EXT_texture_compression_astc_hdr 1 #define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 #define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" typedef VkPhysicalDeviceTextureCompressionASTCHDRFeatures 
/*
 * Generated Vulkan extension interface declarations (Khronos registry, vk.xml).
 * NOTE(review): vendored header — do not hand-edit; regenerate from the registry.
 * This section declares: VK_EXT_astc_decode_mode (VkImageViewASTCDecodeModeEXT
 * pNext chain struct and its feature struct), VK_EXT_pipeline_robustness
 * (buffer/image behavior enums plus feature, properties, and pipeline-create
 * chain structs), VK_EXT_conditional_rendering (begin-info struct, feature
 * struct, inheritance struct, and vkCmdBegin/EndConditionalRenderingEXT),
 * VK_NV_clip_space_w_scaling (VkViewportWScalingNV and
 * vkCmdSetViewportWScalingNV), VK_EXT_direct_mode_display
 * (vkReleaseDisplayEXT), and the start of VK_EXT_display_surface_counter
 * (VkSurfaceCounterFlagBitsEXT and VkSurfaceCapabilities2EXT).
 */
VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; #define VK_EXT_astc_decode_mode 1 #define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 #define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" typedef struct VkImageViewASTCDecodeModeEXT { VkStructureType sType; const void* pNext; VkFormat decodeMode; } VkImageViewASTCDecodeModeEXT; typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 decodeModeSharedExponent; } VkPhysicalDeviceASTCDecodeFeaturesEXT; #define VK_EXT_pipeline_robustness 1 #define VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION 1 #define VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_pipeline_robustness" typedef enum VkPipelineRobustnessBufferBehaviorEXT { VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = 0, VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = 1, VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = 2, VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = 3, VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF } VkPipelineRobustnessBufferBehaviorEXT; typedef enum VkPipelineRobustnessImageBehaviorEXT { VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = 0, VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = 1, VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = 2, VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = 3, VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF } VkPipelineRobustnessImageBehaviorEXT; typedef struct VkPhysicalDevicePipelineRobustnessFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 pipelineRobustness; } VkPhysicalDevicePipelineRobustnessFeaturesEXT; typedef struct VkPhysicalDevicePipelineRobustnessPropertiesEXT { VkStructureType sType; void* pNext; VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessStorageBuffers; VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessUniformBuffers; VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessVertexInputs;
VkPipelineRobustnessImageBehaviorEXT defaultRobustnessImages; } VkPhysicalDevicePipelineRobustnessPropertiesEXT; typedef struct VkPipelineRobustnessCreateInfoEXT { VkStructureType sType; const void* pNext; VkPipelineRobustnessBufferBehaviorEXT storageBuffers; VkPipelineRobustnessBufferBehaviorEXT uniformBuffers; VkPipelineRobustnessBufferBehaviorEXT vertexInputs; VkPipelineRobustnessImageBehaviorEXT images; } VkPipelineRobustnessCreateInfoEXT; #define VK_EXT_conditional_rendering 1 #define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 #define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" typedef enum VkConditionalRenderingFlagBitsEXT { VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkConditionalRenderingFlagBitsEXT; typedef VkFlags VkConditionalRenderingFlagsEXT; typedef struct VkConditionalRenderingBeginInfoEXT { VkStructureType sType; const void* pNext; VkBuffer buffer; VkDeviceSize offset; VkConditionalRenderingFlagsEXT flags; } VkConditionalRenderingBeginInfoEXT; typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 conditionalRendering; VkBool32 inheritedConditionalRendering; } VkPhysicalDeviceConditionalRenderingFeaturesEXT; typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { VkStructureType sType; const void* pNext; VkBool32 conditionalRenderingEnable; } VkCommandBufferInheritanceConditionalRenderingInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); VKAPI_ATTR void VKAPI_CALL
vkCmdEndConditionalRenderingEXT( VkCommandBuffer commandBuffer); #endif #define VK_NV_clip_space_w_scaling 1 #define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 #define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" typedef struct VkViewportWScalingNV { float xcoeff; float ycoeff; } VkViewportWScalingNV; typedef struct VkPipelineViewportWScalingStateCreateInfoNV { VkStructureType sType; const void* pNext; VkBool32 viewportWScalingEnable; uint32_t viewportCount; const VkViewportWScalingNV* pViewportWScalings; } VkPipelineViewportWScalingStateCreateInfoNV; typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); #endif #define VK_EXT_direct_mode_display 1 #define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 #define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( VkPhysicalDevice physicalDevice, VkDisplayKHR display); #endif #define VK_EXT_display_surface_counter 1 #define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 #define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" typedef enum VkSurfaceCounterFlagBitsEXT { VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT, VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkSurfaceCounterFlagBitsEXT; typedef VkFlags VkSurfaceCounterFlagsEXT; typedef struct VkSurfaceCapabilities2EXT { VkStructureType sType; void* pNext; uint32_t minImageCount; uint32_t maxImageCount;
/*
 * Generated Vulkan extension interface declarations (Khronos registry, vk.xml).
 * NOTE(review): vendored header — do not hand-edit; regenerate from the registry.
 * This section declares: the remainder of VkSurfaceCapabilities2EXT and
 * vkGetPhysicalDeviceSurfaceCapabilities2EXT (VK_EXT_display_surface_counter),
 * VK_EXT_display_control (power-state / device-event / display-event enums and
 * info structs, VkSwapchainCounterCreateInfoEXT, and the
 * vkDisplayPowerControlEXT / vkRegisterDeviceEventEXT /
 * vkRegisterDisplayEventEXT / vkGetSwapchainCounterEXT entry points),
 * VK_GOOGLE_display_timing (refresh-cycle and presentation-timing structs plus
 * vkGetRefreshCycleDurationGOOGLE / vkGetPastPresentationTimingGOOGLE),
 * VK_NV_sample_mask_override_coverage, VK_NV_geometry_shader_passthrough,
 * VK_NV_viewport_array2 (with legacy-spelling macro aliases), and the start of
 * VK_NVX_multiview_per_view_attributes.
 */
VkExtent2D currentExtent; VkExtent2D minImageExtent; VkExtent2D maxImageExtent; uint32_t maxImageArrayLayers; VkSurfaceTransformFlagsKHR supportedTransforms; VkSurfaceTransformFlagBitsKHR currentTransform; VkCompositeAlphaFlagsKHR supportedCompositeAlpha; VkImageUsageFlags supportedUsageFlags; VkSurfaceCounterFlagsEXT supportedSurfaceCounters; } VkSurfaceCapabilities2EXT; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); #endif #define VK_EXT_display_control 1 #define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 #define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" typedef enum VkDisplayPowerStateEXT { VK_DISPLAY_POWER_STATE_OFF_EXT = 0, VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, VK_DISPLAY_POWER_STATE_ON_EXT = 2, VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF } VkDisplayPowerStateEXT; typedef enum VkDeviceEventTypeEXT { VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF } VkDeviceEventTypeEXT; typedef enum VkDisplayEventTypeEXT { VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF } VkDisplayEventTypeEXT; typedef struct VkDisplayPowerInfoEXT { VkStructureType sType; const void* pNext; VkDisplayPowerStateEXT powerState; } VkDisplayPowerInfoEXT; typedef struct VkDeviceEventInfoEXT { VkStructureType sType; const void* pNext; VkDeviceEventTypeEXT deviceEvent; } VkDeviceEventInfoEXT; typedef struct VkDisplayEventInfoEXT { VkStructureType sType; const void* pNext; VkDisplayEventTypeEXT displayEvent; } VkDisplayEventInfoEXT; typedef struct VkSwapchainCounterCreateInfoEXT { VkStructureType sType; const void* pNext; VkSurfaceCounterFlagsEXT
surfaceCounters; } VkSwapchainCounterCreateInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); #endif #define VK_GOOGLE_display_timing 1 #define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 #define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" typedef struct VkRefreshCycleDurationGOOGLE { uint64_t refreshDuration; } VkRefreshCycleDurationGOOGLE; typedef struct VkPastPresentationTimingGOOGLE { uint32_t presentID; uint64_t desiredPresentTime; uint64_t actualPresentTime; uint64_t earliestPresentTime; uint64_t presentMargin; } VkPastPresentationTimingGOOGLE; typedef struct VkPresentTimeGOOGLE { uint32_t
presentID; uint64_t desiredPresentTime; } VkPresentTimeGOOGLE; typedef struct VkPresentTimesInfoGOOGLE { VkStructureType sType; const void* pNext; uint32_t swapchainCount; const VkPresentTimeGOOGLE* pTimes; } VkPresentTimesInfoGOOGLE; typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); #endif #define VK_NV_sample_mask_override_coverage 1 #define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 #define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" #define VK_NV_geometry_shader_passthrough 1 #define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 #define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" #define VK_NV_viewport_array2 1 #define VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION 1 #define VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME "VK_NV_viewport_array2" #define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION #define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME #define VK_NVX_multiview_per_view_attributes 1 #define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 #define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { VkStructureType
/*
 * Generated Vulkan extension interface declarations (Khronos registry, vk.xml).
 * NOTE(review): vendored header — do not hand-edit; regenerate from the registry.
 * This section declares: the tail of
 * VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX,
 * VK_NV_viewport_swizzle (coordinate-swizzle enum and pipeline-state struct),
 * VK_EXT_discard_rectangles (mode enum, properties/create-info structs,
 * vkCmdSetDiscardRectangleEXT), VK_EXT_conservative_rasterization (mode enum
 * plus properties and pipeline-state structs), VK_EXT_depth_clip_enable,
 * VK_EXT_swapchain_colorspace, VK_EXT_hdr_metadata (VkXYColorEXT,
 * VkHdrMetadataEXT, vkSetHdrMetadataEXT), VK_EXT_external_memory_dma_buf,
 * VK_EXT_queue_family_foreign (VK_QUEUE_FAMILY_FOREIGN_EXT sentinel value),
 * and the start of VK_EXT_debug_utils (messenger handle, severity/type flag
 * enums, label / object-name / callback-data structs).
 */
sType; void* pNext; VkBool32 perViewPositionAllComponents; } VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; #define VK_NV_viewport_swizzle 1 #define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 #define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" typedef enum VkViewportCoordinateSwizzleNV { VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF } VkViewportCoordinateSwizzleNV; typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; typedef struct VkViewportSwizzleNV { VkViewportCoordinateSwizzleNV x; VkViewportCoordinateSwizzleNV y; VkViewportCoordinateSwizzleNV z; VkViewportCoordinateSwizzleNV w; } VkViewportSwizzleNV; typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { VkStructureType sType; const void* pNext; VkPipelineViewportSwizzleStateCreateFlagsNV flags; uint32_t viewportCount; const VkViewportSwizzleNV* pViewportSwizzles; } VkPipelineViewportSwizzleStateCreateInfoNV; #define VK_EXT_discard_rectangles 1 #define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 #define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" typedef enum VkDiscardRectangleModeEXT { VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF } VkDiscardRectangleModeEXT; typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { VkStructureType sType; void* pNext; uint32_t maxDiscardRectangles; } VkPhysicalDeviceDiscardRectanglePropertiesEXT; typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT
{ VkStructureType sType; const void* pNext; VkPipelineDiscardRectangleStateCreateFlagsEXT flags; VkDiscardRectangleModeEXT discardRectangleMode; uint32_t discardRectangleCount; const VkRect2D* pDiscardRectangles; } VkPipelineDiscardRectangleStateCreateInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); #endif #define VK_EXT_conservative_rasterization 1 #define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 #define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" typedef enum VkConservativeRasterizationModeEXT { VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF } VkConservativeRasterizationModeEXT; typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { VkStructureType sType; void* pNext; float primitiveOverestimationSize; float maxExtraPrimitiveOverestimationSize; float extraPrimitiveOverestimationSizeGranularity; VkBool32 primitiveUnderestimation; VkBool32 conservativePointAndLineRasterization; VkBool32 degenerateTrianglesRasterized; VkBool32 degenerateLinesRasterized; VkBool32 fullyCoveredFragmentShaderInputVariable; VkBool32 conservativeRasterizationPostDepthCoverage; } VkPhysicalDeviceConservativeRasterizationPropertiesEXT; typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkPipelineRasterizationConservativeStateCreateFlagsEXT flags;
VkConservativeRasterizationModeEXT conservativeRasterizationMode; float extraPrimitiveOverestimationSize; } VkPipelineRasterizationConservativeStateCreateInfoEXT; #define VK_EXT_depth_clip_enable 1 #define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 #define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 depthClipEnable; } VkPhysicalDeviceDepthClipEnableFeaturesEXT; typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; VkBool32 depthClipEnable; } VkPipelineRasterizationDepthClipStateCreateInfoEXT; #define VK_EXT_swapchain_colorspace 1 #define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 #define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" #define VK_EXT_hdr_metadata 1 #define VK_EXT_HDR_METADATA_SPEC_VERSION 2 #define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" typedef struct VkXYColorEXT { float x; float y; } VkXYColorEXT; typedef struct VkHdrMetadataEXT { VkStructureType sType; const void* pNext; VkXYColorEXT displayPrimaryRed; VkXYColorEXT displayPrimaryGreen; VkXYColorEXT displayPrimaryBlue; VkXYColorEXT whitePoint; float maxLuminance; float minLuminance; float maxContentLightLevel; float maxFrameAverageLightLevel; } VkHdrMetadataEXT; typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); #endif #define VK_EXT_external_memory_dma_buf 1 #define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 #define
VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" #define VK_EXT_queue_family_foreign 1 #define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 #define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" #define VK_QUEUE_FAMILY_FOREIGN_EXT (~2U) #define VK_EXT_debug_utils 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) #define VK_EXT_DEBUG_UTILS_SPEC_VERSION 2 #define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkDebugUtilsMessageSeverityFlagBitsEXT; typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkDebugUtilsMessageTypeFlagBitsEXT; typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; typedef struct VkDebugUtilsLabelEXT { VkStructureType sType; const void* pNext; const char* pLabelName; float color[4]; } VkDebugUtilsLabelEXT; typedef struct VkDebugUtilsObjectNameInfoEXT { VkStructureType sType; const void* pNext; VkObjectType objectType; uint64_t objectHandle; const char* pObjectName; } VkDebugUtilsObjectNameInfoEXT; typedef struct VkDebugUtilsMessengerCallbackDataEXT { VkStructureType sType; const void* pNext; VkDebugUtilsMessengerCallbackDataFlagsEXT flags; const char* pMessageIdName; int32_t messageIdNumber; const char* pMessage;
/*
 * Generated Vulkan extension interface declarations (Khronos registry, vk.xml).
 * NOTE(review): vendored header — do not hand-edit; regenerate from the registry.
 * This section declares: the remainder of VK_EXT_debug_utils (callback-data
 * label/object arrays, PFN_vkDebugUtilsMessengerCallbackEXT, messenger
 * create-info, object-tag info, the full set of label / object-name /
 * messenger PFN typedefs and their prototypes), the VK_EXT_sampler_filter_minmax
 * typedef aliases to the promoted core types, VK_AMD_gpu_shader_int16,
 * VK_AMD_mixed_attachment_samples, VK_AMD_shader_fragment_mask,
 * VK_EXT_inline_uniform_block typedef aliases to the promoted core types,
 * VK_EXT_shader_stencil_export, and the start of VK_EXT_sample_locations.
 */
uint32_t queueLabelCount; const VkDebugUtilsLabelEXT* pQueueLabels; uint32_t cmdBufLabelCount; const VkDebugUtilsLabelEXT* pCmdBufLabels; uint32_t objectCount; const VkDebugUtilsObjectNameInfoEXT* pObjects; } VkDebugUtilsMessengerCallbackDataEXT; typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData); typedef struct VkDebugUtilsMessengerCreateInfoEXT { VkStructureType sType; const void* pNext; VkDebugUtilsMessengerCreateFlagsEXT flags; VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; VkDebugUtilsMessageTypeFlagsEXT messageType; PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; void* pUserData; } VkDebugUtilsMessengerCreateInfoEXT; typedef struct VkDebugUtilsObjectTagInfoEXT { VkStructureType sType; const void* pNext; VkObjectType objectType; uint64_t objectHandle; uint64_t tagName; size_t tagSize; const void* pTag; } VkDebugUtilsObjectTagInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); typedef VkResult
(VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( VkQueue queue); VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( VkCommandBuffer commandBuffer); VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( VkInstance instance,
VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); #endif #define VK_EXT_sampler_filter_minmax 1 #define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 #define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" typedef VkSamplerReductionMode VkSamplerReductionModeEXT; typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; #define VK_AMD_gpu_shader_int16 1 #define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 #define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" #define VK_AMD_mixed_attachment_samples 1 #define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 #define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" #define VK_AMD_shader_fragment_mask 1 #define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 #define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" #define VK_EXT_inline_uniform_block 1 #define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 #define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" typedef VkPhysicalDeviceInlineUniformBlockFeatures VkPhysicalDeviceInlineUniformBlockFeaturesEXT; typedef VkPhysicalDeviceInlineUniformBlockProperties VkPhysicalDeviceInlineUniformBlockPropertiesEXT; typedef VkWriteDescriptorSetInlineUniformBlock VkWriteDescriptorSetInlineUniformBlockEXT; typedef VkDescriptorPoolInlineUniformBlockCreateInfo VkDescriptorPoolInlineUniformBlockCreateInfoEXT; #define VK_EXT_shader_stencil_export 1 #define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 #define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" #define VK_EXT_sample_locations 1 #define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 #define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" typedef struct
/*
 * Generated Vulkan extension interface declarations (Khronos registry, vk.xml).
 * NOTE(review): vendored header — do not hand-edit; regenerate from the registry.
 * This section declares: VK_EXT_sample_locations (VkSampleLocationEXT,
 * sample-locations info / per-attachment / per-subpass structs, render-pass
 * and pipeline chain structs, device properties, VkMultisamplePropertiesEXT,
 * and the vkCmdSetSampleLocationsEXT /
 * vkGetPhysicalDeviceMultisamplePropertiesEXT entry points),
 * VK_EXT_blend_operation_advanced (VkBlendOverlapEXT plus feature, properties,
 * and pipeline-state structs), VK_NV_fragment_coverage_to_color,
 * VK_NV_framebuffer_mixed_samples (coverage-modulation enum and pipeline-state
 * struct), VK_NV_fill_rectangle, VK_NV_shader_sm_builtins (properties and
 * feature structs), VK_EXT_post_depth_coverage, and the start of
 * VK_EXT_image_drm_format_modifier (DRM format-modifier property/create-info
 * structs; the final PFN typedef is truncated here and continues past this
 * region).
 */
VkSampleLocationEXT { float x; float y; } VkSampleLocationEXT; typedef struct VkSampleLocationsInfoEXT { VkStructureType sType; const void* pNext; VkSampleCountFlagBits sampleLocationsPerPixel; VkExtent2D sampleLocationGridSize; uint32_t sampleLocationsCount; const VkSampleLocationEXT* pSampleLocations; } VkSampleLocationsInfoEXT; typedef struct VkAttachmentSampleLocationsEXT { uint32_t attachmentIndex; VkSampleLocationsInfoEXT sampleLocationsInfo; } VkAttachmentSampleLocationsEXT; typedef struct VkSubpassSampleLocationsEXT { uint32_t subpassIndex; VkSampleLocationsInfoEXT sampleLocationsInfo; } VkSubpassSampleLocationsEXT; typedef struct VkRenderPassSampleLocationsBeginInfoEXT { VkStructureType sType; const void* pNext; uint32_t attachmentInitialSampleLocationsCount; const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; uint32_t postSubpassSampleLocationsCount; const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; } VkRenderPassSampleLocationsBeginInfoEXT; typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkBool32 sampleLocationsEnable; VkSampleLocationsInfoEXT sampleLocationsInfo; } VkPipelineSampleLocationsStateCreateInfoEXT; typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { VkStructureType sType; void* pNext; VkSampleCountFlags sampleLocationSampleCounts; VkExtent2D maxSampleLocationGridSize; float sampleLocationCoordinateRange[2]; uint32_t sampleLocationSubPixelBits; VkBool32 variableSampleLocations; } VkPhysicalDeviceSampleLocationsPropertiesEXT; typedef struct VkMultisamplePropertiesEXT { VkStructureType sType; void* pNext; VkExtent2D maxSampleLocationGridSize; } VkMultisamplePropertiesEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples,
VkMultisamplePropertiesEXT* pMultisampleProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); #endif #define VK_EXT_blend_operation_advanced 1 #define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 #define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" typedef enum VkBlendOverlapEXT { VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, VK_BLEND_OVERLAP_DISJOINT_EXT = 1, VK_BLEND_OVERLAP_CONJOINT_EXT = 2, VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF } VkBlendOverlapEXT; typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 advancedBlendCoherentOperations; } VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { VkStructureType sType; void* pNext; uint32_t advancedBlendMaxColorAttachments; VkBool32 advancedBlendIndependentBlend; VkBool32 advancedBlendNonPremultipliedSrcColor; VkBool32 advancedBlendNonPremultipliedDstColor; VkBool32 advancedBlendCorrelatedOverlap; VkBool32 advancedBlendAllOperations; } VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkBool32 srcPremultiplied; VkBool32 dstPremultiplied; VkBlendOverlapEXT blendOverlap; } VkPipelineColorBlendAdvancedStateCreateInfoEXT; #define VK_NV_fragment_coverage_to_color 1 #define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 #define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; typedef struct VkPipelineCoverageToColorStateCreateInfoNV { VkStructureType sType;
const void* pNext; VkPipelineCoverageToColorStateCreateFlagsNV flags; VkBool32 coverageToColorEnable; uint32_t coverageToColorLocation; } VkPipelineCoverageToColorStateCreateInfoNV; #define VK_NV_framebuffer_mixed_samples 1 #define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 #define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" typedef enum VkCoverageModulationModeNV { VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF } VkCoverageModulationModeNV; typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; typedef struct VkPipelineCoverageModulationStateCreateInfoNV { VkStructureType sType; const void* pNext; VkPipelineCoverageModulationStateCreateFlagsNV flags; VkCoverageModulationModeNV coverageModulationMode; VkBool32 coverageModulationTableEnable; uint32_t coverageModulationTableCount; const float* pCoverageModulationTable; } VkPipelineCoverageModulationStateCreateInfoNV; #define VK_NV_fill_rectangle 1 #define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 #define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" #define VK_NV_shader_sm_builtins 1 #define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 #define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { VkStructureType sType; void* pNext; uint32_t shaderSMCount; uint32_t shaderWarpsPerSM; } VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { VkStructureType sType; void* pNext; VkBool32 shaderSMBuiltins; } VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; #define VK_EXT_post_depth_coverage 1 #define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 #define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" #define VK_EXT_image_drm_format_modifier 1 #define
VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 2 #define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" typedef struct VkDrmFormatModifierPropertiesEXT { uint64_t drmFormatModifier; uint32_t drmFormatModifierPlaneCount; VkFormatFeatureFlags drmFormatModifierTilingFeatures; } VkDrmFormatModifierPropertiesEXT; typedef struct VkDrmFormatModifierPropertiesListEXT { VkStructureType sType; void* pNext; uint32_t drmFormatModifierCount; VkDrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties; } VkDrmFormatModifierPropertiesListEXT; typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { VkStructureType sType; const void* pNext; uint64_t drmFormatModifier; VkSharingMode sharingMode; uint32_t queueFamilyIndexCount; const uint32_t* pQueueFamilyIndices; } VkPhysicalDeviceImageDrmFormatModifierInfoEXT; typedef struct VkImageDrmFormatModifierListCreateInfoEXT { VkStructureType sType; const void* pNext; uint32_t drmFormatModifierCount; const uint64_t* pDrmFormatModifiers; } VkImageDrmFormatModifierListCreateInfoEXT; typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { VkStructureType sType; const void* pNext; uint64_t drmFormatModifier; uint32_t drmFormatModifierPlaneCount; const VkSubresourceLayout* pPlaneLayouts; } VkImageDrmFormatModifierExplicitCreateInfoEXT; typedef struct VkImageDrmFormatModifierPropertiesEXT { VkStructureType sType; void* pNext; uint64_t drmFormatModifier; } VkImageDrmFormatModifierPropertiesEXT; typedef struct VkDrmFormatModifierProperties2EXT { uint64_t drmFormatModifier; uint32_t drmFormatModifierPlaneCount; VkFormatFeatureFlags2 drmFormatModifierTilingFeatures; } VkDrmFormatModifierProperties2EXT; typedef struct VkDrmFormatModifierPropertiesList2EXT { VkStructureType sType; void* pNext; uint32_t drmFormatModifierCount; VkDrmFormatModifierProperties2EXT* pDrmFormatModifierProperties; } VkDrmFormatModifierPropertiesList2EXT; typedef VkResult (VKAPI_PTR
*PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); #endif #define VK_EXT_validation_cache 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) #define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 #define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" typedef enum VkValidationCacheHeaderVersionEXT { VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF } VkValidationCacheHeaderVersionEXT; typedef VkFlags VkValidationCacheCreateFlagsEXT; typedef struct VkValidationCacheCreateInfoEXT { VkStructureType sType; const void* pNext; VkValidationCacheCreateFlagsEXT flags; size_t initialDataSize; const void* pInitialData; } VkValidationCacheCreateInfoEXT; typedef struct VkShaderModuleValidationCacheCreateInfoEXT { VkStructureType sType; const void* pNext; VkValidationCacheEXT validationCache; } VkShaderModuleValidationCacheCreateInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( VkDevice device, const 
VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); #endif #define VK_EXT_descriptor_indexing 1 #define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 #define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; typedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; #define VK_EXT_shader_viewport_index_layer 1 #define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 #define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" #define VK_NV_shading_rate_image 1 #define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 #define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" typedef enum VkShadingRatePaletteEntryNV { VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, 
VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF } VkShadingRatePaletteEntryNV; typedef enum VkCoarseSampleOrderTypeNV { VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF } VkCoarseSampleOrderTypeNV; typedef struct VkShadingRatePaletteNV { uint32_t shadingRatePaletteEntryCount; const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; } VkShadingRatePaletteNV; typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { VkStructureType sType; const void* pNext; VkBool32 shadingRateImageEnable; uint32_t viewportCount; const VkShadingRatePaletteNV* pShadingRatePalettes; } VkPipelineViewportShadingRateImageStateCreateInfoNV; typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { VkStructureType sType; void* pNext; VkBool32 shadingRateImage; VkBool32 shadingRateCoarseSampleOrder; } VkPhysicalDeviceShadingRateImageFeaturesNV; typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { VkStructureType sType; void* pNext; VkExtent2D shadingRateTexelSize; uint32_t shadingRatePaletteSize; uint32_t shadingRateMaxCoarseSamples; } VkPhysicalDeviceShadingRateImagePropertiesNV; typedef struct VkCoarseSampleLocationNV { uint32_t pixelX; 
uint32_t pixelY; uint32_t sample; } VkCoarseSampleLocationNV; typedef struct VkCoarseSampleOrderCustomNV { VkShadingRatePaletteEntryNV shadingRate; uint32_t sampleCount; uint32_t sampleLocationCount; const VkCoarseSampleLocationNV* pSampleLocations; } VkCoarseSampleOrderCustomNV; typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { VkStructureType sType; const void* pNext; VkCoarseSampleOrderTypeNV sampleOrderType; uint32_t customSampleOrderCount; const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; } VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); typedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); #endif #define VK_NV_ray_tracing 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) #define VK_NV_RAY_TRACING_SPEC_VERSION 3 #define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" #define VK_SHADER_UNUSED_KHR (~0U) #define VK_SHADER_UNUSED_NV 
VK_SHADER_UNUSED_KHR typedef enum VkRayTracingShaderGroupTypeKHR { VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkRayTracingShaderGroupTypeKHR; typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; typedef enum VkGeometryTypeKHR { VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, VK_GEOMETRY_TYPE_AABBS_KHR = 1, VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkGeometryTypeKHR; typedef VkGeometryTypeKHR VkGeometryTypeNV; typedef enum VkAccelerationStructureTypeKHR { VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2, VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkAccelerationStructureTypeKHR; typedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV; typedef enum VkCopyAccelerationStructureModeKHR { VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0, VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1, VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2, VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3, VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF } VkCopyAccelerationStructureModeKHR; typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF } VkAccelerationStructureMemoryRequirementsTypeNV; typedef enum VkGeometryFlagBitsKHR { VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002, VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR, VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR, VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkGeometryFlagBitsKHR; typedef VkFlags VkGeometryFlagsKHR; typedef VkGeometryFlagsKHR VkGeometryFlagsNV; typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV; typedef enum VkGeometryInstanceFlagBitsKHR { VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001, VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR = 0x00000002, VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004, VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008, VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR, VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR, VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR, VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR, 
VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR, VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkGeometryInstanceFlagBitsKHR; typedef VkFlags VkGeometryInstanceFlagsKHR; typedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV; typedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV; typedef enum VkBuildAccelerationStructureFlagBitsKHR { VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001, VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008, VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x00000010, VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV = 0x00000020, VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR, VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkBuildAccelerationStructureFlagBitsKHR; typedef VkFlags VkBuildAccelerationStructureFlagsKHR; typedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV; typedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV; typedef struct VkRayTracingShaderGroupCreateInfoNV { VkStructureType sType; const void* pNext; VkRayTracingShaderGroupTypeKHR type; uint32_t generalShader; uint32_t closestHitShader; uint32_t anyHitShader; uint32_t intersectionShader; } VkRayTracingShaderGroupCreateInfoNV; typedef 
struct VkRayTracingPipelineCreateInfoNV { VkStructureType sType; const void* pNext; VkPipelineCreateFlags flags; uint32_t stageCount; const VkPipelineShaderStageCreateInfo* pStages; uint32_t groupCount; const VkRayTracingShaderGroupCreateInfoNV* pGroups; uint32_t maxRecursionDepth; VkPipelineLayout layout; VkPipeline basePipelineHandle; int32_t basePipelineIndex; } VkRayTracingPipelineCreateInfoNV; typedef struct VkGeometryTrianglesNV { VkStructureType sType; const void* pNext; VkBuffer vertexData; VkDeviceSize vertexOffset; uint32_t vertexCount; VkDeviceSize vertexStride; VkFormat vertexFormat; VkBuffer indexData; VkDeviceSize indexOffset; uint32_t indexCount; VkIndexType indexType; VkBuffer transformData; VkDeviceSize transformOffset; } VkGeometryTrianglesNV; typedef struct VkGeometryAABBNV { VkStructureType sType; const void* pNext; VkBuffer aabbData; uint32_t numAABBs; uint32_t stride; VkDeviceSize offset; } VkGeometryAABBNV; typedef struct VkGeometryDataNV { VkGeometryTrianglesNV triangles; VkGeometryAABBNV aabbs; } VkGeometryDataNV; typedef struct VkGeometryNV { VkStructureType sType; const void* pNext; VkGeometryTypeKHR geometryType; VkGeometryDataNV geometry; VkGeometryFlagsKHR flags; } VkGeometryNV; typedef struct VkAccelerationStructureInfoNV { VkStructureType sType; const void* pNext; VkAccelerationStructureTypeNV type; VkBuildAccelerationStructureFlagsNV flags; uint32_t instanceCount; uint32_t geometryCount; const VkGeometryNV* pGeometries; } VkAccelerationStructureInfoNV; typedef struct VkAccelerationStructureCreateInfoNV { VkStructureType sType; const void* pNext; VkDeviceSize compactedSize; VkAccelerationStructureInfoNV info; } VkAccelerationStructureCreateInfoNV; typedef struct VkBindAccelerationStructureMemoryInfoNV { VkStructureType sType; const void* pNext; VkAccelerationStructureNV accelerationStructure; VkDeviceMemory memory; VkDeviceSize memoryOffset; uint32_t deviceIndexCount; const uint32_t* pDeviceIndices; } 
VkBindAccelerationStructureMemoryInfoNV; typedef struct VkWriteDescriptorSetAccelerationStructureNV { VkStructureType sType; const void* pNext; uint32_t accelerationStructureCount; const VkAccelerationStructureNV* pAccelerationStructures; } VkWriteDescriptorSetAccelerationStructureNV; typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { VkStructureType sType; const void* pNext; VkAccelerationStructureMemoryRequirementsTypeNV type; VkAccelerationStructureNV accelerationStructure; } VkAccelerationStructureMemoryRequirementsInfoNV; typedef struct VkPhysicalDeviceRayTracingPropertiesNV { VkStructureType sType; void* pNext; uint32_t shaderGroupHandleSize; uint32_t maxRecursionDepth; uint32_t maxShaderGroupStride; uint32_t shaderGroupBaseAlignment; uint64_t maxGeometryCount; uint64_t maxInstanceCount; uint64_t maxTriangleCount; uint32_t maxDescriptorSetAccelerationStructures; } VkPhysicalDeviceRayTracingPropertiesNV; typedef struct VkTransformMatrixKHR { float matrix[3][4]; } VkTransformMatrixKHR; typedef VkTransformMatrixKHR VkTransformMatrixNV; typedef struct VkAabbPositionsKHR { float minX; float minY; float minZ; float maxX; float maxY; float maxZ; } VkAabbPositionsKHR; typedef VkAabbPositionsKHR VkAabbPositionsNV; typedef struct VkAccelerationStructureInstanceKHR { VkTransformMatrixKHR transform; uint32_t instanceCustomIndex:24; uint32_t mask:8; uint32_t instanceShaderBindingTableRecordOffset:24; VkGeometryInstanceFlagsKHR flags:8; uint64_t accelerationStructureReference; } VkAccelerationStructureInstanceKHR; typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const 
VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); typedef VkResult (VKAPI_PTR 
*PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR 
mode); VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( VkDevice device, VkPipeline pipeline, uint32_t shader); #endif #define VK_NV_representative_fragment_test 1 #define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 #define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { 
VkStructureType sType; void* pNext; VkBool32 representativeFragmentTest; } VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { VkStructureType sType; const void* pNext; VkBool32 representativeFragmentTestEnable; } VkPipelineRepresentativeFragmentTestStateCreateInfoNV; #define VK_EXT_filter_cubic 1 #define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 #define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { VkStructureType sType; void* pNext; VkImageViewType imageViewType; } VkPhysicalDeviceImageViewImageFormatInfoEXT; typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { VkStructureType sType; void* pNext; VkBool32 filterCubic; VkBool32 filterCubicMinmax; } VkFilterCubicImageViewImageFormatPropertiesEXT; #define VK_QCOM_render_pass_shader_resolve 1 #define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4 #define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME "VK_QCOM_render_pass_shader_resolve" #define VK_EXT_global_priority 1 #define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 #define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" typedef VkQueueGlobalPriorityKHR VkQueueGlobalPriorityEXT; typedef VkDeviceQueueGlobalPriorityCreateInfoKHR VkDeviceQueueGlobalPriorityCreateInfoEXT; #define VK_EXT_external_memory_host 1 #define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 #define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" typedef struct VkImportMemoryHostPointerInfoEXT { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagBits handleType; void* pHostPointer; } VkImportMemoryHostPointerInfoEXT; typedef struct VkMemoryHostPointerPropertiesEXT { VkStructureType sType; void* pNext; uint32_t memoryTypeBits; } VkMemoryHostPointerPropertiesEXT; typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { VkStructureType sType; void* pNext; VkDeviceSize 
minImportedHostPointerAlignment; } VkPhysicalDeviceExternalMemoryHostPropertiesEXT; typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); #endif #define VK_AMD_buffer_marker 1 #define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 #define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); #endif #define VK_AMD_pipeline_compiler_control 1 #define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 #define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" typedef enum VkPipelineCompilerControlFlagBitsAMD { VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF } VkPipelineCompilerControlFlagBitsAMD; typedef VkFlags VkPipelineCompilerControlFlagsAMD; typedef struct VkPipelineCompilerControlCreateInfoAMD { VkStructureType sType; const void* pNext; VkPipelineCompilerControlFlagsAMD compilerControlFlags; } VkPipelineCompilerControlCreateInfoAMD; #define VK_EXT_calibrated_timestamps 1 #define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 2 #define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" typedef enum VkTimeDomainEXT { VK_TIME_DOMAIN_DEVICE_EXT = 0, VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1, 
VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2, VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF } VkTimeDomainEXT; typedef struct VkCalibratedTimestampInfoEXT { VkStructureType sType; const void* pNext; VkTimeDomainEXT timeDomain; } VkCalibratedTimestampInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); #endif #define VK_AMD_shader_core_properties 1 #define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 #define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { VkStructureType sType; void* pNext; uint32_t shaderEngineCount; uint32_t shaderArraysPerEngineCount; uint32_t computeUnitsPerShaderArray; uint32_t simdPerComputeUnit; uint32_t wavefrontsPerSimd; uint32_t wavefrontSize; uint32_t sgprsPerSimd; uint32_t minSgprAllocation; uint32_t maxSgprAllocation; uint32_t sgprAllocationGranularity; uint32_t vgprsPerSimd; uint32_t minVgprAllocation; uint32_t maxVgprAllocation; uint32_t vgprAllocationGranularity; } VkPhysicalDeviceShaderCorePropertiesAMD; #define VK_AMD_memory_overallocation_behavior 1 #define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 #define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME 
"VK_AMD_memory_overallocation_behavior" typedef enum VkMemoryOverallocationBehaviorAMD { VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF } VkMemoryOverallocationBehaviorAMD; typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { VkStructureType sType; const void* pNext; VkMemoryOverallocationBehaviorAMD overallocationBehavior; } VkDeviceMemoryOverallocationCreateInfoAMD; #define VK_EXT_vertex_attribute_divisor 1 #define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 #define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { VkStructureType sType; void* pNext; uint32_t maxVertexAttribDivisor; } VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; typedef struct VkVertexInputBindingDivisorDescriptionEXT { uint32_t binding; uint32_t divisor; } VkVertexInputBindingDivisorDescriptionEXT; typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { VkStructureType sType; const void* pNext; uint32_t vertexBindingDivisorCount; const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; } VkPipelineVertexInputDivisorStateCreateInfoEXT; typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 vertexAttributeInstanceRateDivisor; VkBool32 vertexAttributeInstanceRateZeroDivisor; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; #define VK_EXT_pipeline_creation_feedback 1 #define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 #define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" typedef VkPipelineCreationFeedbackFlagBits VkPipelineCreationFeedbackFlagBitsEXT; typedef VkPipelineCreationFeedbackFlags VkPipelineCreationFeedbackFlagsEXT; typedef VkPipelineCreationFeedbackCreateInfo 
VkPipelineCreationFeedbackCreateInfoEXT; typedef VkPipelineCreationFeedback VkPipelineCreationFeedbackEXT; #define VK_NV_shader_subgroup_partitioned 1 #define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 #define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" #define VK_NV_compute_shader_derivatives 1 #define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 #define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { VkStructureType sType; void* pNext; VkBool32 computeDerivativeGroupQuads; VkBool32 computeDerivativeGroupLinear; } VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; #define VK_NV_mesh_shader 1 #define VK_NV_MESH_SHADER_SPEC_VERSION 1 #define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { VkStructureType sType; void* pNext; VkBool32 taskShader; VkBool32 meshShader; } VkPhysicalDeviceMeshShaderFeaturesNV; typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { VkStructureType sType; void* pNext; uint32_t maxDrawMeshTasksCount; uint32_t maxTaskWorkGroupInvocations; uint32_t maxTaskWorkGroupSize[3]; uint32_t maxTaskTotalMemorySize; uint32_t maxTaskOutputCount; uint32_t maxMeshWorkGroupInvocations; uint32_t maxMeshWorkGroupSize[3]; uint32_t maxMeshTotalMemorySize; uint32_t maxMeshOutputVertices; uint32_t maxMeshOutputPrimitives; uint32_t maxMeshMultiviewViewCount; uint32_t meshOutputPerVertexGranularity; uint32_t meshOutputPerPrimitiveGranularity; } VkPhysicalDeviceMeshShaderPropertiesNV; typedef struct VkDrawMeshTasksIndirectCommandNV { uint32_t taskCount; uint32_t firstTask; } VkDrawMeshTasksIndirectCommandNV; typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, 
uint32_t drawCount, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); #endif #define VK_NV_fragment_shader_barycentric 1 #define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 #define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" typedef VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; #define VK_NV_shader_image_footprint 1 #define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 #define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { VkStructureType sType; void* pNext; VkBool32 imageFootprint; } VkPhysicalDeviceShaderImageFootprintFeaturesNV; #define VK_NV_scissor_exclusive 1 #define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1 #define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { VkStructureType sType; const void* pNext; uint32_t exclusiveScissorCount; const VkRect2D* pExclusiveScissors; } VkPipelineViewportExclusiveScissorStateCreateInfoNV; typedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV { VkStructureType sType; void* pNext; VkBool32 exclusiveScissor; } 
VkPhysicalDeviceExclusiveScissorFeaturesNV; typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); #endif #define VK_NV_device_diagnostic_checkpoints 1 #define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 #define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" typedef struct VkQueueFamilyCheckpointPropertiesNV { VkStructureType sType; void* pNext; VkPipelineStageFlags checkpointExecutionStageMask; } VkQueueFamilyCheckpointPropertiesNV; typedef struct VkCheckpointDataNV { VkStructureType sType; void* pNext; VkPipelineStageFlagBits stage; void* pCheckpointMarker; } VkCheckpointDataNV; typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( VkCommandBuffer commandBuffer, const void* pCheckpointMarker); VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); #endif #define VK_INTEL_shader_integer_functions2 1 #define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 #define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { VkStructureType sType; void* pNext; VkBool32 shaderIntegerFunctions2; } VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; #define VK_INTEL_performance_query 1 
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) #define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2 #define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" typedef enum VkPerformanceConfigurationTypeINTEL { VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF } VkPerformanceConfigurationTypeINTEL; typedef enum VkQueryPoolSamplingModeINTEL { VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF } VkQueryPoolSamplingModeINTEL; typedef enum VkPerformanceOverrideTypeINTEL { VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF } VkPerformanceOverrideTypeINTEL; typedef enum VkPerformanceParameterTypeINTEL { VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF } VkPerformanceParameterTypeINTEL; typedef enum VkPerformanceValueTypeINTEL { VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF } VkPerformanceValueTypeINTEL; typedef union VkPerformanceValueDataINTEL { uint32_t value32; uint64_t value64; float valueFloat; VkBool32 valueBool; const char* valueString; } VkPerformanceValueDataINTEL; typedef struct VkPerformanceValueINTEL { VkPerformanceValueTypeINTEL type; VkPerformanceValueDataINTEL data; } VkPerformanceValueINTEL; typedef struct VkInitializePerformanceApiInfoINTEL { VkStructureType sType; const void* pNext; void* pUserData; } VkInitializePerformanceApiInfoINTEL; typedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL { 
VkStructureType sType; const void* pNext; VkQueryPoolSamplingModeINTEL performanceCountersSampling; } VkQueryPoolPerformanceQueryCreateInfoINTEL; typedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL; typedef struct VkPerformanceMarkerInfoINTEL { VkStructureType sType; const void* pNext; uint64_t marker; } VkPerformanceMarkerInfoINTEL; typedef struct VkPerformanceStreamMarkerInfoINTEL { VkStructureType sType; const void* pNext; uint32_t marker; } VkPerformanceStreamMarkerInfoINTEL; typedef struct VkPerformanceOverrideInfoINTEL { VkStructureType sType; const void* pNext; VkPerformanceOverrideTypeINTEL type; VkBool32 enable; uint64_t parameter; } VkPerformanceOverrideInfoINTEL; typedef struct VkPerformanceConfigurationAcquireInfoINTEL { VkStructureType sType; const void* pNext; VkPerformanceConfigurationTypeINTEL type; } VkPerformanceConfigurationAcquireInfoINTEL; typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); typedef VkResult (VKAPI_PTR 
*PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( VkDevice device); VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( VkDevice device, VkPerformanceConfigurationINTEL configuration); VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( VkQueue queue, VkPerformanceConfigurationINTEL configuration); VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); #endif #define VK_EXT_pci_bus_info 1 #define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 #define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { VkStructureType sType; void* pNext; uint32_t pciDomain; uint32_t pciBus; uint32_t pciDevice; uint32_t pciFunction; } VkPhysicalDevicePCIBusInfoPropertiesEXT; #define VK_AMD_display_native_hdr 1 #define 
VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 #define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { VkStructureType sType; void* pNext; VkBool32 localDimmingSupport; } VkDisplayNativeHdrSurfaceCapabilitiesAMD; typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { VkStructureType sType; const void* pNext; VkBool32 localDimmingEnable; } VkSwapchainDisplayNativeHdrCreateInfoAMD; typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); #endif #define VK_EXT_fragment_density_map 1 #define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 2 #define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 fragmentDensityMap; VkBool32 fragmentDensityMapDynamic; VkBool32 fragmentDensityMapNonSubsampledImages; } VkPhysicalDeviceFragmentDensityMapFeaturesEXT; typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { VkStructureType sType; void* pNext; VkExtent2D minFragmentDensityTexelSize; VkExtent2D maxFragmentDensityTexelSize; VkBool32 fragmentDensityInvocations; } VkPhysicalDeviceFragmentDensityMapPropertiesEXT; typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { VkStructureType sType; const void* pNext; VkAttachmentReference fragmentDensityMapAttachment; } VkRenderPassFragmentDensityMapCreateInfoEXT; #define VK_EXT_scalar_block_layout 1 #define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 #define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; #define VK_GOOGLE_hlsl_functionality1 1 #define VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION 1 
#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" #define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION #define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME #define VK_GOOGLE_decorate_string 1 #define VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 #define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" #define VK_EXT_subgroup_size_control 1 #define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 #define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" typedef VkPhysicalDeviceSubgroupSizeControlFeatures VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; typedef VkPhysicalDeviceSubgroupSizeControlProperties VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; typedef VkPipelineShaderStageRequiredSubgroupSizeCreateInfo VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; #define VK_AMD_shader_core_properties2 1 #define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 #define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" typedef enum VkShaderCorePropertiesFlagBitsAMD { VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF } VkShaderCorePropertiesFlagBitsAMD; typedef VkFlags VkShaderCorePropertiesFlagsAMD; typedef struct VkPhysicalDeviceShaderCoreProperties2AMD { VkStructureType sType; void* pNext; VkShaderCorePropertiesFlagsAMD shaderCoreFeatures; uint32_t activeComputeUnitCount; } VkPhysicalDeviceShaderCoreProperties2AMD; #define VK_AMD_device_coherent_memory 1 #define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1 #define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory" typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { VkStructureType sType; void* pNext; VkBool32 deviceCoherentMemory; } VkPhysicalDeviceCoherentMemoryFeaturesAMD; #define VK_EXT_shader_image_atomic_int64 1 #define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1 #define 
VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME "VK_EXT_shader_image_atomic_int64" typedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT { VkStructureType sType; void* pNext; VkBool32 shaderImageInt64Atomics; VkBool32 sparseImageInt64Atomics; } VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; #define VK_EXT_memory_budget 1 #define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 #define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { VkStructureType sType; void* pNext; VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; } VkPhysicalDeviceMemoryBudgetPropertiesEXT; #define VK_EXT_memory_priority 1 #define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1 #define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority" typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 memoryPriority; } VkPhysicalDeviceMemoryPriorityFeaturesEXT; typedef struct VkMemoryPriorityAllocateInfoEXT { VkStructureType sType; const void* pNext; float priority; } VkMemoryPriorityAllocateInfoEXT; #define VK_NV_dedicated_allocation_image_aliasing 1 #define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1 #define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME "VK_NV_dedicated_allocation_image_aliasing" typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { VkStructureType sType; void* pNext; VkBool32 dedicatedAllocationImageAliasing; } VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; #define VK_EXT_buffer_device_address 1 #define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2 #define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address" typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 bufferDeviceAddress; VkBool32 bufferDeviceAddressCaptureReplay; VkBool32 bufferDeviceAddressMultiDevice; } 
VkPhysicalDeviceBufferDeviceAddressFeaturesEXT; typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT; typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT; typedef struct VkBufferDeviceAddressCreateInfoEXT { VkStructureType sType; const void* pNext; VkDeviceAddress deviceAddress; } VkBufferDeviceAddressCreateInfoEXT; typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT( VkDevice device, const VkBufferDeviceAddressInfo* pInfo); #endif #define VK_EXT_tooling_info 1 #define VK_EXT_TOOLING_INFO_SPEC_VERSION 1 #define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info" typedef VkToolPurposeFlagBits VkToolPurposeFlagBitsEXT; typedef VkToolPurposeFlags VkToolPurposeFlagsEXT; typedef VkPhysicalDeviceToolProperties VkPhysicalDeviceToolPropertiesEXT; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT( VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties); #endif #define VK_EXT_separate_stencil_usage 1 #define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1 #define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage" typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT; #define VK_EXT_validation_features 1 #define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 5 #define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features" typedef enum VkValidationFeatureEnableEXT { VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0, VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1, VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2, 
VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3, VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT = 4, VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF } VkValidationFeatureEnableEXT; typedef enum VkValidationFeatureDisableEXT { VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0, VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1, VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2, VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3, VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4, VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5, VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6, VK_VALIDATION_FEATURE_DISABLE_SHADER_VALIDATION_CACHE_EXT = 7, VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF } VkValidationFeatureDisableEXT; typedef struct VkValidationFeaturesEXT { VkStructureType sType; const void* pNext; uint32_t enabledValidationFeatureCount; const VkValidationFeatureEnableEXT* pEnabledValidationFeatures; uint32_t disabledValidationFeatureCount; const VkValidationFeatureDisableEXT* pDisabledValidationFeatures; } VkValidationFeaturesEXT; #define VK_NV_cooperative_matrix 1 #define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1 #define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix" typedef enum VkComponentTypeNV { VK_COMPONENT_TYPE_FLOAT16_NV = 0, VK_COMPONENT_TYPE_FLOAT32_NV = 1, VK_COMPONENT_TYPE_FLOAT64_NV = 2, VK_COMPONENT_TYPE_SINT8_NV = 3, VK_COMPONENT_TYPE_SINT16_NV = 4, VK_COMPONENT_TYPE_SINT32_NV = 5, VK_COMPONENT_TYPE_SINT64_NV = 6, VK_COMPONENT_TYPE_UINT8_NV = 7, VK_COMPONENT_TYPE_UINT16_NV = 8, VK_COMPONENT_TYPE_UINT32_NV = 9, VK_COMPONENT_TYPE_UINT64_NV = 10, VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF } VkComponentTypeNV; typedef enum VkScopeNV { VK_SCOPE_DEVICE_NV = 1, VK_SCOPE_WORKGROUP_NV = 2, VK_SCOPE_SUBGROUP_NV = 3, VK_SCOPE_QUEUE_FAMILY_NV = 5, VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF } VkScopeNV; typedef struct VkCooperativeMatrixPropertiesNV { VkStructureType sType; void* pNext; uint32_t MSize; uint32_t NSize; 
uint32_t KSize; VkComponentTypeNV AType; VkComponentTypeNV BType; VkComponentTypeNV CType; VkComponentTypeNV DType; VkScopeNV scope; } VkCooperativeMatrixPropertiesNV; typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV { VkStructureType sType; void* pNext; VkBool32 cooperativeMatrix; VkBool32 cooperativeMatrixRobustBufferAccess; } VkPhysicalDeviceCooperativeMatrixFeaturesNV; typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV { VkStructureType sType; void* pNext; VkShaderStageFlags cooperativeMatrixSupportedStages; } VkPhysicalDeviceCooperativeMatrixPropertiesNV; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); #endif #define VK_NV_coverage_reduction_mode 1 #define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1 #define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode" typedef enum VkCoverageReductionModeNV { VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0, VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1, VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF } VkCoverageReductionModeNV; typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV; typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV { VkStructureType sType; void* pNext; VkBool32 coverageReductionMode; } VkPhysicalDeviceCoverageReductionModeFeaturesNV; typedef struct VkPipelineCoverageReductionStateCreateInfoNV { VkStructureType sType; const void* pNext; VkPipelineCoverageReductionStateCreateFlagsNV flags; VkCoverageReductionModeNV coverageReductionMode; } VkPipelineCoverageReductionStateCreateInfoNV; typedef struct VkFramebufferMixedSamplesCombinationNV { VkStructureType sType; void* pNext; VkCoverageReductionModeNV 
coverageReductionMode; VkSampleCountFlagBits rasterizationSamples; VkSampleCountFlags depthStencilSamples; VkSampleCountFlags colorSamples; } VkFramebufferMixedSamplesCombinationNV; typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); #endif #define VK_EXT_fragment_shader_interlock 1 #define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1 #define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock" typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 fragmentShaderSampleInterlock; VkBool32 fragmentShaderPixelInterlock; VkBool32 fragmentShaderShadingRateInterlock; } VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT; #define VK_EXT_ycbcr_image_arrays 1 #define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1 #define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays" typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 ycbcrImageArrays; } VkPhysicalDeviceYcbcrImageArraysFeaturesEXT; #define VK_EXT_provoking_vertex 1 #define VK_EXT_PROVOKING_VERTEX_SPEC_VERSION 1 #define VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME "VK_EXT_provoking_vertex" typedef enum VkProvokingVertexModeEXT { VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT = 0, VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT = 1, VK_PROVOKING_VERTEX_MODE_MAX_ENUM_EXT = 0x7FFFFFFF } VkProvokingVertexModeEXT; typedef struct VkPhysicalDeviceProvokingVertexFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 provokingVertexLast; VkBool32 transformFeedbackPreservesProvokingVertex; } 
VkPhysicalDeviceProvokingVertexFeaturesEXT; typedef struct VkPhysicalDeviceProvokingVertexPropertiesEXT { VkStructureType sType; void* pNext; VkBool32 provokingVertexModePerPipeline; VkBool32 transformFeedbackPreservesTriangleFanProvokingVertex; } VkPhysicalDeviceProvokingVertexPropertiesEXT; typedef struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkProvokingVertexModeEXT provokingVertexMode; } VkPipelineRasterizationProvokingVertexStateCreateInfoEXT; #define VK_EXT_headless_surface 1 #define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1 #define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface" typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT; typedef struct VkHeadlessSurfaceCreateInfoEXT { VkStructureType sType; const void* pNext; VkHeadlessSurfaceCreateFlagsEXT flags; } VkHeadlessSurfaceCreateInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #endif #define VK_EXT_line_rasterization 1 #define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 #define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" typedef enum VkLineRasterizationModeEXT { VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0, VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1, VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2, VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3, VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF } VkLineRasterizationModeEXT; typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 rectangularLines; VkBool32 bresenhamLines; VkBool32 smoothLines; VkBool32 stippledRectangularLines; 
VkBool32 stippledBresenhamLines; VkBool32 stippledSmoothLines; } VkPhysicalDeviceLineRasterizationFeaturesEXT; typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT { VkStructureType sType; void* pNext; uint32_t lineSubPixelPrecisionBits; } VkPhysicalDeviceLineRasterizationPropertiesEXT; typedef struct VkPipelineRasterizationLineStateCreateInfoEXT { VkStructureType sType; const void* pNext; VkLineRasterizationModeEXT lineRasterizationMode; VkBool32 stippledLineEnable; uint32_t lineStippleFactor; uint16_t lineStipplePattern; } VkPipelineRasterizationLineStateCreateInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT( VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); #endif #define VK_EXT_shader_atomic_float 1 #define VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION 1 #define VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME "VK_EXT_shader_atomic_float" typedef struct VkPhysicalDeviceShaderAtomicFloatFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 shaderBufferFloat32Atomics; VkBool32 shaderBufferFloat32AtomicAdd; VkBool32 shaderBufferFloat64Atomics; VkBool32 shaderBufferFloat64AtomicAdd; VkBool32 shaderSharedFloat32Atomics; VkBool32 shaderSharedFloat32AtomicAdd; VkBool32 shaderSharedFloat64Atomics; VkBool32 shaderSharedFloat64AtomicAdd; VkBool32 shaderImageFloat32Atomics; VkBool32 shaderImageFloat32AtomicAdd; VkBool32 sparseImageFloat32Atomics; VkBool32 sparseImageFloat32AtomicAdd; } VkPhysicalDeviceShaderAtomicFloatFeaturesEXT; #define VK_EXT_host_query_reset 1 #define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1 #define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset" typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT; typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, 
uint32_t firstQuery, uint32_t queryCount); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); #endif #define VK_EXT_index_type_uint8 1 #define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 #define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT { VkStructureType sType; void* pNext; VkBool32 indexTypeUint8; } VkPhysicalDeviceIndexTypeUint8FeaturesEXT; #define VK_EXT_extended_dynamic_state 1 #define VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION 1 #define VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_extended_dynamic_state" typedef struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 extendedDynamicState; } VkPhysicalDeviceExtendedDynamicStateFeaturesEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetCullModeEXT)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFaceEXT)(VkCommandBuffer commandBuffer, VkFrontFace frontFace); typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopologyEXT)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2EXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); 
// VK_EXT_extended_dynamic_state (continued): remaining command
// function-pointer types and, when VK_NO_PROTOTYPES is not defined,
// the corresponding prototypes.
typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOpEXT)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp);
typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable);
typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable);
typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOpEXT)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdSetCullModeEXT(
    VkCommandBuffer                             commandBuffer,
    VkCullModeFlags                             cullMode);

VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFaceEXT(
    VkCommandBuffer                             commandBuffer,
    VkFrontFace                                 frontFace);

VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopologyEXT(
    VkCommandBuffer                             commandBuffer,
    VkPrimitiveTopology                         primitiveTopology);

VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCountEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports);

VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCountEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors);

VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2EXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes,
    const VkDeviceSize*                         pStrides);

VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthTestEnable);

VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthWriteEnable);

VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOpEXT(
    VkCommandBuffer                             commandBuffer,
    VkCompareOp                                 depthCompareOp);

VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthBoundsTestEnable);

VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    stencilTestEnable);

VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    VkStencilOp                                 failOp,
    VkStencilOp                                 passOp,
    VkStencilOp                                 depthFailOp,
    VkCompareOp                                 compareOp);
#endif


// --- VK_EXT_shader_atomic_float2 ---
#define VK_EXT_shader_atomic_float2 1
#define VK_EXT_SHADER_ATOMIC_FLOAT_2_SPEC_VERSION 1
#define VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME "VK_EXT_shader_atomic_float2"
typedef struct VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           shaderBufferFloat16Atomics;
    VkBool32           shaderBufferFloat16AtomicAdd;
    VkBool32           shaderBufferFloat16AtomicMinMax;
    VkBool32           shaderBufferFloat32AtomicMinMax;
    VkBool32           shaderBufferFloat64AtomicMinMax;
    VkBool32           shaderSharedFloat16Atomics;
    VkBool32           shaderSharedFloat16AtomicAdd;
    VkBool32           shaderSharedFloat16AtomicMinMax;
    VkBool32           shaderSharedFloat32AtomicMinMax;
    VkBool32           shaderSharedFloat64AtomicMinMax;
    VkBool32           shaderImageFloat32AtomicMinMax;
    VkBool32           sparseImageFloat32AtomicMinMax;
} VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT;


// --- VK_EXT_shader_demote_to_helper_invocation ---
// Feature struct is an alias of the equivalent non-suffixed type.
#define VK_EXT_shader_demote_to_helper_invocation 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation"
typedef VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;


// --- VK_NV_device_generated_commands ---
#define VK_NV_device_generated_commands 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV)
#define VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3
#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NV_device_generated_commands"

typedef enum VkIndirectCommandsTokenTypeNV {
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7,
    VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkIndirectCommandsTokenTypeNV;

typedef enum VkIndirectStateFlagBitsNV {
    VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001,
    VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
} VkIndirectStateFlagBitsNV;
typedef VkFlags VkIndirectStateFlagsNV;

typedef enum VkIndirectCommandsLayoutUsageFlagBitsNV {
    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001,
    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002,
    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004,
    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
} VkIndirectCommandsLayoutUsageFlagBitsNV;
typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV;

typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxGraphicsShaderGroupCount;
    uint32_t           maxIndirectSequenceCount;
    uint32_t           maxIndirectCommandsTokenCount;
    uint32_t           maxIndirectCommandsStreamCount;
    uint32_t           maxIndirectCommandsTokenOffset;
    uint32_t           maxIndirectCommandsStreamStride;
    uint32_t           minSequencesCountBufferOffsetAlignment;
    uint32_t           minSequencesIndexBufferOffsetAlignment;
    uint32_t           minIndirectCommandsBufferOffsetAlignment;
} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV;

typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           deviceGeneratedCommands;
} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV;

typedef struct VkGraphicsShaderGroupCreateInfoNV {
    VkStructureType                                 sType;
    const void*                                     pNext;
    uint32_t                                        stageCount;
    const VkPipelineShaderStageCreateInfo*          pStages;
    const VkPipelineVertexInputStateCreateInfo*     pVertexInputState;
    const VkPipelineTessellationStateCreateInfo*    pTessellationState;
} VkGraphicsShaderGroupCreateInfoNV;

typedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV {
    VkStructureType                             sType;
    const void*                                 pNext;
    uint32_t                                    groupCount;
    const VkGraphicsShaderGroupCreateInfoNV*    pGroups;
    uint32_t                                    pipelineCount;
    const VkPipeline*                           pPipelines;
} VkGraphicsPipelineShaderGroupsCreateInfoNV;

typedef struct VkBindShaderGroupIndirectCommandNV {
    uint32_t    groupIndex;
} VkBindShaderGroupIndirectCommandNV;

typedef struct VkBindIndexBufferIndirectCommandNV {
    VkDeviceAddress    bufferAddress;
    uint32_t           size;
    VkIndexType        indexType;
} VkBindIndexBufferIndirectCommandNV;

typedef struct VkBindVertexBufferIndirectCommandNV {
    VkDeviceAddress    bufferAddress;
    uint32_t           size;
    uint32_t           stride;
} VkBindVertexBufferIndirectCommandNV;

typedef struct VkSetStateFlagsIndirectCommandNV {
    uint32_t    data;
} VkSetStateFlagsIndirectCommandNV;

typedef struct VkIndirectCommandsStreamNV {
    VkBuffer        buffer;
    VkDeviceSize    offset;
} VkIndirectCommandsStreamNV;

typedef struct VkIndirectCommandsLayoutTokenNV {
    VkStructureType                  sType;
    const void*                      pNext;
    VkIndirectCommandsTokenTypeNV    tokenType;
    uint32_t                         stream;
    uint32_t                         offset;
    uint32_t                         vertexBindingUnit;
    VkBool32                         vertexDynamicStride;
    VkPipelineLayout                 pushconstantPipelineLayout;
    VkShaderStageFlags               pushconstantShaderStageFlags;
    uint32_t                         pushconstantOffset;
    uint32_t                         pushconstantSize;
    VkIndirectStateFlagsNV           indirectStateFlags;
    uint32_t                         indexTypeCount;
    const VkIndexType*               pIndexTypes;
    const uint32_t*                  pIndexTypeValues;
} VkIndirectCommandsLayoutTokenNV;

typedef struct VkIndirectCommandsLayoutCreateInfoNV {
    VkStructureType                           sType;
    const void*                               pNext;
    VkIndirectCommandsLayoutUsageFlagsNV      flags;
    VkPipelineBindPoint                       pipelineBindPoint;
    uint32_t                                  tokenCount;
    const VkIndirectCommandsLayoutTokenNV*    pTokens;
    uint32_t                                  streamCount;
    const uint32_t*                           pStreamStrides;
} VkIndirectCommandsLayoutCreateInfoNV;

typedef struct VkGeneratedCommandsInfoNV {
    VkStructureType                      sType;
    const void*                          pNext;
    VkPipelineBindPoint                  pipelineBindPoint;
    VkPipeline                           pipeline;
    VkIndirectCommandsLayoutNV           indirectCommandsLayout;
    uint32_t                             streamCount;
    const VkIndirectCommandsStreamNV*    pStreams;
    uint32_t                             sequencesCount;
    VkBuffer                             preprocessBuffer;
    VkDeviceSize                         preprocessOffset;
    VkDeviceSize                         preprocessSize;
    VkBuffer                             sequencesCountBuffer;
    VkDeviceSize                         sequencesCountOffset;
    VkBuffer                             sequencesIndexBuffer;
    VkDeviceSize                         sequencesIndexOffset;
} VkGeneratedCommandsInfoNV;

typedef struct VkGeneratedCommandsMemoryRequirementsInfoNV {
    VkStructureType               sType;
    const void*                   pNext;
    VkPipelineBindPoint           pipelineBindPoint;
    VkPipeline                    pipeline;
    VkIndirectCommandsLayoutNV    indirectCommandsLayout;
    uint32_t                      maxSequencesCount;
} VkGeneratedCommandsMemoryRequirementsInfoNV;

typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
typedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
typedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex);
typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);
typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV(
    VkDevice                                    device,
    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
    VkMemoryRequirements2*                      pMemoryRequirements);

VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV(
    VkCommandBuffer                             commandBuffer,
    const VkGeneratedCommandsInfoNV*            pGeneratedCommandsInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    isPreprocessed,
    const VkGeneratedCommandsInfoNV*            pGeneratedCommandsInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  pipeline,
    uint32_t                                    groupIndex);

VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV(
    VkDevice                                    device,
    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkIndirectCommandsLayoutNV*                 pIndirectCommandsLayout);

VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV(
    VkDevice                                    device,
    VkIndirectCommandsLayoutNV                  indirectCommandsLayout,
    const VkAllocationCallbacks*                pAllocator);
#endif


// --- VK_NV_inherited_viewport_scissor ---
#define VK_NV_inherited_viewport_scissor 1
#define VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION 1
#define VK_NV_INHERITED_VIEWPORT_SCISSOR_EXTENSION_NAME "VK_NV_inherited_viewport_scissor"
typedef struct VkPhysicalDeviceInheritedViewportScissorFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           inheritedViewportScissor2D;
} VkPhysicalDeviceInheritedViewportScissorFeaturesNV;

typedef struct VkCommandBufferInheritanceViewportScissorInfoNV {
    VkStructureType      sType;
    const void*          pNext;
    VkBool32             viewportScissor2D;
    uint32_t             viewportDepthCount;
    const VkViewport*    pViewportDepths;
} VkCommandBufferInheritanceViewportScissorInfoNV;


// --- VK_EXT_texel_buffer_alignment ---
#define VK_EXT_texel_buffer_alignment 1
#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1
#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment"
typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           texelBufferAlignment;
} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT;

typedef VkPhysicalDeviceTexelBufferAlignmentProperties VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;


// --- VK_QCOM_render_pass_transform ---
#define VK_QCOM_render_pass_transform 1
#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 3
#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform"
typedef struct VkRenderPassTransformBeginInfoQCOM {
    VkStructureType                  sType;
    void*                            pNext;
    VkSurfaceTransformFlagBitsKHR    transform;
} VkRenderPassTransformBeginInfoQCOM;

typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM {
    VkStructureType                  sType;
    void*                            pNext;
    VkSurfaceTransformFlagBitsKHR    transform;
    VkRect2D                         renderArea;
} VkCommandBufferInheritanceRenderPassTransformInfoQCOM;


// --- VK_EXT_device_memory_report ---
#define VK_EXT_device_memory_report 1
#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 2
#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME "VK_EXT_device_memory_report"

typedef enum VkDeviceMemoryReportEventTypeEXT {
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT = 0,
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1,
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2,
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3,
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4,
    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
} VkDeviceMemoryReportEventTypeEXT;
typedef VkFlags VkDeviceMemoryReportFlagsEXT;

typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           deviceMemoryReport;
} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT;

typedef struct VkDeviceMemoryReportCallbackDataEXT {
    VkStructureType                     sType;
    void*                               pNext;
    VkDeviceMemoryReportFlagsEXT        flags;
    VkDeviceMemoryReportEventTypeEXT    type;
    uint64_t                            memoryObjectId;
    VkDeviceSize                        size;
    VkObjectType                        objectType;
    uint64_t                            objectHandle;
    uint32_t                            heapIndex;
} VkDeviceMemoryReportCallbackDataEXT;

typedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)(
    const VkDeviceMemoryReportCallbackDataEXT*  pCallbackData,
    void*                                       pUserData);

typedef struct VkDeviceDeviceMemoryReportCreateInfoEXT {
    VkStructureType                        sType;
    const void*                            pNext;
    VkDeviceMemoryReportFlagsEXT           flags;
    PFN_vkDeviceMemoryReportCallbackEXT    pfnUserCallback;
    void*                                  pUserData;
} VkDeviceDeviceMemoryReportCreateInfoEXT;


// --- VK_EXT_acquire_drm_display ---
#define VK_EXT_acquire_drm_display 1
#define VK_EXT_ACQUIRE_DRM_DISPLAY_SPEC_VERSION 1
#define VK_EXT_ACQUIRE_DRM_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_drm_display"
typedef VkResult (VKAPI_PTR *PFN_vkAcquireDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, VkDisplayKHR display);
typedef VkResult (VKAPI_PTR *PFN_vkGetDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, uint32_t connectorId, VkDisplayKHR* display);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireDrmDisplayEXT(
    VkPhysicalDevice                            physicalDevice,
    int32_t                                     drmFd,
    VkDisplayKHR                                display);

VKAPI_ATTR VkResult VKAPI_CALL vkGetDrmDisplayEXT(
    VkPhysicalDevice                            physicalDevice,
    int32_t                                     drmFd,
    uint32_t                                    connectorId,
    VkDisplayKHR*                               display);
#endif


// --- VK_EXT_robustness2 ---
#define VK_EXT_robustness2 1
#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1
#define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2"
typedef struct VkPhysicalDeviceRobustness2FeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           robustBufferAccess2;
    VkBool32           robustImageAccess2;
    VkBool32           nullDescriptor;
} VkPhysicalDeviceRobustness2FeaturesEXT;

typedef struct VkPhysicalDeviceRobustness2PropertiesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkDeviceSize       robustStorageBufferAccessSizeAlignment;
    VkDeviceSize       robustUniformBufferAccessSizeAlignment;
} VkPhysicalDeviceRobustness2PropertiesEXT;


// --- VK_EXT_custom_border_color ---
#define VK_EXT_custom_border_color 1
#define VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12
#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME "VK_EXT_custom_border_color"
typedef struct VkSamplerCustomBorderColorCreateInfoEXT {
    VkStructureType      sType;
    const void*          pNext;
    VkClearColorValue    customBorderColor;
    VkFormat             format;
} VkSamplerCustomBorderColorCreateInfoEXT;

typedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxCustomBorderColorSamplers;
} VkPhysicalDeviceCustomBorderColorPropertiesEXT;

typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           customBorderColors;
    VkBool32           customBorderColorWithoutFormat;
} VkPhysicalDeviceCustomBorderColorFeaturesEXT;


// --- VK_GOOGLE_user_type ---
#define VK_GOOGLE_user_type 1
#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1
#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type"


// --- VK_EXT_private_data ---
// Types here alias the equivalent non-suffixed (promoted) types.
#define VK_EXT_private_data 1
typedef VkPrivateDataSlot VkPrivateDataSlotEXT;
#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1
#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data"
typedef VkPrivateDataSlotCreateFlags VkPrivateDataSlotCreateFlagsEXT;
typedef VkPhysicalDevicePrivateDataFeatures VkPhysicalDevicePrivateDataFeaturesEXT;
typedef VkDevicePrivateDataCreateInfo VkDevicePrivateDataCreateInfoEXT;
typedef VkPrivateDataSlotCreateInfo VkPrivateDataSlotCreateInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);
typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);
typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);
typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT(
    VkDevice                                    device,
    const VkPrivateDataSlotCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPrivateDataSlot*                          pPrivateDataSlot);

VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT(
    VkDevice                                    device,
    VkPrivateDataSlot                           privateDataSlot,
    const VkAllocationCallbacks*                pAllocator);

VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT(
    VkDevice                                    device,
    VkObjectType                                objectType,
    uint64_t                                    objectHandle,
    VkPrivateDataSlot                           privateDataSlot,
    uint64_t                                    data);

VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(
    VkDevice                                    device,
    VkObjectType                                objectType,
    uint64_t                                    objectHandle,
    VkPrivateDataSlot                           privateDataSlot,
    uint64_t*                                   pData);
#endif


// --- VK_EXT_pipeline_creation_cache_control ---
#define VK_EXT_pipeline_creation_cache_control 1
#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3
#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control"
typedef VkPhysicalDevicePipelineCreationCacheControlFeatures VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;


// --- VK_NV_device_diagnostics_config ---
#define VK_NV_device_diagnostics_config 1
#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 2
#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config"

typedef enum VkDeviceDiagnosticsConfigFlagBitsNV {
    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001,
    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002,
    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004,
    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_ERROR_REPORTING_BIT_NV = 0x00000008,
    VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
} VkDeviceDiagnosticsConfigFlagBitsNV;
typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV;

typedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           diagnosticsConfig;
} VkPhysicalDeviceDiagnosticsConfigFeaturesNV;

typedef struct VkDeviceDiagnosticsConfigCreateInfoNV {
    VkStructureType                     sType;
    const void*                         pNext;
    VkDeviceDiagnosticsConfigFlagsNV    flags;
} VkDeviceDiagnosticsConfigCreateInfoNV;


// --- VK_QCOM_render_pass_store_ops ---
#define VK_QCOM_render_pass_store_ops 1
#define VK_QCOM_RENDER_PASS_STORE_OPS_SPEC_VERSION 2
#define VK_QCOM_RENDER_PASS_STORE_OPS_EXTENSION_NAME "VK_QCOM_render_pass_store_ops"


// --- VK_EXT_graphics_pipeline_library ---
#define VK_EXT_graphics_pipeline_library 1
#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_SPEC_VERSION 1
#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_EXTENSION_NAME "VK_EXT_graphics_pipeline_library"

typedef enum VkGraphicsPipelineLibraryFlagBitsEXT {
    VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT = 0x00000001,
    VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT = 0x00000002,
    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT = 0x00000004,
    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT = 0x00000008,
    VK_GRAPHICS_PIPELINE_LIBRARY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
} VkGraphicsPipelineLibraryFlagBitsEXT;
typedef VkFlags VkGraphicsPipelineLibraryFlagsEXT;

typedef struct VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           graphicsPipelineLibrary;
} VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT;

typedef struct VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           graphicsPipelineLibraryFastLinking;
    VkBool32           graphicsPipelineLibraryIndependentInterpolationDecoration;
} VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT;

typedef struct VkGraphicsPipelineLibraryCreateInfoEXT {
    VkStructureType                      sType;
    void*                                pNext;
    VkGraphicsPipelineLibraryFlagsEXT    flags;
} VkGraphicsPipelineLibraryCreateInfoEXT;


// --- VK_AMD_shader_early_and_late_fragment_tests ---
#define VK_AMD_shader_early_and_late_fragment_tests 1
#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_SPEC_VERSION 1
#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_EXTENSION_NAME "VK_AMD_shader_early_and_late_fragment_tests"
typedef struct VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           shaderEarlyAndLateFragmentTests;
} VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD;


// --- VK_NV_fragment_shading_rate_enums ---
#define VK_NV_fragment_shading_rate_enums 1
#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1
#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums"

typedef enum VkFragmentShadingRateTypeNV {
    VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0,
    VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1,
    VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkFragmentShadingRateTypeNV;

typedef enum VkFragmentShadingRateNV {
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9,
    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10,
    VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11,
    VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12,
    VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13,
    VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14,
    VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15,
    VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF
} VkFragmentShadingRateNV;

typedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           fragmentShadingRateEnums;
    VkBool32           supersampleFragmentShadingRates;
    VkBool32           noInvocationFragmentShadingRates;
} VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV;

typedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV {
    VkStructureType          sType;
    void*                    pNext;
    VkSampleCountFlagBits    maxFragmentShadingRateInvocationCount;
} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV;

typedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV {
    VkStructureType                       sType;
    const void*                           pNext;
    VkFragmentShadingRateTypeNV           shadingRateType;
    VkFragmentShadingRateNV               shadingRate;
    VkFragmentShadingRateCombinerOpKHR    combinerOps[2];
} VkPipelineFragmentShadingRateEnumStateCreateInfoNV;

typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV(
    VkCommandBuffer                             commandBuffer,
    VkFragmentShadingRateNV                     shadingRate,
    const VkFragmentShadingRateCombinerOpKHR    combinerOps[2]);
#endif


// --- VK_NV_ray_tracing_motion_blur ---
#define VK_NV_ray_tracing_motion_blur 1
#define VK_NV_RAY_TRACING_MOTION_BLUR_SPEC_VERSION 1
#define VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME "VK_NV_ray_tracing_motion_blur"

typedef enum VkAccelerationStructureMotionInstanceTypeNV {
    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_STATIC_NV = 0,
    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV = 1,
    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_SRT_MOTION_NV = 2,
    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkAccelerationStructureMotionInstanceTypeNV;
typedef VkFlags VkAccelerationStructureMotionInfoFlagsNV;
typedef VkFlags VkAccelerationStructureMotionInstanceFlagsNV;

typedef union VkDeviceOrHostAddressConstKHR {
    VkDeviceAddress    deviceAddress;
    const void*        hostAddress;
} VkDeviceOrHostAddressConstKHR;

typedef struct VkAccelerationStructureGeometryMotionTrianglesDataNV {
    VkStructureType                  sType;
    const void*                      pNext;
    VkDeviceOrHostAddressConstKHR    vertexData;
} VkAccelerationStructureGeometryMotionTrianglesDataNV;

typedef struct VkAccelerationStructureMotionInfoNV {
    VkStructureType                             sType;
    const void*                                 pNext;
    uint32_t                                    maxInstances;
    VkAccelerationStructureMotionInfoFlagsNV    flags;
} VkAccelerationStructureMotionInfoNV;

typedef struct VkAccelerationStructureMatrixMotionInstanceNV {
    VkTransformMatrixKHR          transformT0;
    VkTransformMatrixKHR          transformT1;
    uint32_t                      instanceCustomIndex:24;
    uint32_t                      mask:8;
    uint32_t                      instanceShaderBindingTableRecordOffset:24;
    VkGeometryInstanceFlagsKHR    flags:8;
    uint64_t                      accelerationStructureReference;
} VkAccelerationStructureMatrixMotionInstanceNV;

typedef struct VkSRTDataNV {
    float    sx;
    float    a;
    float    b;
    float    pvx;
    float    sy;
    float    c;
    float    pvy;
    float    sz;
    float    pvz;
    float    qx;
    float    qy;
    float    qz;
    float    qw;
    float    tx;
    float    ty;
    float    tz;
} VkSRTDataNV;

typedef struct VkAccelerationStructureSRTMotionInstanceNV {
    VkSRTDataNV                   transformT0;
    VkSRTDataNV                   transformT1;
    uint32_t                      instanceCustomIndex:24;
    uint32_t                      mask:8;
    uint32_t                      instanceShaderBindingTableRecordOffset:24;
    VkGeometryInstanceFlagsKHR    flags:8;
    uint64_t                      accelerationStructureReference;
} VkAccelerationStructureSRTMotionInstanceNV;

typedef union VkAccelerationStructureMotionInstanceDataNV {
    VkAccelerationStructureInstanceKHR               staticInstance;
    VkAccelerationStructureMatrixMotionInstanceNV    matrixMotionInstance;
    VkAccelerationStructureSRTMotionInstanceNV       srtMotionInstance;
} VkAccelerationStructureMotionInstanceDataNV;

typedef struct VkAccelerationStructureMotionInstanceNV {
    VkAccelerationStructureMotionInstanceTypeNV     type;
    VkAccelerationStructureMotionInstanceFlagsNV    flags;
    VkAccelerationStructureMotionInstanceDataNV     data;
} VkAccelerationStructureMotionInstanceNV;

typedef struct VkPhysicalDeviceRayTracingMotionBlurFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           rayTracingMotionBlur;
    VkBool32           rayTracingMotionBlurPipelineTraceRaysIndirect;
} VkPhysicalDeviceRayTracingMotionBlurFeaturesNV;


// --- VK_EXT_ycbcr_2plane_444_formats ---
#define VK_EXT_ycbcr_2plane_444_formats 1
#define VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION 1
#define VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME "VK_EXT_ycbcr_2plane_444_formats"
typedef struct VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           ycbcr2plane444Formats;
} VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT;


// --- VK_EXT_fragment_density_map2 ---
#define VK_EXT_fragment_density_map2 1
#define VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1
#define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME "VK_EXT_fragment_density_map2"
typedef struct VkPhysicalDeviceFragmentDensityMap2FeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           fragmentDensityMapDeferred;
} VkPhysicalDeviceFragmentDensityMap2FeaturesEXT;

typedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           subsampledLoads;
    VkBool32           subsampledCoarseReconstructionEarlyAccess;
    uint32_t           maxSubsampledArrayLayers;
    uint32_t           maxDescriptorSetSubsampledSamplers;
} VkPhysicalDeviceFragmentDensityMap2PropertiesEXT;


// --- VK_QCOM_rotated_copy_commands ---
#define VK_QCOM_rotated_copy_commands 1
#define VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION 1
#define VK_QCOM_ROTATED_COPY_COMMANDS_EXTENSION_NAME "VK_QCOM_rotated_copy_commands"
typedef struct VkCopyCommandTransformInfoQCOM {
    VkStructureType                  sType;
    const void*                      pNext;
    VkSurfaceTransformFlagBitsKHR    transform;
} VkCopyCommandTransformInfoQCOM;


// --- VK_EXT_image_robustness ---
// Feature struct is an alias of the equivalent non-suffixed type.
#define VK_EXT_image_robustness 1
#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1
#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness"
typedef VkPhysicalDeviceImageRobustnessFeatures VkPhysicalDeviceImageRobustnessFeaturesEXT;


// --- VK_EXT_image_compression_control ---
#define VK_EXT_image_compression_control 1
#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SPEC_VERSION 1
#define VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME "VK_EXT_image_compression_control"

typedef enum VkImageCompressionFlagBitsEXT {
    VK_IMAGE_COMPRESSION_DEFAULT_EXT = 0,
    VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT = 0x00000001,
    VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT = 0x00000002,
    VK_IMAGE_COMPRESSION_DISABLED_EXT = 0x00000004,
    VK_IMAGE_COMPRESSION_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
} VkImageCompressionFlagBitsEXT;
typedef VkFlags VkImageCompressionFlagsEXT;

typedef enum VkImageCompressionFixedRateFlagBitsEXT {
    VK_IMAGE_COMPRESSION_FIXED_RATE_NONE_EXT = 0,
    VK_IMAGE_COMPRESSION_FIXED_RATE_1BPC_BIT_EXT = 0x00000001,
    VK_IMAGE_COMPRESSION_FIXED_RATE_2BPC_BIT_EXT = 0x00000002,
    VK_IMAGE_COMPRESSION_FIXED_RATE_3BPC_BIT_EXT = 0x00000004,
    VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT = 0x00000008,
    VK_IMAGE_COMPRESSION_FIXED_RATE_5BPC_BIT_EXT = 0x00000010,
    VK_IMAGE_COMPRESSION_FIXED_RATE_6BPC_BIT_EXT = 0x00000020,
    VK_IMAGE_COMPRESSION_FIXED_RATE_7BPC_BIT_EXT = 0x00000040,
    VK_IMAGE_COMPRESSION_FIXED_RATE_8BPC_BIT_EXT = 0x00000080,
    VK_IMAGE_COMPRESSION_FIXED_RATE_9BPC_BIT_EXT = 0x00000100,
    VK_IMAGE_COMPRESSION_FIXED_RATE_10BPC_BIT_EXT = 0x00000200,
    VK_IMAGE_COMPRESSION_FIXED_RATE_11BPC_BIT_EXT = 0x00000400,
    VK_IMAGE_COMPRESSION_FIXED_RATE_12BPC_BIT_EXT = 0x00000800,
    VK_IMAGE_COMPRESSION_FIXED_RATE_13BPC_BIT_EXT = 0x00001000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_14BPC_BIT_EXT = 0x00002000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_15BPC_BIT_EXT = 0x00004000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_16BPC_BIT_EXT = 0x00008000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_17BPC_BIT_EXT = 0x00010000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_18BPC_BIT_EXT = 0x00020000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_19BPC_BIT_EXT = 0x00040000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_20BPC_BIT_EXT = 0x00080000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_21BPC_BIT_EXT = 0x00100000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_22BPC_BIT_EXT = 0x00200000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_23BPC_BIT_EXT = 0x00400000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_24BPC_BIT_EXT = 0x00800000,
    VK_IMAGE_COMPRESSION_FIXED_RATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
} VkImageCompressionFixedRateFlagBitsEXT;
typedef VkFlags VkImageCompressionFixedRateFlagsEXT;

typedef struct VkPhysicalDeviceImageCompressionControlFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           imageCompressionControl;
} VkPhysicalDeviceImageCompressionControlFeaturesEXT;

typedef struct VkImageCompressionControlEXT {
    VkStructureType                         sType;
    const void*                             pNext;
    VkImageCompressionFlagsEXT              flags;
    uint32_t                                compressionControlPlaneCount;
    VkImageCompressionFixedRateFlagsEXT*    pFixedRateFlags;
} VkImageCompressionControlEXT;

typedef struct VkSubresourceLayout2EXT {
    VkStructureType        sType;
    void*                  pNext;
    VkSubresourceLayout    subresourceLayout;
} VkSubresourceLayout2EXT;

typedef struct VkImageSubresource2EXT {
    VkStructureType       sType;
    void*                 pNext;
    VkImageSubresource    imageSubresource;
} VkImageSubresource2EXT;

typedef struct VkImageCompressionPropertiesEXT {
    VkStructureType                        sType;
    void*                                  pNext;
    VkImageCompressionFlagsEXT             imageCompressionFlags;
    VkImageCompressionFixedRateFlagsEXT    imageCompressionFixedRateFlags;
} VkImageCompressionPropertiesEXT;

typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2EXT* pSubresource, VkSubresourceLayout2EXT* pLayout);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2EXT(
    VkDevice                                    device,
    VkImage                                     image,
    const VkImageSubresource2EXT*               pSubresource,
    VkSubresourceLayout2EXT*                    pLayout);
#endif


// --- VK_EXT_attachment_feedback_loop_layout ---
#define VK_EXT_attachment_feedback_loop_layout 1
#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION 2
#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME "VK_EXT_attachment_feedback_loop_layout"
typedef struct VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           attachmentFeedbackLoopLayout;
} VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT;


// --- VK_EXT_4444_formats ---
#define VK_EXT_4444_formats 1
#define VK_EXT_4444_FORMATS_SPEC_VERSION 1
#define VK_EXT_4444_FORMATS_EXTENSION_NAME "VK_EXT_4444_formats"
typedef struct VkPhysicalDevice4444FormatsFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           formatA4R4G4B4;
    VkBool32           formatA4B4G4R4;
} VkPhysicalDevice4444FormatsFeaturesEXT;


// --- VK_ARM_rasterization_order_attachment_access ---
#define VK_ARM_rasterization_order_attachment_access 1
#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1
#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME "VK_ARM_rasterization_order_attachment_access"
typedef struct VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           rasterizationOrderColorAttachmentAccess;
    VkBool32           rasterizationOrderDepthAttachmentAccess;
    VkBool32           rasterizationOrderStencilAttachmentAccess;
} VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM;


// --- VK_EXT_rgba10x6_formats ---
#define VK_EXT_rgba10x6_formats 1
#define VK_EXT_RGBA10X6_FORMATS_SPEC_VERSION 1
#define VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME "VK_EXT_rgba10x6_formats"
typedef struct VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           formatRgba10x6WithoutYCbCrSampler;
} VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT;


// --- VK_NV_acquire_winrt_display ---
#define VK_NV_acquire_winrt_display 1
#define VK_NV_ACQUIRE_WINRT_DISPLAY_SPEC_VERSION 1
#define VK_NV_ACQUIRE_WINRT_DISPLAY_EXTENSION_NAME "VK_NV_acquire_winrt_display"
typedef VkResult (VKAPI_PTR *PFN_vkAcquireWinrtDisplayNV)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);
typedef VkResult (VKAPI_PTR *PFN_vkGetWinrtDisplayNV)(VkPhysicalDevice physicalDevice, uint32_t deviceRelativeId, VkDisplayKHR* pDisplay);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireWinrtDisplayNV(
    VkPhysicalDevice                            physicalDevice,
    VkDisplayKHR                                display);

VKAPI_ATTR VkResult VKAPI_CALL vkGetWinrtDisplayNV(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    deviceRelativeId,
    VkDisplayKHR*                               pDisplay);
#endif


// --- VK_VALVE_mutable_descriptor_type ---
#define VK_VALVE_mutable_descriptor_type 1
#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1
#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_VALVE_mutable_descriptor_type"
typedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           mutableDescriptorType;
} VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE;

typedef struct VkMutableDescriptorTypeListVALVE {
    uint32_t                   descriptorTypeCount;
    const VkDescriptorType*    pDescriptorTypes;
} VkMutableDescriptorTypeListVALVE;

typedef struct VkMutableDescriptorTypeCreateInfoVALVE {
    VkStructureType                            sType;
    const void*                                pNext;
    uint32_t                                   mutableDescriptorTypeListCount;
    const VkMutableDescriptorTypeListVALVE*    pMutableDescriptorTypeLists;
} VkMutableDescriptorTypeCreateInfoVALVE;


// --- VK_EXT_vertex_input_dynamic_state ---
#define VK_EXT_vertex_input_dynamic_state 1
#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_SPEC_VERSION 2
#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_vertex_input_dynamic_state"
typedef struct VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           vertexInputDynamicState;
} VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT;

typedef struct VkVertexInputBindingDescription2EXT {
    VkStructureType      sType;
    void*                pNext;
    uint32_t             binding;
    uint32_t             stride;
    VkVertexInputRate    inputRate;
    uint32_t             divisor;
} VkVertexInputBindingDescription2EXT;

typedef struct VkVertexInputAttributeDescription2EXT {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           location;
    uint32_t           binding;
    VkFormat           format;
    uint32_t           offset;
} VkVertexInputAttributeDescription2EXT;

typedef void (VKAPI_PTR *PFN_vkCmdSetVertexInputEXT)(VkCommandBuffer commandBuffer, uint32_t vertexBindingDescriptionCount, const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, uint32_t vertexAttributeDescriptionCount, const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdSetVertexInputEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexBindingDescriptionCount,
    const VkVertexInputBindingDescription2EXT*  pVertexBindingDescriptions,
    uint32_t                                    vertexAttributeDescriptionCount,
    const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions);
#endif


// --- VK_EXT_physical_device_drm ---
#define VK_EXT_physical_device_drm 1
#define VK_EXT_PHYSICAL_DEVICE_DRM_SPEC_VERSION 1
#define VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME "VK_EXT_physical_device_drm"
typedef struct VkPhysicalDeviceDrmPropertiesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           hasPrimary;
    VkBool32           hasRender;
    int64_t            primaryMajor;
    int64_t            primaryMinor;
    int64_t            renderMajor;
    int64_t            renderMinor;
} VkPhysicalDeviceDrmPropertiesEXT;


// --- VK_EXT_depth_clip_control ---
#define VK_EXT_depth_clip_control 1
#define VK_EXT_DEPTH_CLIP_CONTROL_SPEC_VERSION 1
#define VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME "VK_EXT_depth_clip_control"
typedef struct VkPhysicalDeviceDepthClipControlFeaturesEXT {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           depthClipControl;
} VkPhysicalDeviceDepthClipControlFeaturesEXT;

typedef struct VkPipelineViewportDepthClipControlCreateInfoEXT {
    VkStructureType    sType;
    const void*        pNext;
    VkBool32           negativeOneToOne;
} VkPipelineViewportDepthClipControlCreateInfoEXT;


// --- VK_EXT_primitive_topology_list_restart ---
#define VK_EXT_primitive_topology_list_restart 1
#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION 1
#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME "VK_EXT_primitive_topology_list_restart"
typedef struct VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT { // NOTE(review): struct body continues past this chunk
VkStructureType sType; void* pNext; VkBool32 primitiveTopologyListRestart; VkBool32 primitiveTopologyPatchListRestart; } VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT; #define VK_HUAWEI_subpass_shading 1 #define VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION 2 #define VK_HUAWEI_SUBPASS_SHADING_EXTENSION_NAME "VK_HUAWEI_subpass_shading" typedef struct VkSubpassShadingPipelineCreateInfoHUAWEI { VkStructureType sType; void* pNext; VkRenderPass renderPass; uint32_t subpass; } VkSubpassShadingPipelineCreateInfoHUAWEI; typedef struct VkPhysicalDeviceSubpassShadingFeaturesHUAWEI { VkStructureType sType; void* pNext; VkBool32 subpassShading; } VkPhysicalDeviceSubpassShadingFeaturesHUAWEI; typedef struct VkPhysicalDeviceSubpassShadingPropertiesHUAWEI { VkStructureType sType; void* pNext; uint32_t maxSubpassShadingWorkgroupSizeAspectRatio; } VkPhysicalDeviceSubpassShadingPropertiesHUAWEI; typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)(VkDevice device, VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize); typedef void (VKAPI_PTR *PFN_vkCmdSubpassShadingHUAWEI)(VkCommandBuffer commandBuffer); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI( VkDevice device, VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize); VKAPI_ATTR void VKAPI_CALL vkCmdSubpassShadingHUAWEI( VkCommandBuffer commandBuffer); #endif #define VK_HUAWEI_invocation_mask 1 #define VK_HUAWEI_INVOCATION_MASK_SPEC_VERSION 1 #define VK_HUAWEI_INVOCATION_MASK_EXTENSION_NAME "VK_HUAWEI_invocation_mask" typedef struct VkPhysicalDeviceInvocationMaskFeaturesHUAWEI { VkStructureType sType; void* pNext; VkBool32 invocationMask; } VkPhysicalDeviceInvocationMaskFeaturesHUAWEI; typedef void (VKAPI_PTR *PFN_vkCmdBindInvocationMaskHUAWEI)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBindInvocationMaskHUAWEI( VkCommandBuffer commandBuffer, 
VkImageView imageView, VkImageLayout imageLayout); #endif #define VK_NV_external_memory_rdma 1 typedef void* VkRemoteAddressNV; #define VK_NV_EXTERNAL_MEMORY_RDMA_SPEC_VERSION 1 #define VK_NV_EXTERNAL_MEMORY_RDMA_EXTENSION_NAME "VK_NV_external_memory_rdma" typedef struct VkMemoryGetRemoteAddressInfoNV { VkStructureType sType; const void* pNext; VkDeviceMemory memory; VkExternalMemoryHandleTypeFlagBits handleType; } VkMemoryGetRemoteAddressInfoNV; typedef struct VkPhysicalDeviceExternalMemoryRDMAFeaturesNV { VkStructureType sType; void* pNext; VkBool32 externalMemoryRDMA; } VkPhysicalDeviceExternalMemoryRDMAFeaturesNV; typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryRemoteAddressNV)(VkDevice device, const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryRemoteAddressNV( VkDevice device, const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress); #endif #define VK_EXT_pipeline_properties 1 #define VK_EXT_PIPELINE_PROPERTIES_SPEC_VERSION 1 #define VK_EXT_PIPELINE_PROPERTIES_EXTENSION_NAME "VK_EXT_pipeline_properties" typedef VkPipelineInfoKHR VkPipelineInfoEXT; typedef struct VkPipelinePropertiesIdentifierEXT { VkStructureType sType; void* pNext; uint8_t pipelineIdentifier[VK_UUID_SIZE]; } VkPipelinePropertiesIdentifierEXT; typedef struct VkPhysicalDevicePipelinePropertiesFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 pipelinePropertiesIdentifier; } VkPhysicalDevicePipelinePropertiesFeaturesEXT; typedef VkResult (VKAPI_PTR *PFN_vkGetPipelinePropertiesEXT)(VkDevice device, const VkPipelineInfoEXT* pPipelineInfo, VkBaseOutStructure* pPipelineProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelinePropertiesEXT( VkDevice device, const VkPipelineInfoEXT* pPipelineInfo, VkBaseOutStructure* pPipelineProperties); #endif #define VK_EXT_multisampled_render_to_single_sampled 1 #define 
VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION 1 #define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME "VK_EXT_multisampled_render_to_single_sampled" typedef struct VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 multisampledRenderToSingleSampled; } VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT; typedef struct VkSubpassResolvePerformanceQueryEXT { VkStructureType sType; void* pNext; VkBool32 optimal; } VkSubpassResolvePerformanceQueryEXT; typedef struct VkMultisampledRenderToSingleSampledInfoEXT { VkStructureType sType; const void* pNext; VkBool32 multisampledRenderToSingleSampledEnable; VkSampleCountFlagBits rasterizationSamples; } VkMultisampledRenderToSingleSampledInfoEXT; #define VK_EXT_extended_dynamic_state2 1 #define VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION 1 #define VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME "VK_EXT_extended_dynamic_state2" typedef struct VkPhysicalDeviceExtendedDynamicState2FeaturesEXT { VkStructureType sType; void* pNext; VkBool32 extendedDynamicState2; VkBool32 extendedDynamicState2LogicOp; VkBool32 extendedDynamicState2PatchControlPoints; } VkPhysicalDeviceExtendedDynamicState2FeaturesEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetPatchControlPointsEXT)(VkCommandBuffer commandBuffer, uint32_t patchControlPoints); typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); typedef void (VKAPI_PTR *PFN_vkCmdSetLogicOpEXT)(VkCommandBuffer commandBuffer, VkLogicOp logicOp); typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetPatchControlPointsEXT( VkCommandBuffer commandBuffer, uint32_t patchControlPoints); VKAPI_ATTR void 
VKAPI_CALL vkCmdSetRasterizerDiscardEnableEXT( VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnableEXT( VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); VKAPI_ATTR void VKAPI_CALL vkCmdSetLogicOpEXT( VkCommandBuffer commandBuffer, VkLogicOp logicOp); VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnableEXT( VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); #endif #define VK_EXT_color_write_enable 1 #define VK_EXT_COLOR_WRITE_ENABLE_SPEC_VERSION 1 #define VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME "VK_EXT_color_write_enable" typedef struct VkPhysicalDeviceColorWriteEnableFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 colorWriteEnable; } VkPhysicalDeviceColorWriteEnableFeaturesEXT; typedef struct VkPipelineColorWriteCreateInfoEXT { VkStructureType sType; const void* pNext; uint32_t attachmentCount; const VkBool32* pColorWriteEnables; } VkPipelineColorWriteCreateInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetColorWriteEnableEXT)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32* pColorWriteEnables); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetColorWriteEnableEXT( VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32* pColorWriteEnables); #endif #define VK_EXT_primitives_generated_query 1 #define VK_EXT_PRIMITIVES_GENERATED_QUERY_SPEC_VERSION 1 #define VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME "VK_EXT_primitives_generated_query" typedef struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 primitivesGeneratedQuery; VkBool32 primitivesGeneratedQueryWithRasterizerDiscard; VkBool32 primitivesGeneratedQueryWithNonZeroStreams; } VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT; #define VK_EXT_global_priority_query 1 #define VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION 1 #define VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME "VK_EXT_global_priority_query" #define 
VK_MAX_GLOBAL_PRIORITY_SIZE_EXT VK_MAX_GLOBAL_PRIORITY_SIZE_KHR typedef VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT; typedef VkQueueFamilyGlobalPriorityPropertiesKHR VkQueueFamilyGlobalPriorityPropertiesEXT; #define VK_EXT_image_view_min_lod 1 #define VK_EXT_IMAGE_VIEW_MIN_LOD_SPEC_VERSION 1 #define VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME "VK_EXT_image_view_min_lod" typedef struct VkPhysicalDeviceImageViewMinLodFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 minLod; } VkPhysicalDeviceImageViewMinLodFeaturesEXT; typedef struct VkImageViewMinLodCreateInfoEXT { VkStructureType sType; const void* pNext; float minLod; } VkImageViewMinLodCreateInfoEXT; #define VK_EXT_multi_draw 1 #define VK_EXT_MULTI_DRAW_SPEC_VERSION 1 #define VK_EXT_MULTI_DRAW_EXTENSION_NAME "VK_EXT_multi_draw" typedef struct VkPhysicalDeviceMultiDrawFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 multiDraw; } VkPhysicalDeviceMultiDrawFeaturesEXT; typedef struct VkPhysicalDeviceMultiDrawPropertiesEXT { VkStructureType sType; void* pNext; uint32_t maxMultiDrawCount; } VkPhysicalDeviceMultiDrawPropertiesEXT; typedef struct VkMultiDrawInfoEXT { uint32_t firstVertex; uint32_t vertexCount; } VkMultiDrawInfoEXT; typedef struct VkMultiDrawIndexedInfoEXT { uint32_t firstIndex; uint32_t indexCount; int32_t vertexOffset; } VkMultiDrawIndexedInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdDrawMultiEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT* pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride); typedef void (VKAPI_PTR *PFN_vkCmdDrawMultiIndexedEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT* pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t* pVertexOffset); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiEXT( VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT* 
pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiIndexedEXT( VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT* pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t* pVertexOffset); #endif #define VK_EXT_image_2d_view_of_3d 1 #define VK_EXT_IMAGE_2D_VIEW_OF_3D_SPEC_VERSION 1 #define VK_EXT_IMAGE_2D_VIEW_OF_3D_EXTENSION_NAME "VK_EXT_image_2d_view_of_3d" typedef struct VkPhysicalDeviceImage2DViewOf3DFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 image2DViewOf3D; VkBool32 sampler2DViewOf3D; } VkPhysicalDeviceImage2DViewOf3DFeaturesEXT; #define VK_EXT_load_store_op_none 1 #define VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION 1 #define VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME "VK_EXT_load_store_op_none" #define VK_EXT_border_color_swizzle 1 #define VK_EXT_BORDER_COLOR_SWIZZLE_SPEC_VERSION 1 #define VK_EXT_BORDER_COLOR_SWIZZLE_EXTENSION_NAME "VK_EXT_border_color_swizzle" typedef struct VkPhysicalDeviceBorderColorSwizzleFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 borderColorSwizzle; VkBool32 borderColorSwizzleFromImage; } VkPhysicalDeviceBorderColorSwizzleFeaturesEXT; typedef struct VkSamplerBorderColorComponentMappingCreateInfoEXT { VkStructureType sType; const void* pNext; VkComponentMapping components; VkBool32 srgb; } VkSamplerBorderColorComponentMappingCreateInfoEXT; #define VK_EXT_pageable_device_local_memory 1 #define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION 1 #define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_EXTENSION_NAME "VK_EXT_pageable_device_local_memory" typedef struct VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 pageableDeviceLocalMemory; } VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT; typedef void (VKAPI_PTR *PFN_vkSetDeviceMemoryPriorityEXT)(VkDevice device, VkDeviceMemory memory, float priority); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR 
void VKAPI_CALL vkSetDeviceMemoryPriorityEXT( VkDevice device, VkDeviceMemory memory, float priority); #endif #define VK_VALVE_descriptor_set_host_mapping 1 #define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_SPEC_VERSION 1 #define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_EXTENSION_NAME "VK_VALVE_descriptor_set_host_mapping" typedef struct VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE { VkStructureType sType; void* pNext; VkBool32 descriptorSetHostMapping; } VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE; typedef struct VkDescriptorSetBindingReferenceVALVE { VkStructureType sType; const void* pNext; VkDescriptorSetLayout descriptorSetLayout; uint32_t binding; } VkDescriptorSetBindingReferenceVALVE; typedef struct VkDescriptorSetLayoutHostMappingInfoVALVE { VkStructureType sType; void* pNext; size_t descriptorOffset; uint32_t descriptorSize; } VkDescriptorSetLayoutHostMappingInfoVALVE; typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)(VkDevice device, const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping); typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetHostMappingVALVE)(VkDevice device, VkDescriptorSet descriptorSet, void** ppData); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutHostMappingInfoVALVE( VkDevice device, const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping); VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetHostMappingVALVE( VkDevice device, VkDescriptorSet descriptorSet, void** ppData); #endif #define VK_EXT_non_seamless_cube_map 1 #define VK_EXT_NON_SEAMLESS_CUBE_MAP_SPEC_VERSION 1 #define VK_EXT_NON_SEAMLESS_CUBE_MAP_EXTENSION_NAME "VK_EXT_non_seamless_cube_map" typedef struct VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 nonSeamlessCubeMap; } VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT; #define 
VK_QCOM_fragment_density_map_offset 1 #define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 1 #define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME "VK_QCOM_fragment_density_map_offset" typedef struct VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM { VkStructureType sType; void* pNext; VkBool32 fragmentDensityMapOffset; } VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM; typedef struct VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM { VkStructureType sType; void* pNext; VkExtent2D fragmentDensityOffsetGranularity; } VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM; typedef struct VkSubpassFragmentDensityMapOffsetEndInfoQCOM { VkStructureType sType; const void* pNext; uint32_t fragmentDensityOffsetCount; const VkOffset2D* pFragmentDensityOffsets; } VkSubpassFragmentDensityMapOffsetEndInfoQCOM; #define VK_NV_linear_color_attachment 1 #define VK_NV_LINEAR_COLOR_ATTACHMENT_SPEC_VERSION 1 #define VK_NV_LINEAR_COLOR_ATTACHMENT_EXTENSION_NAME "VK_NV_linear_color_attachment" typedef struct VkPhysicalDeviceLinearColorAttachmentFeaturesNV { VkStructureType sType; void* pNext; VkBool32 linearColorAttachment; } VkPhysicalDeviceLinearColorAttachmentFeaturesNV; #define VK_GOOGLE_surfaceless_query 1 #define VK_GOOGLE_SURFACELESS_QUERY_SPEC_VERSION 1 #define VK_GOOGLE_SURFACELESS_QUERY_EXTENSION_NAME "VK_GOOGLE_surfaceless_query" #define VK_EXT_image_compression_control_swapchain 1 #define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_SPEC_VERSION 1 #define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_EXTENSION_NAME "VK_EXT_image_compression_control_swapchain" typedef struct VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 imageCompressionControlSwapchain; } VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT; #define VK_QCOM_image_processing 1 #define VK_QCOM_IMAGE_PROCESSING_SPEC_VERSION 1 #define VK_QCOM_IMAGE_PROCESSING_EXTENSION_NAME "VK_QCOM_image_processing" typedef struct 
VkImageViewSampleWeightCreateInfoQCOM { VkStructureType sType; const void* pNext; VkOffset2D filterCenter; VkExtent2D filterSize; uint32_t numPhases; } VkImageViewSampleWeightCreateInfoQCOM; typedef struct VkPhysicalDeviceImageProcessingFeaturesQCOM { VkStructureType sType; void* pNext; VkBool32 textureSampleWeighted; VkBool32 textureBoxFilter; VkBool32 textureBlockMatch; } VkPhysicalDeviceImageProcessingFeaturesQCOM; typedef struct VkPhysicalDeviceImageProcessingPropertiesQCOM { VkStructureType sType; void* pNext; uint32_t maxWeightFilterPhases; VkExtent2D maxWeightFilterDimension; VkExtent2D maxBlockMatchRegion; VkExtent2D maxBoxFilterBlockSize; } VkPhysicalDeviceImageProcessingPropertiesQCOM; #define VK_EXT_subpass_merge_feedback 1 #define VK_EXT_SUBPASS_MERGE_FEEDBACK_SPEC_VERSION 2 #define VK_EXT_SUBPASS_MERGE_FEEDBACK_EXTENSION_NAME "VK_EXT_subpass_merge_feedback" typedef enum VkSubpassMergeStatusEXT { VK_SUBPASS_MERGE_STATUS_MERGED_EXT = 0, VK_SUBPASS_MERGE_STATUS_DISALLOWED_EXT = 1, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SIDE_EFFECTS_EXT = 2, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SAMPLES_MISMATCH_EXT = 3, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_VIEWS_MISMATCH_EXT = 4, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_ALIASING_EXT = 5, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPENDENCIES_EXT = 6, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INCOMPATIBLE_INPUT_ATTACHMENT_EXT = 7, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_TOO_MANY_ATTACHMENTS_EXT = 8, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INSUFFICIENT_STORAGE_EXT = 9, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPTH_STENCIL_COUNT_EXT = 10, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_RESOLVE_ATTACHMENT_REUSE_EXT = 11, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SINGLE_SUBPASS_EXT = 12, VK_SUBPASS_MERGE_STATUS_NOT_MERGED_UNSPECIFIED_EXT = 13, VK_SUBPASS_MERGE_STATUS_MAX_ENUM_EXT = 0x7FFFFFFF } VkSubpassMergeStatusEXT; typedef struct VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 subpassMergeFeedback; } 
VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT; typedef struct VkRenderPassCreationControlEXT { VkStructureType sType; const void* pNext; VkBool32 disallowMerging; } VkRenderPassCreationControlEXT; typedef struct VkRenderPassCreationFeedbackInfoEXT { uint32_t postMergeSubpassCount; } VkRenderPassCreationFeedbackInfoEXT; typedef struct VkRenderPassCreationFeedbackCreateInfoEXT { VkStructureType sType; const void* pNext; VkRenderPassCreationFeedbackInfoEXT* pRenderPassFeedback; } VkRenderPassCreationFeedbackCreateInfoEXT; typedef struct VkRenderPassSubpassFeedbackInfoEXT { VkSubpassMergeStatusEXT subpassMergeStatus; char description[VK_MAX_DESCRIPTION_SIZE]; uint32_t postMergeIndex; } VkRenderPassSubpassFeedbackInfoEXT; typedef struct VkRenderPassSubpassFeedbackCreateInfoEXT { VkStructureType sType; const void* pNext; VkRenderPassSubpassFeedbackInfoEXT* pSubpassFeedback; } VkRenderPassSubpassFeedbackCreateInfoEXT; #define VK_EXT_shader_module_identifier 1 #define VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT 32U #define VK_EXT_SHADER_MODULE_IDENTIFIER_SPEC_VERSION 1 #define VK_EXT_SHADER_MODULE_IDENTIFIER_EXTENSION_NAME "VK_EXT_shader_module_identifier" typedef struct VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT { VkStructureType sType; void* pNext; VkBool32 shaderModuleIdentifier; } VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT; typedef struct VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT { VkStructureType sType; void* pNext; uint8_t shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE]; } VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT; typedef struct VkPipelineShaderStageModuleIdentifierCreateInfoEXT { VkStructureType sType; const void* pNext; uint32_t identifierSize; const uint8_t* pIdentifier; } VkPipelineShaderStageModuleIdentifierCreateInfoEXT; typedef struct VkShaderModuleIdentifierEXT { VkStructureType sType; void* pNext; uint32_t identifierSize; uint8_t identifier[VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT]; } VkShaderModuleIdentifierEXT; typedef 
void (VKAPI_PTR *PFN_vkGetShaderModuleIdentifierEXT)(VkDevice device, VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier); typedef void (VKAPI_PTR *PFN_vkGetShaderModuleCreateInfoIdentifierEXT)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleIdentifierEXT( VkDevice device, VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier); VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleCreateInfoIdentifierEXT( VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier); #endif #define VK_QCOM_tile_properties 1 #define VK_QCOM_TILE_PROPERTIES_SPEC_VERSION 1 #define VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME "VK_QCOM_tile_properties" typedef struct VkPhysicalDeviceTilePropertiesFeaturesQCOM { VkStructureType sType; void* pNext; VkBool32 tileProperties; } VkPhysicalDeviceTilePropertiesFeaturesQCOM; typedef struct VkTilePropertiesQCOM { VkStructureType sType; void* pNext; VkExtent3D tileSize; VkExtent2D apronSize; VkOffset2D origin; } VkTilePropertiesQCOM; typedef VkResult (VKAPI_PTR *PFN_vkGetFramebufferTilePropertiesQCOM)(VkDevice device, VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties); typedef VkResult (VKAPI_PTR *PFN_vkGetDynamicRenderingTilePropertiesQCOM)(VkDevice device, const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetFramebufferTilePropertiesQCOM( VkDevice device, VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties); VKAPI_ATTR VkResult VKAPI_CALL vkGetDynamicRenderingTilePropertiesQCOM( VkDevice device, const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties); #endif #define VK_SEC_amigo_profiling 1 #define VK_SEC_AMIGO_PROFILING_SPEC_VERSION 1 #define VK_SEC_AMIGO_PROFILING_EXTENSION_NAME 
"VK_SEC_amigo_profiling" typedef struct VkPhysicalDeviceAmigoProfilingFeaturesSEC { VkStructureType sType; void* pNext; VkBool32 amigoProfiling; } VkPhysicalDeviceAmigoProfilingFeaturesSEC; typedef struct VkAmigoProfilingSubmitInfoSEC { VkStructureType sType; const void* pNext; uint64_t firstDrawTimestamp; uint64_t swapBufferTimestamp; } VkAmigoProfilingSubmitInfoSEC; #define VK_KHR_acceleration_structure 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) #define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 13 #define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure" typedef enum VkBuildAccelerationStructureModeKHR { VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0, VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1, VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF } VkBuildAccelerationStructureModeKHR; typedef enum VkAccelerationStructureBuildTypeKHR { VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkAccelerationStructureBuildTypeKHR; typedef enum VkAccelerationStructureCompatibilityKHR { VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0, VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1, VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF } VkAccelerationStructureCompatibilityKHR; typedef enum VkAccelerationStructureCreateFlagBitsKHR { VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001, VK_ACCELERATION_STRUCTURE_CREATE_MOTION_BIT_NV = 0x00000004, VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkAccelerationStructureCreateFlagBitsKHR; typedef VkFlags VkAccelerationStructureCreateFlagsKHR; typedef union VkDeviceOrHostAddressKHR { VkDeviceAddress deviceAddress; void* hostAddress; } VkDeviceOrHostAddressKHR; typedef struct 
VkAccelerationStructureBuildRangeInfoKHR { uint32_t primitiveCount; uint32_t primitiveOffset; uint32_t firstVertex; uint32_t transformOffset; } VkAccelerationStructureBuildRangeInfoKHR; typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { VkStructureType sType; const void* pNext; VkFormat vertexFormat; VkDeviceOrHostAddressConstKHR vertexData; VkDeviceSize vertexStride; uint32_t maxVertex; VkIndexType indexType; VkDeviceOrHostAddressConstKHR indexData; VkDeviceOrHostAddressConstKHR transformData; } VkAccelerationStructureGeometryTrianglesDataKHR; typedef struct VkAccelerationStructureGeometryAabbsDataKHR { VkStructureType sType; const void* pNext; VkDeviceOrHostAddressConstKHR data; VkDeviceSize stride; } VkAccelerationStructureGeometryAabbsDataKHR; typedef struct VkAccelerationStructureGeometryInstancesDataKHR { VkStructureType sType; const void* pNext; VkBool32 arrayOfPointers; VkDeviceOrHostAddressConstKHR data; } VkAccelerationStructureGeometryInstancesDataKHR; typedef union VkAccelerationStructureGeometryDataKHR { VkAccelerationStructureGeometryTrianglesDataKHR triangles; VkAccelerationStructureGeometryAabbsDataKHR aabbs; VkAccelerationStructureGeometryInstancesDataKHR instances; } VkAccelerationStructureGeometryDataKHR; typedef struct VkAccelerationStructureGeometryKHR { VkStructureType sType; const void* pNext; VkGeometryTypeKHR geometryType; VkAccelerationStructureGeometryDataKHR geometry; VkGeometryFlagsKHR flags; } VkAccelerationStructureGeometryKHR; typedef struct VkAccelerationStructureBuildGeometryInfoKHR { VkStructureType sType; const void* pNext; VkAccelerationStructureTypeKHR type; VkBuildAccelerationStructureFlagsKHR flags; VkBuildAccelerationStructureModeKHR mode; VkAccelerationStructureKHR srcAccelerationStructure; VkAccelerationStructureKHR dstAccelerationStructure; uint32_t geometryCount; const VkAccelerationStructureGeometryKHR* pGeometries; const VkAccelerationStructureGeometryKHR* const* ppGeometries; VkDeviceOrHostAddressKHR 
scratchData; } VkAccelerationStructureBuildGeometryInfoKHR; typedef struct VkAccelerationStructureCreateInfoKHR { VkStructureType sType; const void* pNext; VkAccelerationStructureCreateFlagsKHR createFlags; VkBuffer buffer; VkDeviceSize offset; VkDeviceSize size; VkAccelerationStructureTypeKHR type; VkDeviceAddress deviceAddress; } VkAccelerationStructureCreateInfoKHR; typedef struct VkWriteDescriptorSetAccelerationStructureKHR { VkStructureType sType; const void* pNext; uint32_t accelerationStructureCount; const VkAccelerationStructureKHR* pAccelerationStructures; } VkWriteDescriptorSetAccelerationStructureKHR; typedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 accelerationStructure; VkBool32 accelerationStructureCaptureReplay; VkBool32 accelerationStructureIndirectBuild; VkBool32 accelerationStructureHostCommands; VkBool32 descriptorBindingAccelerationStructureUpdateAfterBind; } VkPhysicalDeviceAccelerationStructureFeaturesKHR; typedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { VkStructureType sType; void* pNext; uint64_t maxGeometryCount; uint64_t maxInstanceCount; uint64_t maxPrimitiveCount; uint32_t maxPerStageDescriptorAccelerationStructures; uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures; uint32_t maxDescriptorSetAccelerationStructures; uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures; uint32_t minAccelerationStructureScratchOffsetAlignment; } VkPhysicalDeviceAccelerationStructurePropertiesKHR; typedef struct VkAccelerationStructureDeviceAddressInfoKHR { VkStructureType sType; const void* pNext; VkAccelerationStructureKHR accelerationStructure; } VkAccelerationStructureDeviceAddressInfoKHR; typedef struct VkAccelerationStructureVersionInfoKHR { VkStructureType sType; const void* pNext; const uint8_t* pVersionData; } VkAccelerationStructureVersionInfoKHR; typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { VkStructureType sType; const void* 
pNext; VkAccelerationStructureKHR src; VkDeviceOrHostAddressKHR dst; VkCopyAccelerationStructureModeKHR mode; } VkCopyAccelerationStructureToMemoryInfoKHR; typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { VkStructureType sType; const void* pNext; VkDeviceOrHostAddressConstKHR src; VkAccelerationStructureKHR dst; VkCopyAccelerationStructureModeKHR mode; } VkCopyMemoryToAccelerationStructureInfoKHR; typedef struct VkCopyAccelerationStructureInfoKHR { VkStructureType sType; const void* pNext; VkAccelerationStructureKHR src; VkAccelerationStructureKHR dst; VkCopyAccelerationStructureModeKHR mode; } VkCopyAccelerationStructureInfoKHR; typedef struct VkAccelerationStructureBuildSizesInfoKHR { VkStructureType sType; const void* pNext; VkDeviceSize accelerationStructureSize; VkDeviceSize updateScratchSize; VkDeviceSize buildScratchSize; } VkAccelerationStructureBuildSizesInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, 
uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); typedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const 
VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const 
VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); VKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR( VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); #endif #define VK_KHR_ray_tracing_pipeline 1 #define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1 #define 
VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME "VK_KHR_ray_tracing_pipeline" typedef enum VkShaderGroupShaderKHR { VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF } VkShaderGroupShaderKHR; typedef struct VkRayTracingShaderGroupCreateInfoKHR { VkStructureType sType; const void* pNext; VkRayTracingShaderGroupTypeKHR type; uint32_t generalShader; uint32_t closestHitShader; uint32_t anyHitShader; uint32_t intersectionShader; const void* pShaderGroupCaptureReplayHandle; } VkRayTracingShaderGroupCreateInfoKHR; typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { VkStructureType sType; const void* pNext; uint32_t maxPipelineRayPayloadSize; uint32_t maxPipelineRayHitAttributeSize; } VkRayTracingPipelineInterfaceCreateInfoKHR; typedef struct VkRayTracingPipelineCreateInfoKHR { VkStructureType sType; const void* pNext; VkPipelineCreateFlags flags; uint32_t stageCount; const VkPipelineShaderStageCreateInfo* pStages; uint32_t groupCount; const VkRayTracingShaderGroupCreateInfoKHR* pGroups; uint32_t maxPipelineRayRecursionDepth; const VkPipelineLibraryCreateInfoKHR* pLibraryInfo; const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; const VkPipelineDynamicStateCreateInfo* pDynamicState; VkPipelineLayout layout; VkPipeline basePipelineHandle; int32_t basePipelineIndex; } VkRayTracingPipelineCreateInfoKHR; typedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 rayTracingPipeline; VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplay; VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed; VkBool32 rayTracingPipelineTraceRaysIndirect; VkBool32 rayTraversalPrimitiveCulling; } VkPhysicalDeviceRayTracingPipelineFeaturesKHR; typedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR { VkStructureType sType; void* pNext; uint32_t 
shaderGroupHandleSize; uint32_t maxRayRecursionDepth; uint32_t maxShaderGroupStride; uint32_t shaderGroupBaseAlignment; uint32_t shaderGroupHandleCaptureReplaySize; uint32_t maxRayDispatchInvocationCount; uint32_t shaderGroupHandleAlignment; uint32_t maxRayHitAttributeSize; } VkPhysicalDeviceRayTracingPipelinePropertiesKHR; typedef struct VkStridedDeviceAddressRegionKHR { VkDeviceAddress deviceAddress; VkDeviceSize stride; VkDeviceSize size; } VkStridedDeviceAddressRegionKHR; typedef struct VkTraceRaysIndirectCommandKHR { uint32_t width; uint32_t height; uint32_t depth; } VkTraceRaysIndirectCommandKHR; typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, 
uint32_t group, VkShaderGroupShaderKHR groupShader); typedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR( VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader); VKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR( VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); #endif #define VK_KHR_ray_query 1 #define VK_KHR_RAY_QUERY_SPEC_VERSION 1 #define VK_KHR_RAY_QUERY_EXTENSION_NAME "VK_KHR_ray_query" typedef struct VkPhysicalDeviceRayQueryFeaturesKHR { VkStructureType sType; void* pNext; VkBool32 
rayQuery; } VkPhysicalDeviceRayQueryFeaturesKHR; #ifdef __cplusplus } #endif #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_ios.h ================================================ #ifndef VULKAN_IOS_H_ #define VULKAN_IOS_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. ** */ #ifdef __cplusplus extern "C" { #endif #define VK_MVK_ios_surface 1 #define VK_MVK_IOS_SURFACE_SPEC_VERSION 3 #define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" typedef VkFlags VkIOSSurfaceCreateFlagsMVK; typedef struct VkIOSSurfaceCreateInfoMVK { VkStructureType sType; const void* pNext; VkIOSSurfaceCreateFlagsMVK flags; const void* pView; } VkIOSSurfaceCreateInfoMVK; typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #endif #ifdef __cplusplus } #endif #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_macos.h ================================================ #ifndef VULKAN_MACOS_H_ #define VULKAN_MACOS_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. 
** */ #ifdef __cplusplus extern "C" { #endif #define VK_MVK_macos_surface 1 #define VK_MVK_MACOS_SURFACE_SPEC_VERSION 3 #define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; typedef struct VkMacOSSurfaceCreateInfoMVK { VkStructureType sType; const void* pNext; VkMacOSSurfaceCreateFlagsMVK flags; const void* pView; } VkMacOSSurfaceCreateInfoMVK; typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #endif #ifdef __cplusplus } #endif #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_metal.h ================================================ #ifndef VULKAN_METAL_H_ #define VULKAN_METAL_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. 
** */ #ifdef __cplusplus extern "C" { #endif #define VK_EXT_metal_surface 1 #ifdef __OBJC__ @class CAMetalLayer; #else typedef void CAMetalLayer; #endif #define VK_EXT_METAL_SURFACE_SPEC_VERSION 1 #define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface" typedef VkFlags VkMetalSurfaceCreateFlagsEXT; typedef struct VkMetalSurfaceCreateInfoEXT { VkStructureType sType; const void* pNext; VkMetalSurfaceCreateFlagsEXT flags; const CAMetalLayer* pLayer; } VkMetalSurfaceCreateInfoEXT; typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT( VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); #endif #define VK_EXT_metal_objects 1 #ifdef __OBJC__ @protocol MTLDevice; typedef id MTLDevice_id; #else typedef void* MTLDevice_id; #endif #ifdef __OBJC__ @protocol MTLCommandQueue; typedef id MTLCommandQueue_id; #else typedef void* MTLCommandQueue_id; #endif #ifdef __OBJC__ @protocol MTLBuffer; typedef id MTLBuffer_id; #else typedef void* MTLBuffer_id; #endif #ifdef __OBJC__ @protocol MTLTexture; typedef id MTLTexture_id; #else typedef void* MTLTexture_id; #endif typedef struct __IOSurface* IOSurfaceRef; #ifdef __OBJC__ @protocol MTLSharedEvent; typedef id MTLSharedEvent_id; #else typedef void* MTLSharedEvent_id; #endif #define VK_EXT_METAL_OBJECTS_SPEC_VERSION 1 #define VK_EXT_METAL_OBJECTS_EXTENSION_NAME "VK_EXT_metal_objects" typedef enum VkExportMetalObjectTypeFlagBitsEXT { VK_EXPORT_METAL_OBJECT_TYPE_METAL_DEVICE_BIT_EXT = 0x00000001, VK_EXPORT_METAL_OBJECT_TYPE_METAL_COMMAND_QUEUE_BIT_EXT = 0x00000002, VK_EXPORT_METAL_OBJECT_TYPE_METAL_BUFFER_BIT_EXT = 0x00000004, VK_EXPORT_METAL_OBJECT_TYPE_METAL_TEXTURE_BIT_EXT = 0x00000008, 
VK_EXPORT_METAL_OBJECT_TYPE_METAL_IOSURFACE_BIT_EXT = 0x00000010, VK_EXPORT_METAL_OBJECT_TYPE_METAL_SHARED_EVENT_BIT_EXT = 0x00000020, VK_EXPORT_METAL_OBJECT_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkExportMetalObjectTypeFlagBitsEXT; typedef VkFlags VkExportMetalObjectTypeFlagsEXT; typedef struct VkExportMetalObjectCreateInfoEXT { VkStructureType sType; const void* pNext; VkExportMetalObjectTypeFlagBitsEXT exportObjectType; } VkExportMetalObjectCreateInfoEXT; typedef struct VkExportMetalObjectsInfoEXT { VkStructureType sType; const void* pNext; } VkExportMetalObjectsInfoEXT; typedef struct VkExportMetalDeviceInfoEXT { VkStructureType sType; const void* pNext; MTLDevice_id mtlDevice; } VkExportMetalDeviceInfoEXT; typedef struct VkExportMetalCommandQueueInfoEXT { VkStructureType sType; const void* pNext; VkQueue queue; MTLCommandQueue_id mtlCommandQueue; } VkExportMetalCommandQueueInfoEXT; typedef struct VkExportMetalBufferInfoEXT { VkStructureType sType; const void* pNext; VkDeviceMemory memory; MTLBuffer_id mtlBuffer; } VkExportMetalBufferInfoEXT; typedef struct VkImportMetalBufferInfoEXT { VkStructureType sType; const void* pNext; MTLBuffer_id mtlBuffer; } VkImportMetalBufferInfoEXT; typedef struct VkExportMetalTextureInfoEXT { VkStructureType sType; const void* pNext; VkImage image; VkImageView imageView; VkBufferView bufferView; VkImageAspectFlagBits plane; MTLTexture_id mtlTexture; } VkExportMetalTextureInfoEXT; typedef struct VkImportMetalTextureInfoEXT { VkStructureType sType; const void* pNext; VkImageAspectFlagBits plane; MTLTexture_id mtlTexture; } VkImportMetalTextureInfoEXT; typedef struct VkExportMetalIOSurfaceInfoEXT { VkStructureType sType; const void* pNext; VkImage image; IOSurfaceRef ioSurface; } VkExportMetalIOSurfaceInfoEXT; typedef struct VkImportMetalIOSurfaceInfoEXT { VkStructureType sType; const void* pNext; IOSurfaceRef ioSurface; } VkImportMetalIOSurfaceInfoEXT; typedef struct VkExportMetalSharedEventInfoEXT { VkStructureType sType; 
const void* pNext; VkSemaphore semaphore; VkEvent event; MTLSharedEvent_id mtlSharedEvent; } VkExportMetalSharedEventInfoEXT; typedef struct VkImportMetalSharedEventInfoEXT { VkStructureType sType; const void* pNext; MTLSharedEvent_id mtlSharedEvent; } VkImportMetalSharedEventInfoEXT; typedef void (VKAPI_PTR *PFN_vkExportMetalObjectsEXT)(VkDevice device, VkExportMetalObjectsInfoEXT* pMetalObjectsInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkExportMetalObjectsEXT( VkDevice device, VkExportMetalObjectsInfoEXT* pMetalObjectsInfo); #endif #ifdef __cplusplus } #endif #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_win32.h ================================================ #ifndef VULKAN_WIN32_H_ #define VULKAN_WIN32_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. ** */ #ifdef __cplusplus extern "C" { #endif #define VK_KHR_win32_surface 1 #define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 #define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" typedef VkFlags VkWin32SurfaceCreateFlagsKHR; typedef struct VkWin32SurfaceCreateInfoKHR { VkStructureType sType; const void* pNext; VkWin32SurfaceCreateFlagsKHR flags; HINSTANCE hinstance; HWND hwnd; } VkWin32SurfaceCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); VKAPI_ATTR VkBool32 VKAPI_CALL 
vkGetPhysicalDeviceWin32PresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); #endif #define VK_KHR_external_memory_win32 1 #define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" typedef struct VkImportMemoryWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagBits handleType; HANDLE handle; LPCWSTR name; } VkImportMemoryWin32HandleInfoKHR; typedef struct VkExportMemoryWin32HandleInfoKHR { VkStructureType sType; const void* pNext; const SECURITY_ATTRIBUTES* pAttributes; DWORD dwAccess; LPCWSTR name; } VkExportMemoryWin32HandleInfoKHR; typedef struct VkMemoryWin32HandlePropertiesKHR { VkStructureType sType; void* pNext; uint32_t memoryTypeBits; } VkMemoryWin32HandlePropertiesKHR; typedef struct VkMemoryGetWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkDeviceMemory memory; VkExternalMemoryHandleTypeFlagBits handleType; } VkMemoryGetWin32HandleInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); #endif #define VK_KHR_win32_keyed_mutex 1 #define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 #define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" typedef struct 
VkWin32KeyedMutexAcquireReleaseInfoKHR { VkStructureType sType; const void* pNext; uint32_t acquireCount; const VkDeviceMemory* pAcquireSyncs; const uint64_t* pAcquireKeys; const uint32_t* pAcquireTimeouts; uint32_t releaseCount; const VkDeviceMemory* pReleaseSyncs; const uint64_t* pReleaseKeys; } VkWin32KeyedMutexAcquireReleaseInfoKHR; #define VK_KHR_external_semaphore_win32 1 #define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" typedef struct VkImportSemaphoreWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkSemaphore semaphore; VkSemaphoreImportFlags flags; VkExternalSemaphoreHandleTypeFlagBits handleType; HANDLE handle; LPCWSTR name; } VkImportSemaphoreWin32HandleInfoKHR; typedef struct VkExportSemaphoreWin32HandleInfoKHR { VkStructureType sType; const void* pNext; const SECURITY_ATTRIBUTES* pAttributes; DWORD dwAccess; LPCWSTR name; } VkExportSemaphoreWin32HandleInfoKHR; typedef struct VkD3D12FenceSubmitInfoKHR { VkStructureType sType; const void* pNext; uint32_t waitSemaphoreValuesCount; const uint64_t* pWaitSemaphoreValues; uint32_t signalSemaphoreValuesCount; const uint64_t* pSignalSemaphoreValues; } VkD3D12FenceSubmitInfoKHR; typedef struct VkSemaphoreGetWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkSemaphore semaphore; VkExternalSemaphoreHandleTypeFlagBits handleType; } VkSemaphoreGetWin32HandleInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); VKAPI_ATTR VkResult VKAPI_CALL 
vkGetSemaphoreWin32HandleKHR( VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); #endif #define VK_KHR_external_fence_win32 1 #define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1 #define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32" typedef struct VkImportFenceWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkFence fence; VkFenceImportFlags flags; VkExternalFenceHandleTypeFlagBits handleType; HANDLE handle; LPCWSTR name; } VkImportFenceWin32HandleInfoKHR; typedef struct VkExportFenceWin32HandleInfoKHR { VkStructureType sType; const void* pNext; const SECURITY_ATTRIBUTES* pAttributes; DWORD dwAccess; LPCWSTR name; } VkExportFenceWin32HandleInfoKHR; typedef struct VkFenceGetWin32HandleInfoKHR { VkStructureType sType; const void* pNext; VkFence fence; VkExternalFenceHandleTypeFlagBits handleType; } VkFenceGetWin32HandleInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR( VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR( VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); #endif #define VK_NV_external_memory_win32 1 #define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 #define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32" typedef struct VkImportMemoryWin32HandleInfoNV { VkStructureType sType; const void* pNext; VkExternalMemoryHandleTypeFlagsNV handleType; HANDLE handle; } VkImportMemoryWin32HandleInfoNV; typedef struct VkExportMemoryWin32HandleInfoNV { VkStructureType sType; const void* pNext; 
const SECURITY_ATTRIBUTES* pAttributes; DWORD dwAccess; } VkExportMemoryWin32HandleInfoNV; typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV( VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); #endif #define VK_NV_win32_keyed_mutex 1 #define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2 #define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex" typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV { VkStructureType sType; const void* pNext; uint32_t acquireCount; const VkDeviceMemory* pAcquireSyncs; const uint64_t* pAcquireKeys; const uint32_t* pAcquireTimeoutMilliseconds; uint32_t releaseCount; const VkDeviceMemory* pReleaseSyncs; const uint64_t* pReleaseKeys; } VkWin32KeyedMutexAcquireReleaseInfoNV; #define VK_EXT_full_screen_exclusive 1 #define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4 #define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive" typedef enum VkFullScreenExclusiveEXT { VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0, VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1, VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2, VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3, VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF } VkFullScreenExclusiveEXT; typedef struct VkSurfaceFullScreenExclusiveInfoEXT { VkStructureType sType; void* pNext; VkFullScreenExclusiveEXT fullScreenExclusive; } VkSurfaceFullScreenExclusiveInfoEXT; typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT { VkStructureType sType; void* pNext; VkBool32 fullScreenExclusiveSupported; } VkSurfaceCapabilitiesFullScreenExclusiveEXT; typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT { VkStructureType sType; const void* pNext; HMONITOR hmonitor; } VkSurfaceFullScreenExclusiveWin32InfoEXT; typedef VkResult (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT( VkDevice device, VkSwapchainKHR swapchain); VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT( VkDevice device, VkSwapchainKHR swapchain); VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT( VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes); #endif #ifdef __cplusplus } #endif #endif ================================================ FILE: deps/vulkan-headers/vulkan/vulkan_xcb.h ================================================ #ifndef VULKAN_XCB_H_ #define VULKAN_XCB_H_ 1 /* ** Copyright 2015-2022 The Khronos Group Inc. ** ** SPDX-License-Identifier: Apache-2.0 */ /* ** This header is generated from the Khronos Vulkan XML API Registry. 
** */ #ifdef __cplusplus extern "C" { #endif #define VK_KHR_xcb_surface 1 #define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 #define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" typedef VkFlags VkXcbSurfaceCreateFlagsKHR; typedef struct VkXcbSurfaceCreateInfoKHR { VkStructureType sType; const void* pNext; VkXcbSurfaceCreateFlagsKHR flags; xcb_connection_t* connection; xcb_window_t window; } VkXcbSurfaceCreateInfoKHR; typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); #endif #ifdef __cplusplus } #endif #endif ================================================ FILE: include/nicegraf-mtl-handles.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions 
of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf.h" #ifdef __cplusplus extern "C" { #endif /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLTexture. The caller is responsible for casting the return * value to a MTLTexture. * * @param image A handle to a nicegraf image. */ uintptr_t ngf_get_mtl_image_handle(ngf_image image) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLBuffer. The caller is responsible for casting the return * value to a MTLBuffer. * * @param buffer A handle to a nicegraf buffer. */ uintptr_t ngf_get_mtl_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLSamplerState. The caller is responsible for casting the * return value to a MTLSamplerState. * * @param sampler A handle to a nicegraf sampler. */ uintptr_t ngf_get_mtl_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLCommandBuffer. The caller is responsible for casting * the return value to a MTLCommandBuffer. * * @param cmd_buffer A handle to a nicegraf command buffer. */ uintptr_t ngf_get_mtl_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLRenderCommandEncoder. The caller is responsible for casting * the return value to a MTLRenderCommandEncoder. * * @param render_encoder A handle to a nicegraf render encoder.
*/ uintptr_t ngf_get_mtl_render_encoder_handle(ngf_render_encoder render_encoder) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLBlitCommandEncoder. The caller is responsible for casting * the return value to a MTLBlitCommandEncoder. * * @param xfer_encoder A handle to a nicegraf transfer encoder. */ uintptr_t ngf_get_mtl_xfer_encoder_handle(ngf_xfer_encoder xfer_encoder) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLComputeCommandEncoder. The caller is responsible for casting * the return value to a MTLComputeCommandEncoder. * * @param compute_encoder A handle to a nicegraf compute encoder. */ uintptr_t ngf_get_mtl_compute_encoder_handle(ngf_compute_encoder compute_encoder) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uint32_t representing the underlying MTLPixelFormat. The caller is responsible for casting the return * value to a MTLPixelFormat. * * @param format A nicegraf image format. */ uint32_t ngf_get_mtl_pixel_format_index(ngf_image_format format) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying MTLDevice. The caller is responsible for casting the return value * to a MTLDevice. */ uintptr_t ngf_get_mtl_device() NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the counter sample buffer attachment descriptor to be used by the next compute pass. * * @param cmd_buffer A handle to a nicegraf command buffer. * @param sample_buf_attachment_descriptor uintptr_t to MTLComputePassSampleBufferAttachmentDescriptor handle. */ void ngf_mtl_set_sample_attachment_for_next_compute_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor ) NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the counter sample buffer attachment descriptor to be used by the next render pass. * * @param cmd_buffer A handle to a nicegraf command buffer. * @param sample_buf_attachment_descriptor uintptr_t to MTLRenderPassSampleBufferAttachmentDescriptor handle.
*/ void ngf_mtl_set_sample_attachment_for_next_render_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor ) NGF_NOEXCEPT; #ifdef __cplusplus } #endif ================================================ FILE: include/nicegraf-util.h ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf.h" #include <stddef.h> /** * @file * \defgroup ngf_util Utility Library * * This module contains routines and structures that provide auxiliary functionality or help reduce boilerplate. */ #ifdef __cplusplus extern "C" { #endif /** * @struct ngf_util_graphics_pipeline_data * \ingroup ngf_util * * Contains all the data describing a graphics pipeline, with the exception * of shader stages. * * See \ref ngf_util_create_default_graphics_pipeline_data for more details.
*/ typedef struct ngf_util_graphics_pipeline_data { ngf_graphics_pipeline_info pipeline_info; /**< Can be used to initialize a new pipeline object. */ ngf_depth_stencil_info depth_stencil_info; ngf_vertex_input_info vertex_input_info; ngf_multisample_info multisample_info; ngf_rasterization_info rasterization_info; ngf_input_assembly_info input_assembly_info; ngf_specialization_info spec_info; } ngf_util_graphics_pipeline_data; /** * \ingroup ngf_util * * Creates a configuration for a graphics pipeline object with some pre-set defaults. * * The fields of the members of the resulting \ref ngf_util_graphics_pipeline_data are set such that * they match OpenGL defaults. They can be adjusted later. The pointer fields of \ref * ngf_util_graphics_pipeline_data::pipeline_info are set to point to the corresponding members of * \ref ngf_util_graphics_pipeline_data. * * The only aspect of configuration that this function does not set are the programmable shader stages. After the application code sets those, \ref * ngf_util_graphics_pipeline_data::pipeline_info can be used to create a new pipeline object. * * @param result Pipeline configuration data will be stored here. */ void ngf_util_create_default_graphics_pipeline_data(ngf_util_graphics_pipeline_data* result); /** * \ingroup ngf_util * * Converts a nicegraf error code to a human-readable string. * * @param err The error enum to get the string for. * @return A human-readable error message. */ const char* ngf_util_get_error_name(const ngf_error err); /** * \ingroup ngf_util * * Rounds `value` up to the nearest multiple of `alignment`. */ static inline size_t ngf_util_align_size(size_t value, size_t alignment) { const size_t m = value % alignment; return value + (m > 0 ? 
(alignment - m) : 0u); } #ifdef __cplusplus } #endif ================================================ FILE: include/nicegraf-vk-handles.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf.h" #ifdef __cplusplus extern "C" { #endif /** * \ingroup ngf * * Returns the underlying VkDevice handle cast to uintptr_t. The caller is responsible for casting * the return value to a VkDevice. */ uintptr_t ngf_get_vk_device_handle() NGF_NOEXCEPT; /** * \ingroup ngf * * Returns the underlying VkInstance handle cast to uintptr_t. The caller is responsible for casting * the return value to a VkInstance. */ uintptr_t ngf_get_vk_instance_handle() NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying VkImage. The caller is responsible for casting the return * value to a VkImage. * * @param image A handle to a nicegraf image. 
*/ uintptr_t ngf_get_vk_image_handle(ngf_image image) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying VkBuffer. The caller is responsible for casting the return * value to a VkBuffer. * * @param buffer A handle to a nicegraf buffer. */ uintptr_t ngf_get_vk_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying VkCommandBuffer. The caller is responsible for casting * the return value to a VkCommandBuffer. * * @param cmd_buffer A handle to a nicegraf command buffer. */ uintptr_t ngf_get_vk_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uintptr_t to the underlying VkSampler. The caller is responsible for casting the * return value to a VkSampler. * * @param sampler A handle to a nicegraf sampler. */ uintptr_t ngf_get_vk_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns a uint32_t representing the underlying VkFormat. The caller is responsible for casting the return * value to a VkFormat. * * @param format A nicegraf image format. */ uint32_t ngf_get_vk_image_format_index(ngf_image_format format) NGF_NOEXCEPT; #ifdef __cplusplus } #endif ================================================ FILE: include/nicegraf-wrappers.h ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf-util.h" #include "nicegraf.h" #include #include #include /** * @file * \defgroup ngf_wrappers C++ Wrappers * * This module contains optional C++ wrappers for nicegraf structures and routines. * The \ref ngf namespace contains aliases for most types without the `ngf_` prefix * (i.e. `ngf_extent3d` becomes `ngf::extent3d`). * Most functions are wrapped using static inline wrappers. */ namespace ngf { #define NGF_POD_TYPE_ALIAS(name) using name = ngf_##name; #define NGF_OPAQUE_TYPE_ALIAS(name) using unowned_##name = ngf_##name; NGF_POD_TYPE_ALIAS(diagnostic_log_verbosity) NGF_POD_TYPE_ALIAS(diagnostic_message_type) NGF_POD_TYPE_ALIAS(renderdoc_info) NGF_POD_TYPE_ALIAS(diagnostic_callback) NGF_POD_TYPE_ALIAS(diagnostic_info) NGF_POD_TYPE_ALIAS(allocation_callbacks) NGF_POD_TYPE_ALIAS(device_handle) NGF_POD_TYPE_ALIAS(device_performance_tier) NGF_POD_TYPE_ALIAS(init_info) NGF_POD_TYPE_ALIAS(error) NGF_POD_TYPE_ALIAS(irect2d) NGF_POD_TYPE_ALIAS(extent3d) NGF_POD_TYPE_ALIAS(offset3d) NGF_POD_TYPE_ALIAS(stage_type) NGF_POD_TYPE_ALIAS(shader_stage_info) NGF_POD_TYPE_ALIAS(polygon_mode) NGF_POD_TYPE_ALIAS(cull_mode) NGF_POD_TYPE_ALIAS(front_face_mode) NGF_POD_TYPE_ALIAS(rasterization_info) NGF_POD_TYPE_ALIAS(compare_op) NGF_POD_TYPE_ALIAS(stencil_op) NGF_POD_TYPE_ALIAS(stencil_info) NGF_POD_TYPE_ALIAS(depth_stencil_info) NGF_POD_TYPE_ALIAS(blend_factor) NGF_POD_TYPE_ALIAS(blend_op) NGF_POD_TYPE_ALIAS(color_write_mask_bit) NGF_POD_TYPE_ALIAS(blend_info) 
NGF_POD_TYPE_ALIAS(type) NGF_POD_TYPE_ALIAS(vertex_input_rate) NGF_POD_TYPE_ALIAS(vertex_buf_binding_desc) NGF_POD_TYPE_ALIAS(vertex_attrib_desc) NGF_POD_TYPE_ALIAS(vertex_input_info) NGF_POD_TYPE_ALIAS(sample_count) NGF_POD_TYPE_ALIAS(multisample_info) NGF_POD_TYPE_ALIAS(image_format) NGF_POD_TYPE_ALIAS(attachment_type) NGF_POD_TYPE_ALIAS(attachment_description) NGF_POD_TYPE_ALIAS(attachment_descriptions) NGF_POD_TYPE_ALIAS(primitive_topology) NGF_POD_TYPE_ALIAS(constant_specialization) NGF_POD_TYPE_ALIAS(specialization_info) NGF_POD_TYPE_ALIAS(input_assembly_info) NGF_POD_TYPE_ALIAS(graphics_pipeline_info) NGF_POD_TYPE_ALIAS(compute_pipeline_info) NGF_POD_TYPE_ALIAS(descriptor_type) NGF_POD_TYPE_ALIAS(sampler_filter) NGF_POD_TYPE_ALIAS(sampler_wrap_mode) NGF_POD_TYPE_ALIAS(sampler_info) NGF_POD_TYPE_ALIAS(image_usage) NGF_POD_TYPE_ALIAS(image_type) NGF_POD_TYPE_ALIAS(image_info) NGF_POD_TYPE_ALIAS(cubemap_face) NGF_POD_TYPE_ALIAS(image_ref) NGF_POD_TYPE_ALIAS(image_view_info) NGF_POD_TYPE_ALIAS(clear) NGF_POD_TYPE_ALIAS(attachment_load_op) NGF_POD_TYPE_ALIAS(attachment_store_op) NGF_POD_TYPE_ALIAS(render_pass_info) NGF_POD_TYPE_ALIAS(xfer_pass_info) NGF_POD_TYPE_ALIAS(compute_pass_info) NGF_POD_TYPE_ALIAS(buffer_storage_type) NGF_POD_TYPE_ALIAS(buffer_usage) NGF_POD_TYPE_ALIAS(buffer_info) NGF_POD_TYPE_ALIAS(buffer_slice) NGF_POD_TYPE_ALIAS(texel_buffer_view_info) NGF_POD_TYPE_ALIAS(buffer_bind_info) NGF_POD_TYPE_ALIAS(image_sampler_bind_info) NGF_POD_TYPE_ALIAS(resource_bind_op) NGF_POD_TYPE_ALIAS(present_mode) NGF_POD_TYPE_ALIAS(colorspace) NGF_POD_TYPE_ALIAS(swapchain_info) NGF_POD_TYPE_ALIAS(context_info) NGF_POD_TYPE_ALIAS(cmd_buffer_info) NGF_POD_TYPE_ALIAS(frame_token) NGF_POD_TYPE_ALIAS(device_capabilities) NGF_POD_TYPE_ALIAS(device) NGF_POD_TYPE_ALIAS(image_write) NGF_OPAQUE_TYPE_ALIAS(shader_stage) NGF_OPAQUE_TYPE_ALIAS(graphics_pipeline) NGF_OPAQUE_TYPE_ALIAS(compute_pipeline) NGF_OPAQUE_TYPE_ALIAS(sampler) NGF_OPAQUE_TYPE_ALIAS(image) 
NGF_OPAQUE_TYPE_ALIAS(image_view) NGF_OPAQUE_TYPE_ALIAS(render_target_info) NGF_OPAQUE_TYPE_ALIAS(render_target) NGF_OPAQUE_TYPE_ALIAS(render_encoder) NGF_OPAQUE_TYPE_ALIAS(compute_encoder) NGF_OPAQUE_TYPE_ALIAS(xfer_encoder) NGF_OPAQUE_TYPE_ALIAS(buffer) NGF_OPAQUE_TYPE_ALIAS(texel_buffer_view) NGF_OPAQUE_TYPE_ALIAS(context) NGF_OPAQUE_TYPE_ALIAS(cmd_buffer) static inline error get_device_list(const device** devices, uint32_t* ndevices) noexcept { return ngf_get_device_list(devices, ndevices); } static inline error initialize(const init_info* init_info) noexcept { return ngf_initialize(init_info); } static inline void shutdown() noexcept { ngf_shutdown(); } static inline error resize_context(unowned_context ctx, uint32_t new_width, uint32_t new_height) noexcept { return ngf_resize_context(ctx, new_width, new_height); } static inline error set_context(unowned_context ctx) noexcept { return ngf_set_context(ctx); } static inline unowned_context get_context() noexcept { return ngf_get_context(); } static inline error begin_frame(frame_token* token) noexcept { return ngf_begin_frame(token); } static inline error end_frame(frame_token token) noexcept { return ngf_end_frame(token); } static inline error get_current_swapchain_image(frame_token token, unowned_image* result) noexcept { return ngf_get_current_swapchain_image(token, result); } static inline const device_capabilities* get_device_capabilities() noexcept { return ngf_get_device_capabilities(); } static inline unowned_render_target default_render_target() noexcept { return ngf_default_render_target(); } static inline const attachment_descriptions* default_render_target_attachment_descs() noexcept { return ngf_default_render_target_attachment_descs(); } static inline void* buffer_map_range(unowned_buffer buf, size_t offset, size_t size) noexcept { return ngf_buffer_map_range(buf, offset, size); } static inline void buffer_flush_range(unowned_buffer buf, size_t offset, size_t size) noexcept { 
ngf_buffer_flush_range(buf, offset, size); } static inline void buffer_unmap(unowned_buffer buf) noexcept { ngf_buffer_unmap(buf); } static inline void finish() noexcept { ngf_finish(); } static inline error start_cmd_buffer(unowned_cmd_buffer buf, frame_token token) noexcept { return ngf_start_cmd_buffer(buf, token); } static inline error submit_cmd_buffers(uint32_t nbuffers, unowned_cmd_buffer* bufs) noexcept { return ngf_submit_cmd_buffers(nbuffers, bufs); } static inline void cmd_bind_gfx_pipeline(unowned_render_encoder buf, unowned_graphics_pipeline pipeline) noexcept { ngf_cmd_bind_gfx_pipeline(buf, pipeline); } static inline void cmd_bind_compute_pipeline(unowned_compute_encoder buf, unowned_compute_pipeline pipeline) noexcept { ngf_cmd_bind_compute_pipeline(buf, pipeline); } static inline void cmd_viewport(unowned_render_encoder buf, const irect2d* r) noexcept { ngf_cmd_viewport(buf, r); } static inline void cmd_scissor(unowned_render_encoder enc, const irect2d* r) noexcept { ngf_cmd_scissor(enc, r); } static inline void cmd_stencil_reference(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept { ngf_cmd_stencil_reference(enc, front, back); } static inline void cmd_stencil_compare_mask(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept { ngf_cmd_stencil_compare_mask(enc, front, back); } static inline void cmd_stencil_write_mask(unowned_render_encoder enc, uint32_t front, uint32_t back) noexcept { ngf_cmd_stencil_write_mask(enc, front, back); } static inline void cmd_set_depth_bias( unowned_render_encoder enc, float const_scale, float slope_scale, float clamp) noexcept { ngf_cmd_set_depth_bias(enc, const_scale, slope_scale, clamp); } static inline void cmd_bind_resources( unowned_render_encoder enc, const resource_bind_op* bind_operations, uint32_t nbind_operations) noexcept { ngf_cmd_bind_resources(enc, bind_operations, nbind_operations); } static inline void cmd_bind_compute_resources( unowned_compute_encoder enc, const 
resource_bind_op* bind_operations, uint32_t nbind_operations) noexcept { ngf_cmd_bind_compute_resources(enc, bind_operations, nbind_operations); } static inline void cmd_bind_attrib_buffer( unowned_render_encoder enc, unowned_buffer vbuf, uint32_t binding, size_t offset) noexcept { ngf_cmd_bind_attrib_buffer(enc, vbuf, binding, offset); } static inline void cmd_bind_index_buffer( unowned_render_encoder enc, unowned_buffer idxbuf, size_t offset, type index_type) noexcept { ngf_cmd_bind_index_buffer(enc, idxbuf, offset, index_type); } static inline void cmd_draw( unowned_render_encoder enc, bool indexed, uint32_t first_element, uint32_t nelements, uint32_t ninstances) noexcept { ngf_cmd_draw(enc, indexed, first_element, nelements, ninstances); } static inline void cmd_dispatch( unowned_compute_encoder enc, uint32_t x_threadgroups, uint32_t y_threadgroups, uint32_t z_threadgroups) noexcept { ngf_cmd_dispatch(enc, x_threadgroups, y_threadgroups, z_threadgroups); } static inline void cmd_copy_buffer( unowned_xfer_encoder enc, unowned_buffer src, unowned_buffer dst, size_t size, size_t src_offset, size_t dst_offset) noexcept { ngf_cmd_copy_buffer(enc, src, dst, size, src_offset, dst_offset); } static inline void cmd_write_image( unowned_xfer_encoder enc, unowned_buffer src, unowned_image dst, const image_write* writes, uint32_t nwrites) noexcept { ngf_cmd_write_image(enc, src, dst, writes, nwrites); } static inline void cmd_copy_image_to_buffer( unowned_xfer_encoder enc, const image_ref src, offset3d src_offset, extent3d src_extent, uint32_t nlayers, unowned_buffer dst, size_t dst_offset) noexcept { ngf_cmd_copy_image_to_buffer(enc, src, src_offset, src_extent, nlayers, dst, dst_offset); } static inline error cmd_generate_mipmaps(unowned_xfer_encoder xfenc, unowned_image img) noexcept { return ngf_cmd_generate_mipmaps(xfenc, img); } static inline void cmd_begin_debug_group(unowned_cmd_buffer cmd_buffer, const char* name) noexcept { ngf_cmd_begin_debug_group(cmd_buffer, 
name); } static inline void cmd_end_current_debug_group(unowned_cmd_buffer cmd_buffer) noexcept { ngf_cmd_end_current_debug_group(cmd_buffer); } static inline void renderdoc_capture_next_frame() noexcept { ngf_renderdoc_capture_next_frame(); } static inline void renderdoc_capture_begin() noexcept { ngf_renderdoc_capture_begin(); } static inline void renderdoc_capture_end() noexcept { ngf_renderdoc_capture_end(); } namespace detail { template struct remove_ref { using Type = T; }; template struct remove_ref { using Type = T; }; template struct remove_ref { using Type = T; }; template using remove_ref_t = typename remove_ref::Type; template constexpr T&& fwd(remove_ref_t& x) noexcept { return (T&&)x; } template constexpr T&& fwd(remove_ref_t&& x) noexcept { return (T&&)x; } template constexpr remove_ref_t&& move(T&& x) noexcept { return (remove_ref_t&&)x; } } // namespace detail /** * \ingroup ngf_wrappers * * A convenience macro to allow easily propagating nicegraf errors. The provided expression must * evaluate to a \ref ngf_error. If the result of the expression is not \ref NGF_ERROR_OK, the value * is returned from the calling function. Note: the calling function must also return an \ref * ngf_error. */ #define NGF_RETURN_IF_ERROR(expr) \ { \ const ngf_error tmp = (expr); \ if (tmp != NGF_ERROR_OK) return tmp; \ } /** * \ingroup ngf_wrappers * * A move-only RAII wrapper over nicegraf handles that provides unique ownership semantics. */ template class unique_handle { public: /** Wraps a raw handle to a nicegraf object. */ explicit unique_handle(T raw) : handle_(raw) { } /** Wraps a null handle. */ unique_handle() : handle_(nullptr) { } unique_handle(const unique_handle&) = delete; unique_handle(unique_handle&& other) : handle_(nullptr) { *this = detail::move(other); } /** Disposes of the owned handle, if it is not null. 
*/ ~unique_handle() { destroy_if_necessary(); } unique_handle& operator=(const unique_handle&) = delete; /** Takes ownership of the handle wrapped by another object. */ unique_handle& operator=(unique_handle&& other) noexcept { destroy_if_necessary(); handle_ = other.handle_; other.handle_ = nullptr; return *this; } typedef typename ObjectManagementFuncs::InitType init_type; static unique_handle create(const typename ObjectManagementFuncs::InitType& info, error* err = nullptr) { unique_handle h; auto e = h.initialize(info); if (err) *err = e; return h; } /** Creates a new handle using the provided configuration, and takes ownership of it. */ ngf_error initialize(const typename ObjectManagementFuncs::InitType& info) { destroy_if_necessary(); const ngf_error err = ObjectManagementFuncs::create(&info, &handle_); if (err != NGF_ERROR_OK) handle_ = nullptr; return err; } struct make_result { unique_handle handle; const ngf_error error; }; static make_result make(const init_type& info) { unique_handle handle; const ngf_error error = handle.initialize(info); return make_result {detail::move(handle), error}; } /** @return The raw handle to the wrapped object. */ T get() { return handle_; } /** @return The raw handle to the wrapped object. */ const T get() const { return handle_; } /** * Relinquishes ownership of the wrapped object and returns a raw handle to it. After this call * completes, it is the responsibility of the calling code to dispose of the handle properly when * it is no longer needed. */ T release() { T tmp = handle_; handle_ = nullptr; return tmp; } /** Implicit conversion to the raw handle type. */ operator T() { return handle_; } /** Implicit conversion to the raw handle type. */ operator const T() const { return handle_; } /** * Wraps a raw handle to a nicegraf object. 
*/ void reset(T new_handle) { destroy_if_necessary(); handle_ = new_handle; } private: void destroy_if_necessary() { if (handle_) { ObjectManagementFuncs::destroy(handle_); handle_ = nullptr; } } T handle_; }; #define NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(name) \ struct ngf_##name##_ManagementFuncs { \ using InitType = ngf_##name##_info; \ static ngf_error create(const InitType* info, ngf_##name* r) { \ return ngf_create_##name(info, r); \ } \ static void destroy(ngf_##name handle) { \ ngf_destroy_##name(handle); \ } \ }; #define NGF_DEFINE_WRAPPER_TYPE(name) \ using name = unique_handle; NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(shader_stage); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(graphics_pipeline); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(compute_pipeline); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(image); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(image_view); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(sampler); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(render_target); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(buffer); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(texel_buffer_view); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(context); NGF_DEFINE_WRAPPER_MANAGEMENT_FUNCS(cmd_buffer); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref ngf_shader_stage. */ NGF_DEFINE_WRAPPER_TYPE(shader_stage); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref ngf_graphics_pipeline. */ NGF_DEFINE_WRAPPER_TYPE(graphics_pipeline); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref ngf_compute_pipeline. */ NGF_DEFINE_WRAPPER_TYPE(compute_pipeline); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref unowned_image. */ NGF_DEFINE_WRAPPER_TYPE(image); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref unowned_image_view. */ NGF_DEFINE_WRAPPER_TYPE(image_view); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref unowned_sampler. */ NGF_DEFINE_WRAPPER_TYPE(sampler); /** * \ingroup ngf_wrappers * * A RAII wrapper for \ref ngf_render_target. 
*/
NGF_DEFINE_WRAPPER_TYPE(render_target);

/**
 * \ingroup ngf_wrappers
 *
 * A RAII wrapper for \ref unowned_buffer.
 */
NGF_DEFINE_WRAPPER_TYPE(buffer);

/**
 * \ingroup ngf_wrappers
 *
 * A RAII wrapper for \ref unowned_texel_buffer_view.
 */
NGF_DEFINE_WRAPPER_TYPE(texel_buffer_view);

/**
 * \ingroup ngf_wrappers
 *
 * A RAII wrapper for \ref ngf_context.
 */
NGF_DEFINE_WRAPPER_TYPE(context);

/**
 * \ingroup ngf_wrappers
 *
 * A RAII wrapper for \ref ngf_cmd_buffer.
 */
NGF_DEFINE_WRAPPER_TYPE(cmd_buffer);

/**
 * \ingroup ngf_wrappers
 *
 * Wraps a render encoder with unique ownership semantics.
 */
class render_encoder {
  public:
  /**
   * Creates a new render encoder for the given command buffer. Has the same semantics as \ref
   * ngf_cmd_begin_render_pass.
   *
   * @param cmd_buf The command buffer to create a new render encoder for.
   * @param pass_info Render pass description.
   */
  explicit render_encoder(ngf_cmd_buffer cmd_buf, const ngf_render_pass_info& pass_info) {
    ngf_cmd_begin_render_pass(cmd_buf, &pass_info, &enc_);
  }

  /**
   * Creates a new render encoder for the given command buffer. Has the same semantics as \ref
   * ngf_cmd_begin_render_pass_simple.
   *
   * @param cmd_buf The command buffer to create a new render encoder for.
   * @param rt The render target to render into.
   * @param clear_color_r A floating point number between 0.0 and 1.0 specifying the red component
   * of the clear color.
   * @param clear_color_g A floating point number between 0.0 and 1.0 specifying the green component
   * of the clear color.
   * @param clear_color_b A floating point number between 0.0 and 1.0 specifying the blue component
   * of the clear color.
   * @param clear_color_a A floating point number between 0.0 and 1.0 specifying the alpha component
   * of the clear color.
   * @param clear_depth A floating point value to clear the depth attachment to (if the associated
   * render target has one).
   * @param clear_stencil An integer value to clear the stencil buffer to (if the associated render
   * target has one).
*/
explicit render_encoder(
    unowned_cmd_buffer    cmd_buf,
    unowned_render_target rt,
    float                 clear_color_r,
    float                 clear_color_g,
    float                 clear_color_b,
    float                 clear_color_a,
    float                 clear_depth,
    uint32_t              clear_stencil) {
  ngf_cmd_begin_render_pass_simple(
      cmd_buf,
      rt,
      clear_color_r,
      clear_color_g,
      clear_color_b,
      clear_color_a,
      clear_depth,
      clear_stencil,
      &enc_);
}

/**
 * Finishes the wrapped render pass.
 */
~render_encoder() {
  // A zeroed-out private handle marks a moved-from encoder; only end the pass for a live one.
  if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_render_pass(enc_);
}

/** Takes over the render pass owned by another encoder object. */
render_encoder(render_encoder&& other) noexcept { *this = detail::move(other); }

/** Takes over the render pass owned by another encoder object. */
render_encoder& operator=(render_encoder&& other) noexcept {
  enc_ = other.enc_;
  // Zero out the source's private data so its destructor won't end the pass again.
  other.enc_.pvt_data_donotuse.d0 = 0u;
  other.enc_.pvt_data_donotuse.d1 = 0u;
  return *this;
}

render_encoder(const render_encoder&)            = delete;
render_encoder& operator=(const render_encoder&) = delete;

/**
 * Implicit conversion to \ref unowned_render_encoder.
 */
operator unowned_render_encoder() { return enc_; }

private:
unowned_render_encoder enc_ {};
};

/**
 * \ingroup ngf_wrappers
 *
 * Wraps a transfer encoder with unique ownership semantics.
 */
class xfer_encoder {
  public:
  /**
   * Creates a new transfer encoder for the given command buffer.
   *
   * @param cmd_buf The command buffer to create the transfer encoder for.
   * @param pass_info Transfer pass description.
   */
  explicit xfer_encoder(unowned_cmd_buffer cmd_buf, const xfer_pass_info& pass_info) {
    ngf_cmd_begin_xfer_pass(cmd_buf, &pass_info, &enc_);
  }

  /**
   * Ends the wrapped transfer pass.
   */
  ~xfer_encoder() {
    // Only end the pass if this object still owns a live encoder (see move operations).
    if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_xfer_pass(enc_);
  }

  /** Takes over the transfer pass owned by another encoder object. */
  xfer_encoder(xfer_encoder&& other) noexcept { *this = detail::move(other); }

  /** Takes over the transfer pass owned by another encoder object. */
  xfer_encoder& operator=(xfer_encoder&& other) noexcept {
    enc_ = other.enc_;
    other.enc_.pvt_data_donotuse.d0 = 0u;
    other.enc_.pvt_data_donotuse.d1 = 0u;
    return *this;
  }

  xfer_encoder(const xfer_encoder&)            = delete;
  xfer_encoder& operator=(const xfer_encoder&) = delete;

  /**
   * Implicit conversion to \ref unowned_xfer_encoder.
*/ operator unowned_xfer_encoder() { return enc_; } private: unowned_xfer_encoder enc_; }; /** * \ingroup ngf_wrappers * * Wraps a compute encoder with unique ownership semantics. */ class compute_encoder { public: /** * Creates a new compute encoder for the given command buffer. Has the same semantics as \ref * ngf_cmd_begin_compute_pass. * * @param cmd_buf The command buffer to create a new compute encoder for. */ explicit compute_encoder(ngf_cmd_buffer cmd_buf, const ngf_compute_pass_info& pass_info) { ngf_cmd_begin_compute_pass(cmd_buf, &pass_info, &enc_); } /** * Creates a new compute encoder for the given command buffer that doesn't execute any * synchronization * * @param cmd_buf The command buffer to create a new compute encoder for. */ explicit compute_encoder(ngf_cmd_buffer cmd_buf) { ngf_cmd_begin_compute_pass(cmd_buf, nullptr, &enc_); } /** * Finishes the wrapped compute pass. */ ~compute_encoder() { if (enc_.pvt_data_donotuse.d0) ngf_cmd_end_compute_pass(enc_); } compute_encoder(compute_encoder&& other) noexcept { *this = detail::move(other); } compute_encoder& operator=(compute_encoder&& other) noexcept { enc_ = other.enc_; other.enc_.pvt_data_donotuse.d0 = 0u; other.enc_.pvt_data_donotuse.d1 = 0u; return *this; } compute_encoder(const compute_encoder&) = delete; compute_encoder& operator=(const compute_encoder&) = delete; /** * Implicit conversion to \ref unowned_compute_encoder. */ operator unowned_compute_encoder() { return enc_; } private: unowned_compute_encoder enc_ {}; }; /** * \ingroup ngf_wrappers * * Convenience wrapper for binding resources. See \ref cmd_bind_resources for details. */ template struct descriptor_set { /** * Convenience wrapper for binding resources. See \ref cmd_bind_resources for details. */ template struct binding { /** * Creates a \ref resource_bind_op for a \ref unowned_image. * * @param image The image to bind. 
* @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op texture(const unowned_image image, uint32_t array_index = 0u) {
  resource_bind_op op;
  op.type                              = NGF_DESCRIPTOR_IMAGE;
  op.target_binding                    = B;
  op.target_set                        = S;
  op.info.image_sampler.is_image_view  = false;
  op.info.image_sampler.resource.image = image;
  op.array_index                       = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for an \ref unowned_image that is to be used as a storage
 * image.
 *
 * @param image The image to bind.
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op storage_image(const unowned_image image, uint32_t array_index = 0u) {
  resource_bind_op op;
  op.type                              = NGF_DESCRIPTOR_STORAGE_IMAGE;
  op.target_binding                    = B;
  op.target_set                        = S;
  op.info.image_sampler.is_image_view  = false;
  op.info.image_sampler.resource.image = image;
  op.array_index                       = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for a \ref unowned_image_view.
 *
 * @param view The view to bind.
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op texture(const unowned_image_view view, uint32_t array_index = 0u) {
  resource_bind_op op;
  op.type                             = NGF_DESCRIPTOR_IMAGE;
  op.target_binding                   = B;
  op.target_set                       = S;
  op.info.image_sampler.is_image_view = true;
  op.info.image_sampler.resource.view = view;
  op.array_index                      = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for an \ref unowned_image_view that is to be used as a
 * storage image.
 *
 * @param view The view to bind. (NOTE(review): this doc previously described a nonexistent
 * `image` parameter.)
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op
storage_image(const unowned_image_view view, uint32_t array_index = 0u) {
  resource_bind_op op;
  op.type                             = NGF_DESCRIPTOR_STORAGE_IMAGE;
  op.target_binding                   = B;
  op.target_set                       = S;
  op.info.image_sampler.is_image_view = true;
  op.info.image_sampler.resource.view = view;
  op.array_index                      = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for a storage buffer.
 *
 * @param buf The buffer to bind as a storage buffer.
 * @param offset The offset at which to bind the buffer.
 * @param range The extent of the bound memory.
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op storage_buffer(
    const unowned_buffer buf,
    size_t               offset,
    size_t               range,
    uint32_t             array_index = 0u) {
  resource_bind_op op;
  op.type               = NGF_DESCRIPTOR_STORAGE_BUFFER;
  op.target_binding     = B;
  op.target_set         = S;
  op.info.buffer.buffer = buf;
  op.info.buffer.offset = offset;
  op.info.buffer.range  = range;
  op.array_index        = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for a uniform buffer.
 *
 * @param buf The buffer to bind as a uniform buffer.
 * @param offset The offset at which to bind the buffer.
 * @param range The extent of the bound memory.
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op uniform_buffer(
    const unowned_buffer buf,
    size_t               offset,
    size_t               range,
    uint32_t             array_index = 0u) {
  resource_bind_op op;
  op.type               = NGF_DESCRIPTOR_UNIFORM_BUFFER;
  op.target_binding     = B;
  op.target_set         = S;
  op.info.buffer.buffer = buf;
  op.info.buffer.offset = offset;
  op.info.buffer.range  = range;
  op.array_index        = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for a texel buffer.
 *
 * @param buf_view The texel buffer view to bind. (NOTE(review): this doc previously listed
 * stale `buf`/`offset`/`range`/`fmt` parameters that are not part of the signature.)
 * @param array_index If the descriptor is an array, specifies the index of the array element to
 * bind the object to.
 */
static resource_bind_op
texel_buffer(const unowned_texel_buffer_view buf_view, uint32_t array_index = 0u) {
  resource_bind_op op;
  op.type                   = NGF_DESCRIPTOR_TEXEL_BUFFER;
  op.target_binding         = B;
  op.target_set             = S;
  op.info.texel_buffer_view = buf_view;
  op.array_index            = array_index;
  return op;
}

/**
 * Creates a \ref resource_bind_op for a sampler.
 *
 * @param sampler The sampler to use.
*/ static resource_bind_op sampler(const unowned_sampler sampler, uint32_t array_index = 0u) { resource_bind_op op; op.type = NGF_DESCRIPTOR_SAMPLER; op.target_binding = B; op.target_set = S; op.info.image_sampler.sampler = sampler; op.array_index = array_index; return op; } /** * Creates a \ref resource_bind_op for a combined image + sampler. * * @param image The image part of the combined image + sampler. * @param sampler The sampler part of the combined image + sampler. */ static resource_bind_op texture_and_sampler( const unowned_image image, const unowned_sampler sampler, uint32_t array_index = 0u) { resource_bind_op op; op.type = NGF_DESCRIPTOR_IMAGE_AND_SAMPLER; op.target_binding = B; op.target_set = S; op.info.image_sampler.is_image_view = false; op.info.image_sampler.resource.image = image; op.info.image_sampler.sampler = sampler; op.array_index = array_index; return op; } }; }; /** * \ingroup ngf_wrappers * * A convenience function for binding many resources at once to the shader. Example usage: * * ``` * ngf::cmd_bind_resources(your_render_encoder, * ngf::descriptor_set<0>::binding<0>::image(your_image), * ngf::descriptor_set<0>::binding<1>::sampler(your_sampler), * ngf::descriptor_set<1>::binding<0>::uniform_buffer(your_buffer)); * ``` */ template void cmd_bind_resources(unowned_render_encoder enc, const Args&&... args) { const resource_bind_op ops[] = {detail::fwd(args)...}; ngf_cmd_bind_resources(enc, ops, sizeof(ops) / sizeof(resource_bind_op)); } /** * \ingroup ngf_wrappers * * A convenience function for binding many resources at once to the shader. Example usage: * * ``` * ngf::cmd_bind_resources(your_compute_encoder, * ngf::descriptor_set<0>::binding<0>::image(your_image), * ngf::descriptor_set<0>::binding<1>::sampler(your_sampler), * ngf::descriptor_set<1>::binding<0>::uniform_buffer(your_buffer)); * ``` * */ template void cmd_bind_resources(unowned_compute_encoder enc, const Args&&... 
args) { const resource_bind_op ops[] = {detail::fwd(args)...}; ngf_cmd_bind_compute_resources(enc, ops, sizeof(ops) / sizeof(resource_bind_op)); } /** * \ingroup ngf_wrappers * * A convenience class for dynamically updated structured uniform data. */ template class uniform_multibuffer { public: uniform_multibuffer() = default; uniform_multibuffer(uniform_multibuffer&& other) { *this = detail::move(other); } uniform_multibuffer(const uniform_multibuffer&) = delete; uniform_multibuffer& operator=(uniform_multibuffer&& other) = default; uniform_multibuffer& operator=(const uniform_multibuffer&) = delete; ngf_error initialize(const uint32_t frames) { const size_t alignment = ngf_get_device_capabilities()->uniform_buffer_offset_alignment; const size_t aligned_size = ngf_util_align_size(sizeof(T), alignment); NGF_RETURN_IF_ERROR(buf_.initialize(buffer_info { aligned_size * frames, NGF_BUFFER_STORAGE_HOST_WRITEABLE, NGF_BUFFER_USAGE_UNIFORM_BUFFER})); nframes_ = frames; aligned_per_frame_size_ = aligned_size; return NGF_ERROR_OK; } void write(const T& data) { current_offset_ = (frame_)*aligned_per_frame_size_; void* mapped_buf = ngf_buffer_map_range(buf_.get(), current_offset_, aligned_per_frame_size_); memcpy(mapped_buf, (void*)&data, sizeof(T)); ngf_buffer_flush_range(buf_.get(), 0, aligned_per_frame_size_); ngf_buffer_unmap(buf_.get()); frame_ = (frame_ + 1u) % nframes_; } resource_bind_op bind_op_at_current_offset( uint32_t set, uint32_t binding, size_t additional_offset = 0, size_t range = 0) const { resource_bind_op op {}; op.type = NGF_DESCRIPTOR_UNIFORM_BUFFER; op.target_binding = binding; op.target_set = set; op.info.buffer.buffer = buf_.get(); op.info.buffer.offset = current_offset_ + additional_offset; op.info.buffer.range = (range == 0) ? 
aligned_per_frame_size_ : range; return op; } private: buffer buf_; uint32_t frame_ = 0; size_t current_offset_ = 0; size_t aligned_per_frame_size_ = 0; uint32_t nframes_ = 0; }; } // namespace ngf ================================================ FILE: include/nicegraf.h ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /** * @file * @brief nicegraf declarations. * * This file contains the core nicegraf API declarations. */ /** * \mainpage Reference Documentation * * These pages contain documentation automatically generated from nicegraf's * source code comments. The text's purpose is to concisely describe the intended * behavior and failure modes of the API. * * If viewing this document in a web browser or a PDF viewer, click one of the * following links to proceed to the documentation for the corresponding module. 
* * - \ref ngf * - \ref ngf_util * - \ref ngf_wrappers */ /** * \defgroup ngf Core C API * This section contains documentation for the core nicegraf routines, * structures and enumerations. * * \subsection core-remarks General Remarks * * - The library is currently not intended to be linked dynamically. * * - When nicegraf's C headers are included from C++, all global functions * within them are automatically declared to have C linkage. Additionally, * they are declared to be noexcept. * * \subsection object-model Objects * * nicegraf objects, such as images, buffers, render targets, etc., are * represented using opaque handles. The objects are constructed and destroyed * explicitly by the application, and it is the responsibility of the * application to ensure that the order of destruction is correct. * For applications written in C++, a set of wrappers that automate object * lifetime management is available. See \ref ngf_wrappers for details. * * \subsection error-reporting Error Reporting * * Most nicegraf routines report their completion status by returning an * \ref ngf_error, and write their results to out-parameters. The returned value * is a generic error code. Detailed, human-readable information about errors * may vary from platform to platform; nicegraf reports it by invoking a * user-provided callback function (see \ref ngf_diagnostic_info). The callback * function must accept the diagnostic message type (see * \ref ngf_diagnostic_message_type), an arbitrary void pointer (the value of * which the user may specify when providing the callback), a printf-style * format string, and an arbitrary number of arguments specifying the data for * the format-string. * * \subsection host-memory-management Host Memory Management * * By default, nicegraf uses the standard malloc/free to manage host memory for * internal purposes. The client may override this behavior by supplying custom * memory allocation callbacks (see \ref ngf_allocation_callbacks). 
* * \subsection gpu-memory-management GPU Memory Management * * nicegraf internally manages GPU memory for all backends. It is currently not * possible for clients to override this behavior and do their own GPU memory * management. * */ #pragma once #include #include #ifdef __cplusplus extern "C" { #define NGF_NOEXCEPT noexcept #else #include #define NGF_NOEXCEPT #endif #define NGF_VER_MAJ 0 #define NGF_VER_MIN 0 #ifdef _MSC_VER #pragma region ngf_type_declarations #endif /** * @enum ngf_diagnostic_log_verbosity * \ingroup ngf * Verbosity levels for the diagnostic message log. */ typedef enum ngf_diagnostic_log_verbosity { /** * \ingroup ngf * Normal level, reports only severe errors. */ NGF_DIAGNOSTICS_VERBOSITY_DEFAULT, /** * \ingroup ngf * Recommended for debug builds, may induce performance overhead. */ NGF_DIAGNOSTICS_VERBOSITY_DETAILED } ngf_diagnostic_log_verbosity; /** * @enum ngf_diagnostic_message_type * \ingroup ngf * Type of a diagnostic log entry. */ typedef enum ngf_diagnostic_message_type { /** * \ingroup ngf * Informational message, not actionable. */ NGF_DIAGNOSTIC_INFO, /** * \ingroup ngf * Message warns of a potential issue with an API call.*/ NGF_DIAGNOSTIC_WARNING, /** * \ingroup ngf * Message provides details of an API call failure or a severe performance issue. */ NGF_DIAGNOSTIC_ERROR } ngf_diagnostic_message_type; /** * @struct ngf_renderdoc_info * * Information for initializing the RenderDoc API. */ typedef struct ngf_renderdoc_info { /** * Relaitve (to process) or absolute path to RenderDoc library. If this string is NULL, * RenderDoc will not be initialized. */ const char* renderdoc_lib_path; /** * Template for how RenderDoc captures are saved. If template is "example/capture", captures will * be saved as "example/capture_1234.rdc". */ const char* renderdoc_destination_template; } ngf_renderdoc_info; /** * The diagnostic callback function type. 
*/ typedef void (*ngf_diagnostic_callback)(ngf_diagnostic_message_type, void*, const char*, ...); /** * @struct ngf_diagnostic_info * \ingroup ngf * Diagnostic configuration. */ typedef struct ngf_diagnostic_info { ngf_diagnostic_log_verbosity verbosity; /**< Diagnostic log verbosity. */ void* userdata; /**< Arbitrary pointer that will be passed as-is to the callback. */ ngf_diagnostic_callback callback; /**< Pointer to the diagnostic message callback function.*/ bool enable_debug_groups; /**< Indicates whether to enable debug group functionality. See \ref ngf_cmd_begin_debug_group for details.*/ } ngf_diagnostic_info; /** * @struct ngf_allocation_callbacks * \ingroup ngf * Specifies host memory allocation callbacks for the library's internal needs. */ typedef struct ngf_allocation_callbacks { /** * This callback shall allocate a region of memory that is able to fit `nobjs` objects * of size `obj_size`, and return a pointer to the allocated region. * The starting address of the allocated region shall have the largest alignment for the * target platform. */ void* (*allocate)(size_t obj_size, size_t nobjs, void* userdata); /** * This callback shall free a region allocated by the custom allocator. The count * and size of objects in the region are supplied as additional parameters. */ void (*free)(void* ptr, size_t obj_size, size_t nobjs, void* userdata); /** * An arbitrary pointer that will be passed as-is to the allocate and free callbacks. */ void* userdata; } ngf_allocation_callbacks; /** * @typedef ngf_device_handle * \ingroup ngf * * A handle that uniquely identifies a rendering device. * * Note that the value of the handle corresponding to the same exact physical device may be * different across different instances of the same client. In other words, if the client * application shuts down, then starts up again, it may get different values for device handles than * it did before. Therefore, device handles should not be persisted. 
\ingroup ngf */ typedef uint32_t ngf_device_handle; /** * @enum ngf_device_performance_tier * Enumerates different types of rendering devices. * \ingroup ngf */ typedef enum ngf_device_performance_tier { /** \ingroup ngf * For high-performance devices, such as discrete GPU. */ NGF_DEVICE_PERFORMANCE_TIER_HIGH = 0, /** \ingroup ngf * For low-power integrated GPUs, software rendering, etc. */ NGF_DEVICE_PERFORMANCE_TIER_LOW, /** \ingroup ngf * The specific performance profile is unknown. */ NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN, NGF_DEVICE_PERFORMANCE_TIER_COUNT } ngf_device_performance_tier; /** * @struct ngf_init_info * nicegraf initialization parameters. * See also: \ref ngf_initialize. */ typedef struct ngf_init_info { /** * Pointer to a structure containing a diagnostic log configuration. * If this pointer is set to `NULL`, no diagnostic callback shall be invoked. */ const ngf_diagnostic_info* diag_info; /** * Pointer to a structure specifying custom allocation callbacks, which the library * shall use to manage CPU memory for internal use. * If this pointer is set to `NULL`, standard malloc and free are used. */ const ngf_allocation_callbacks* allocation_callbacks; /** * Handle for the rendering device that nicegraf shall execute rendering commands on. * A list of available device and their handles can be obtained with \ref ngf_enumerate_devices. */ ngf_device_handle device; /** * Pointer to a structure containing RenderDoc API configuration. * If this pointer is set to `NULL`, the RenderDoc API will not be initialized. */ const ngf_renderdoc_info* renderdoc_info; } ngf_init_info; /** * @enum ngf_error * \ingroup ngf * Enumerates the error codes that nicegraf routines may return. * See also \ref error-reporting. */ typedef enum ngf_error { /** \ingroup ngf * No error, operation finished successfully. */ NGF_ERROR_OK = 0, /** \ingroup ngf * Host memory allocation failed. 
*/
NGF_ERROR_OUT_OF_MEM,
/** \ingroup ngf
 * A call to the backend API that was
 * supposed to create an object failed.*/
NGF_ERROR_OBJECT_CREATION_FAILED,
/** \ingroup ngf
 * The operation would have resulted in an out of
 * bounds access. */
NGF_ERROR_OUT_OF_BOUNDS,
/** \ingroup ngf
 * A format enumerator provided as part of an argument to the call is not valid in that context.
 */
NGF_ERROR_INVALID_FORMAT,
/** \ingroup ngf
 * A size passed as part of an argument to the call is either too large or too small.*/
NGF_ERROR_INVALID_SIZE,
/** \ingroup ngf
 * An enumerator passed as part of an argument to the call is not valid in that context.*/
NGF_ERROR_INVALID_ENUM,
/**
 * \ingroup ngf
 * The requested operation is not valid in the current context. */
NGF_ERROR_INVALID_OPERATION,
/** \ingroup ngf
 * The routine did not complete successfully. */
NGF_ERROR_OPERATION_FAILED,
/*..add new errors above this line */
} ngf_error;

/**
 * @struct ngf_irect2d
 * \ingroup ngf
 * Represents a rectangular, axis-aligned 2D region with integer coordinates.
 */
typedef struct ngf_irect2d {
  int32_t  x;      /**< X coord of lower-left corner. */
  int32_t  y;      /**< Y coord of lower-left corner. */
  uint32_t width;  /**< The size of the rectangle along the x-axis. */
  uint32_t height; /**< The size of the rectangle along the y-axis. */
} ngf_irect2d;

/**
 * @struct ngf_extent3d
 * \ingroup ngf
 * Represents a rectangular, axis-aligned 3D volume.
 */
typedef struct ngf_extent3d {
  uint32_t width;  /**< The size of the volume along the x-axis. */
  uint32_t height; /**< The size of the volume along the y-axis. */
  uint32_t depth;  /**< The size of the volume along the z-axis. */
} ngf_extent3d;

/**
 * @struct ngf_offset3d
 * \ingroup ngf
 * Three-dimensional offset.
 */
typedef struct ngf_offset3d {
  int32_t x; /**< Offset along the x-axis. */
  int32_t y; /**< Offset along the y-axis. */
  int32_t z; /**< Offset along the z-axis. */
} ngf_offset3d;

/**
 * @enum ngf_stage_type
 * \ingroup ngf
 * Shader stage types.
 * Note that some back-ends might not support all of these.
*/ typedef enum ngf_stage_type { /** \ingroup ngf * Indicates the vertex processing stage. */ NGF_STAGE_VERTEX = 0, /** \ingroup ngf * Indicates the fragment processing stage. */ NGF_STAGE_FRAGMENT, /** \ingroup ngf * Indicates the compute stage. */ NGF_STAGE_COMPUTE, NGF_STAGE_COUNT } ngf_stage_type; /** * @struct ngf_shader_stage_info * \ingroup ngf * * Describes a programmable shader stage. */ typedef struct ngf_shader_stage_info { ngf_stage_type type; /**< Stage type (vert/frag/etc.) */ /** * This shall be a pointer to a memory buffer containing the code for * the shader stage. * * The specific contents of the buffer depend on which backend nicegraf * is being used with: * - for the Vulkan backend, nicegraf expects the SPIR-V bytecode for the shader stage. * - for the Metal backend, nicegraf expects the source code for the shader stage in the Metal * Shading Language. * * Additionally, the Metal backend expects the code to contain a special comment, mapping all * pairs to the native Metal argument table slots. The comment shall * be a C-style block comment - beginning with a forward slash, followed by an asterisk - * containing the following word: * * ``` * NGF_NATIVE_BINDING_MAP * ``` * * followed by a newline character. * * Each of the following lines until the end of the comment shall have the following format: * * ``` * (s b) : m * ``` * * where `s` is the set number, `b` is the binding number within the set, and `m` is the index * of the corresponding resource in Metal's argument table. * * For example, let's say the Metal shader refers to index 3 in the texture argument table. * Adding the following line to the binding map comment * * ``` * (0 1) : 3 * ``` * * would tell the nicegraf metal backend to use the third slot of the texture argument table when * an image is bound to set 0, binding 1 using \ref ngf_cmd_bind_resources. * * When compiling HLSL shaders using nicegraf-shaderc, the comment with the binding map is * generated automatically. 
*/
const void* content;

/** The number of bytes in the \ref ngf_shader_stage_info::content buffer. */
uint32_t content_length;

const char* debug_name;       /**< Optional name, will appear in debug logs, may be NULL.*/
const char* entry_point_name; /**< Entry point name for this shader stage. */
} ngf_shader_stage_info;

/**
 * @struct ngf_shader_stage
 * \ingroup ngf
 *
 * An opaque handle to a programmable stage of the rendering pipeline.
 *
 * Programmable stages are specified using backend-specific blobs of
 * data, as described in the documentation for \ref ngf_shader_stage_info::content.
 *
 * On platforms that require a compilation step at runtime, details about
 * compile errors are reported via the debug callback mechanism.
 *
 * Shader stage objects are necessary for creating \ref ngf_graphics_pipeline objects, but once
 * the pipelines have been created, the shader stages that had been used to create
 * them can safely be disposed of.
 *
 * See also: \ref ngf_shader_stage_info, \ref ngf_create_shader_stage, \ref
 * ngf_destroy_shader_stage.
 */
typedef struct ngf_shader_stage_t* ngf_shader_stage;

/**
 * @enum ngf_polygon_mode
 * \ingroup ngf
 *
 * Enumerates ways to draw polygons.
 * See also \ref ngf_rasterization_info.
 */
typedef enum ngf_polygon_mode {
  /** \ingroup ngf
   * Fill the entire polygon.*/
  NGF_POLYGON_MODE_FILL = 0,
  /** \ingroup ngf
   * Outline only.*/
  NGF_POLYGON_MODE_LINE,
  /** \ingroup ngf
   * Vertices only.*/
  NGF_POLYGON_MODE_POINT,
  NGF_POLYGON_MODE_COUNT
} ngf_polygon_mode;

/**
 * @enum ngf_cull_mode
 * \ingroup ngf
 *
 * Enumerates polygon culling strategies.
 * See also \ref ngf_rasterization_info.
 */
typedef enum ngf_cull_mode {
  /** \ingroup ngf
   * Cull back-facing polygons. */
  NGF_CULL_MODE_BACK = 0,
  /** \ingroup ngf
   * Cull front-facing polygons. */
  NGF_CULL_MODE_FRONT,
  /** \ingroup ngf
   * Cull all polygons. */
  NGF_CULL_MODE_FRONT_AND_BACK,
  /** \ingroup ngf
   * Do not cull anything.
*/ NGF_CULL_MODE_NONE, NGF_CULL_MODE_COUNT } ngf_cull_mode; /** * @enum ngf_front_face_mode * \ingroup ngf * Enumerates possible vertex winding orders, which are used to decide which * polygons are front- or back-facing. * See also \ref ngf_rasterization_info. */ typedef enum ngf_front_face_mode { /** \ingroup ngf * Polygons with vertices in counter-clockwise order are considered front-facing. */ NGF_FRONT_FACE_COUNTER_CLOCKWISE = 0, /** \ingroup ngf * Polygons with vertices in clockwise order are considered front-facing. */ NGF_FRONT_FACE_CLOCKWISE, NGF_FRONT_FACE_COUNT } ngf_front_face_mode; /** * @struct ngf_rasterization_info * \ingroup ngf * Rasterization stage parameters. */ typedef struct ngf_rasterization_info { bool discard; /**< Enable/disable rasterizer discard. Use this in pipelines that don't write any fragment data.*/ ngf_polygon_mode polygon_mode; /**< How to draw polygons.*/ ngf_cull_mode cull_mode; /**< Which polygons to cull.*/ ngf_front_face_mode front_face; /**< Which winding counts as front-facing.*/ bool enable_depth_bias; /**< Controls whether to enable depth bias. See also: \ref ngf_cmd_set_depth_bias */ } ngf_rasterization_info; /** * @enum ngf_compare_op * \ingroup ngf * Compare operations used in depth and stencil tests. */ typedef enum ngf_compare_op { /** \ingroup ngf * Comparison test never succeeds. */ NGF_COMPARE_OP_NEVER = 0, /** \ingroup ngf * Comparison test succeeds if A < B. */ NGF_COMPARE_OP_LESS, /** \ingroup ngf * Comparison test succeeds if A <= B. */ NGF_COMPARE_OP_LEQUAL, /** \ingroup ngf * Comparison test succeeds if A == B. */ NGF_COMPARE_OP_EQUAL, /** \ingroup ngf * Comparison test succeeds if A >= B. */ NGF_COMPARE_OP_GEQUAL, /** \ingroup ngf * Comparison test succeeds if A > B. */ NGF_COMPARE_OP_GREATER, /** \ingroup ngf * Comparison test succeeds if A != B. */ NGF_COMPARE_OP_NEQUAL, /** \ingroup ngf * Comparison test always succeeds. 
*/ NGF_COMPARE_OP_ALWAYS, NGF_COMPARE_OP_COUNT } ngf_compare_op; /** * @enum ngf_stencil_op * \ingroup ngf * Operations that can be performed on stencil buffer. */ typedef enum ngf_stencil_op { /** \ingroup ngf * Don't touch. */ NGF_STENCIL_OP_KEEP = 0, /** \ingroup ngf * Set to 0. */ NGF_STENCIL_OP_ZERO, /** \ingroup ngf * Replace with reference value. */ NGF_STENCIL_OP_REPLACE, /** \ingroup ngf * Increment, clamping to max value. */ NGF_STENCIL_OP_INCR_CLAMP, /** \ingroup ngf * Increment, wrapping to 0. */ NGF_STENCIL_OP_INCR_WRAP, /** \ingroup ngf * Decrement, clamping to 0. */ NGF_STENCIL_OP_DECR_CLAMP, /** \ingroup ngf * Decrement, wrapping to max value. */ NGF_STENCIL_OP_DECR_WRAP, /** \ingroup ngf * Bitwise invert. */ NGF_STENCIL_OP_INVERT, NGF_STENCIL_OP_COUNT } ngf_stencil_op; /** * @struct ngf_stencil_info * \ingroup ngf * Stencil operation description. */ typedef struct ngf_stencil_info { ngf_stencil_op fail_op; /**< What to do on stencil test fail.*/ ngf_stencil_op pass_op; /**< What to do on pass.*/ ngf_stencil_op depth_fail_op; /**< What to do when depth test fails but stencil test passes.*/ ngf_compare_op compare_op; /**< Stencil comparison function.*/ uint32_t compare_mask; /**< Compare mask.*/ uint32_t write_mask; /**< Write mask.*/ uint32_t reference; /**< Reference value (used for \ref NGF_STENCIL_OP_REPLACE).*/ } ngf_stencil_info; /** * @struct ngf_depth_stencil_info * \ingroup ngf * A graphics pipeline's depth/stencil state description. */ typedef struct ngf_depth_stencil_info { /** * Stencil test and actions for front-facing polys. * This is ignored when stencil testing is disabled. */ ngf_stencil_info front_stencil; /** * Stencil test and actions for back-facing polys. * This is ignored when stencil testing is disabled. */ ngf_stencil_info back_stencil; /** * The comparison function to use when performing the depth test. * This is ignored when depth testing is disabled. */ ngf_compare_op depth_compare; /** * Whether to enable stencil testing. 
 * The exact procedure for the stencil test, and the actions to * perform on success or failure can be specified separately * for front- and back-facing polygons (see \ref ngf_depth_stencil_info::front_stencil and * \ref ngf_depth_stencil_info::back_stencil). */ bool stencil_test; /** * Whether to enable depth test. * When this is enabled, fragments that fail the test specified in * \ref ngf_depth_stencil_info::depth_compare, get discarded. */ bool depth_test; /** * Whether to enable writing to the depth buffer. * When this is enabled, fragments that pass the depth test have their * depth written into the depth buffer. */ bool depth_write; } ngf_depth_stencil_info; /** * @enum ngf_blend_factor * \ingroup ngf * Factors that can be used for source and destination values during the blend operation. * The factor can be thought of as a multiplier applied to the source or destination value. * See \ref ngf_blend_info for details. */ typedef enum ngf_blend_factor { /** * \ingroup ngf * - If used as a blend factor for color: sets each color component to 0; * - if used as a blend factor for alpha: sets alpha to 0. */ NGF_BLEND_FACTOR_ZERO = 0, /** * \ingroup ngf * - If used as a blend factor for color: leaves the color unchanged; * - if used as a blend factor for alpha: leaves the alpha value unchanged. */ NGF_BLEND_FACTOR_ONE, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by the corresponding * component of the "source" color value; * - if used as a blend factor for alpha: multiplies the alpha value by the "source" alpha value. */ NGF_BLEND_FACTOR_SRC_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by one minus the * corresponding component of the "source" color value; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "source" * alpha value. 
 */ NGF_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by the corresponding * component of the "destination" color value; * - if used as a blend factor for alpha: multiplies the alpha value by the "destination" alpha * value. */ NGF_BLEND_FACTOR_DST_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by one minus the * corresponding component of the "destination" color value; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "destination" * alpha value. */ NGF_BLEND_FACTOR_ONE_MINUS_DST_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by the "source" alpha * value; * - if used as a blend factor for alpha: multiplies the alpha value by the "source" alpha value. */ NGF_BLEND_FACTOR_SRC_ALPHA, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by one minus the * "source" alpha value; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "source" * alpha value. */ NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by the "destination" * alpha value; * - if used as a blend factor for alpha: multiplies the alpha value by the "destination" alpha * value. */ NGF_BLEND_FACTOR_DST_ALPHA, /** * \ingroup ngf * - If used as a blend factor for color: multiplies each color component by one minus the * "destination" alpha value; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the "destination" * alpha value. 
*/ NGF_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, /** * \ingroup ngf * - If used as a blend factor for color: multiplies the red, green and blue components of the * color by the 1st, 2nd and 3rd elements of \ref ngf_graphics_pipeline_info::blend_consts * respectively; * - if used as a blend factor for alpha: multiplies the alpha value by the 4th component of \ref * ngf_graphics_pipeline_info::blend_consts. */ NGF_BLEND_FACTOR_CONSTANT_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies the red, green and blue components of the * color by one minus the 1st, 2nd and 3rd elements of \ref * ngf_graphics_pipeline_info::blend_consts respectively; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the 4th * component of \ref ngf_graphics_pipeline_info::blend_consts. */ NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, /** * \ingroup ngf * - If used as a blend factor for color: multiplies the components of the color by the 4th * element of \ref ngf_graphics_pipeline_info::blend_consts; * - if used as a blend factor for alpha: multiplies the alpha value by the 4th component of \ref * ngf_graphics_pipeline_info::blend_consts. */ NGF_BLEND_FACTOR_CONSTANT_ALPHA, /** * \ingroup ngf * - If used as a blend factor for color: multiplies the components of the color by one minus the * 4th element of \ref ngf_graphics_pipeline_info::blend_consts; * - if used as a blend factor for alpha: multiplies the alpha value by one minus the 4th * component of \ref ngf_graphics_pipeline_info::blend_consts. */ NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, NGF_BLEND_FACTOR_COUNT } ngf_blend_factor; /** * @enum ngf_blend_op * \ingroup ngf * Operations that can be performed to blend the values computed by the fragment stage * (source values, denoted `S` in the member documentation) with values already present * in the target color attachment of the framebuffer (destination values, denoted `D` in * the member documentation). 
* * The factors (\ref ngf_blend_factor) for the source and destination values are denoted * as `Fs` and `Fd` respectively in the member documentation below. * */ typedef enum ngf_blend_op { /** \ingroup ngf * The result of the blend operation shall be `S*Fs + D*Fd` */ NGF_BLEND_OP_ADD, /** \ingroup ngf * The result of the blend operation shall be `S*Fs - D*Fd` */ NGF_BLEND_OP_SUB, /** \ingroup ngf * The result of the blend operation shall be `D*Fd - S*Fs` */ NGF_BLEND_OP_REV_SUB, /** \ingroup ngf * The result of the blend operation shall be `min(S, D)` */ NGF_BLEND_OP_MIN, /** \ingroup ngf * The result of the blend operation shall be `max(S, D)` */ NGF_BLEND_OP_MAX, NGF_BLEND_OP_COUNT } ngf_blend_op; /** * Identifies a color channel for color write mask. See \ref ngf_blend_info::color_write_mask for * details. */ typedef enum ngf_color_write_mask_bit { NGF_COLOR_MASK_WRITE_BIT_R = 0x01, NGF_COLOR_MASK_WRITE_BIT_G = 0x02, NGF_COLOR_MASK_WRITE_BIT_B = 0x04, NGF_COLOR_MASK_WRITE_BIT_A = 0x08 } ngf_color_write_mask_bit; /** * @struct ngf_blend_info * \ingroup ngf * Describes how blending should be handled by the pipeline. * If blending is disabled, the resulting color and alpha values are directly assigned * the color and alpha values computed at the fragment stage. * * When blending is enabled, the resulting color and alpha values are computed using the * corresponding blend operations and factors (specified separately for color and alpha). * Note that if the render target attachment from which the destination values are read * uses an sRGB format, the destination color values are linearized prior to being used * in a blend operation. * * If the render target attachment uses an sRGB format, the resulting color value * is converted to an sRGB representation prior to being finally written to the attachment. */ typedef struct ngf_blend_info { ngf_blend_op blend_op_color; /**< The blend operation to perform for color. 
*/ ngf_blend_op blend_op_alpha; /**< The blend operation to perform for alpha. */ ngf_blend_factor src_color_blend_factor; /**< The source blend factor for color. */ ngf_blend_factor dst_color_blend_factor; /**< The destination blend factor for color. */ ngf_blend_factor src_alpha_blend_factor; /**< The source blend factor for alpha. */ ngf_blend_factor dst_alpha_blend_factor; /**< The destination blend factor for alpha. */ uint32_t color_write_mask; /**< A combination of \ref ngf_color_write_mask_bit flags that specifies which color channels actually get written out for the attachment corresponding to this blend state. */ bool enable; /**< Specifies whether blending is enabled.*/ } ngf_blend_info; /** * @enum ngf_type * \ingroup ngf * Enumerates the available vertex attribute component types. */ typedef enum ngf_type { /** \ingroup ngf * Signed 8-bit integer. */ NGF_TYPE_INT8 = 0, /** \ingroup ngf * Unsigned 8-bit integer. */ NGF_TYPE_UINT8, /** \ingroup ngf * Signed 16-bit integer. */ NGF_TYPE_INT16, /** \ingroup ngf * Unsigned 16-bit integer. */ NGF_TYPE_UINT16, /** \ingroup ngf * Signed 32-bit integer. */ NGF_TYPE_INT32, /** \ingroup ngf * Unsigned 32-bit integer. */ NGF_TYPE_UINT32, /** \ingroup ngf * 32-bit floating point number. */ NGF_TYPE_FLOAT, /** \ingroup ngf * 16-bit floating point number. */ NGF_TYPE_HALF_FLOAT, /** \ingroup ngf * Double-precision floating point number. */ NGF_TYPE_DOUBLE, NGF_TYPE_COUNT } ngf_type; /** * @enum ngf_input_rate * \ingroup ngf * The vertex input rate specifies whether a new set of attributes is read from a buffer per each * vertex or per each instance. */ typedef enum ngf_vertex_input_rate { /** * \ingroup ngf * * Attributes are read per-vertex. * With this vertex input rate, each vertex receives its own set of attributes. */ NGF_INPUT_RATE_VERTEX = 0, /** * \ingroup ngf * * Attributes are read per-instance. * With this vertex input rate, all vertices within the same instance share the same * attribute values. 
*/ NGF_INPUT_RATE_INSTANCE, NGF_VERTEX_INPUT_RATE_COUNT } ngf_vertex_input_rate; /** * @struct ngf_vertex_buf_binding_desc * \ingroup ngf * Specifies a vertex buffer binding. * A _vertex buffer binding_ may be thought of as a slot to which a vertex attribute buffer can be * bound. An \ref ngf_graphics_pipeline may have several such slots, which are addressed by their * indices. Vertex attribute buffers can be bound to these slots with \ref * ngf_cmd_bind_attrib_buffer. The binding also partly defines how the contents of the bound buffer * is interpreted - via \ref ngf_vertex_buf_binding_desc::stride and \ref * ngf_vertex_buf_binding_desc::input_rate */ typedef struct ngf_vertex_buf_binding_desc { uint32_t binding; /**< Index of the binding that this structure describes.*/ /** * Specifies the distance (in bytes) between the starting bytes of two consecutive attribute * values. * * As an example, assume the buffer contains data for a single attribute, such as the position of * a vertex in three-dimensional space. Each component of the position is a 32-bit floating point * number. The values are laid out in memory one after another: * * ``` * ________ ________ ________ ________ ________ ________ ____ * | | | | | | | * | pos0.x | pos0.y | pos0.z | pos1.x | pos1.y | pos1.z | ... * |________|________|________|________|________|________|____ * * ``` * In this case, the stride is 3*4 = 12 bytes - the distance from the beginning of the first * attribute to the beginning of the next attribute is equal to the size of one attribute value. * * Now consider a different case, where we have two attributes: a three-dimensional position and * an RGB color, and the buffer first lists all the attribute values for the first vertex, * then all attribute values for the second vertex and so on: * * ``` * ________ ________ ________ ________ ________ ________ ________ _____ * | | | | | | | | * | pos0.x | pos0.y | pos0.z | col0.x | col0.y | col0.z | pos1.x | ... 
 * |________|________|________|________|________|________|________|_____ * * ``` * * In this case, the position of the next vertex does not immediately follow the position of the previous * one - there is the value of the color attribute in between. In this case, assuming the * attribute components use a 32-bit floating point, the stride would have to be * `3 * 4 + 3 * 4 = 24` bytes. */ uint32_t stride; /** * Specifies whether attributes are read from the bound buffer * per-vertex or per-instance. */ ngf_vertex_input_rate input_rate; } ngf_vertex_buf_binding_desc; /** * @struct ngf_vertex_attrib_desc * \ingroup ngf * Specifies information about a vertex attribute. */ typedef struct ngf_vertex_attrib_desc { uint32_t location; /**< Attribute index. */ uint32_t binding; /**< The index of the vertex attribute buffer binding to use.*/ uint32_t offset; /**< Offset in the buffer at which attribute data starts.*/ ngf_type type; /**< Type of attribute component.*/ uint32_t size; /**< Number of attribute components. This value has to be between 1 and 4 (inclusive). */ /** * Whether the vertex stage sees the raw or normalized values for the attribute components. * Only attribute components of types \ref NGF_TYPE_INT8, \ref NGF_TYPE_UINT8, \ref * NGF_TYPE_INT16 and \ref NGF_TYPE_UINT16 can be normalized. For signed types, the values are * scaled to the [-1; 1] floating point range, for unsigned types they are scaled to [0; 1]. */ bool normalized; } ngf_vertex_attrib_desc; /** * @struct ngf_vertex_input_info * \ingroup ngf * Specifies information about the pipeline's vertex input. */ typedef struct ngf_vertex_input_info { uint32_t nattribs; /**< Number of attribute descriptions.*/ uint32_t nvert_buf_bindings; /**< Number of vertex buffer binding descriptions.*/ /** * Pointer to an array of structures describing vertex attribute buffer * bindings. */ const ngf_vertex_buf_binding_desc* vert_buf_bindings; /** * Pointer to an array of structures describing the vertex attributes. 
 */ const ngf_vertex_attrib_desc* attribs; } ngf_vertex_input_info; /** * @enum ngf_sample_count * \ingroup ngf * Specifies the number of MSAA samples. */ typedef enum ngf_sample_count { NGF_SAMPLE_COUNT_1 = 1, NGF_SAMPLE_COUNT_2 = 2, NGF_SAMPLE_COUNT_4 = 4, NGF_SAMPLE_COUNT_8 = 8, NGF_SAMPLE_COUNT_16 = 16, NGF_SAMPLE_COUNT_32 = 32, NGF_SAMPLE_COUNT_64 = 64, } ngf_sample_count; /** * @struct ngf_multisample_info * \ingroup ngf * * Specifies the state of multisampling. */ typedef struct ngf_multisample_info { ngf_sample_count sample_count; /**< MSAA sample count. */ bool alpha_to_coverage; /**< Whether alpha-to-coverage is enabled.*/ } ngf_multisample_info; /** * @enum ngf_image_format * \ingroup ngf * * Image formats. * * Some backends may not support all of those. * Using an sRGB format in a color attachment or swapchain image means that all * color values output by the fragment stage are interpreted as being in linear * color space, and an appropriate transfer function is applied to them to * convert them to the sRGB colorspace before writing them to the target. * Using an sRGB format in a sampled image means that all color values stored * in the image are interpreted to be in the sRGB color space, and all read * operations automatically apply a transfer function to convert the values * from sRGB to linear color space. 
*/ typedef enum ngf_image_format { NGF_IMAGE_FORMAT_R8 = 0, NGF_IMAGE_FORMAT_RG8, NGF_IMAGE_FORMAT_RG8_SNORM, NGF_IMAGE_FORMAT_RGB8, NGF_IMAGE_FORMAT_RGBA8, NGF_IMAGE_FORMAT_SRGB8, NGF_IMAGE_FORMAT_SRGBA8, NGF_IMAGE_FORMAT_BGR8, NGF_IMAGE_FORMAT_BGRA8, NGF_IMAGE_FORMAT_BGR8_SRGB, NGF_IMAGE_FORMAT_BGRA8_SRGB, NGF_IMAGE_FORMAT_RGB10A2, NGF_IMAGE_FORMAT_R32F, NGF_IMAGE_FORMAT_RG32F, NGF_IMAGE_FORMAT_RGB32F, NGF_IMAGE_FORMAT_RGBA32F, NGF_IMAGE_FORMAT_R16F, NGF_IMAGE_FORMAT_RG16F, NGF_IMAGE_FORMAT_RGB16F, NGF_IMAGE_FORMAT_RGBA16F, NGF_IMAGE_FORMAT_RG11B10F, NGF_IMAGE_FORMAT_RGB9E5, NGF_IMAGE_FORMAT_R16_UNORM, NGF_IMAGE_FORMAT_R16_SNORM, NGF_IMAGE_FORMAT_RG16_UNORM, NGF_IMAGE_FORMAT_RG16_SNORM, NGF_IMAGE_FORMAT_RGBA16_UNORM, NGF_IMAGE_FORMAT_RGBA16_SNORM, NGF_IMAGE_FORMAT_R8U, NGF_IMAGE_FORMAT_R8S, NGF_IMAGE_FORMAT_R16U, NGF_IMAGE_FORMAT_R16S, NGF_IMAGE_FORMAT_RG16U, NGF_IMAGE_FORMAT_RGB16U, NGF_IMAGE_FORMAT_RGBA16U, NGF_IMAGE_FORMAT_R32U, NGF_IMAGE_FORMAT_RG32U, NGF_IMAGE_FORMAT_RGB32U, NGF_IMAGE_FORMAT_RGBA32U, NGF_IMAGE_FORMAT_BC7, NGF_IMAGE_FORMAT_BC7_SRGB, NGF_IMAGE_FORMAT_BC6H_SFLOAT, NGF_IMAGE_FORMAT_BC6H_UFLOAT, NGF_IMAGE_FORMAT_BC5_UNORM, NGF_IMAGE_FORMAT_BC5_SNORM, NGF_IMAGE_FORMAT_ASTC_4x4, NGF_IMAGE_FORMAT_ASTC_4x4_SRGB, NGF_IMAGE_FORMAT_ASTC_5x4, NGF_IMAGE_FORMAT_ASTC_5x4_SRGB, NGF_IMAGE_FORMAT_ASTC_5x5, NGF_IMAGE_FORMAT_ASTC_5x5_SRGB, NGF_IMAGE_FORMAT_ASTC_6x5, NGF_IMAGE_FORMAT_ASTC_6x5_SRGB, NGF_IMAGE_FORMAT_ASTC_6x6, NGF_IMAGE_FORMAT_ASTC_6x6_SRGB, NGF_IMAGE_FORMAT_ASTC_8x5, NGF_IMAGE_FORMAT_ASTC_8x5_SRGB, NGF_IMAGE_FORMAT_ASTC_8x6, NGF_IMAGE_FORMAT_ASTC_8x6_SRGB, NGF_IMAGE_FORMAT_ASTC_8x8, NGF_IMAGE_FORMAT_ASTC_8x8_SRGB, NGF_IMAGE_FORMAT_ASTC_10x5, NGF_IMAGE_FORMAT_ASTC_10x5_SRGB, NGF_IMAGE_FORMAT_ASTC_10x6, NGF_IMAGE_FORMAT_ASTC_10x6_SRGB, NGF_IMAGE_FORMAT_ASTC_10x8, NGF_IMAGE_FORMAT_ASTC_10x8_SRGB, NGF_IMAGE_FORMAT_ASTC_10x10, NGF_IMAGE_FORMAT_ASTC_10x10_SRGB, NGF_IMAGE_FORMAT_ASTC_12x10, NGF_IMAGE_FORMAT_ASTC_12x10_SRGB, NGF_IMAGE_FORMAT_ASTC_12x12, 
NGF_IMAGE_FORMAT_ASTC_12x12_SRGB, NGF_IMAGE_FORMAT_DEPTH32, NGF_IMAGE_FORMAT_DEPTH16, NGF_IMAGE_FORMAT_DEPTH24_STENCIL8, NGF_IMAGE_FORMAT_UNDEFINED, NGF_IMAGE_FORMAT_COUNT } ngf_image_format; /** * @enum ngf_attachment_type * \ingroup ngf * Enumerates render target attachment types. */ typedef enum ngf_attachment_type { /** \ingroup ngf * For attachments containing color data. */ NGF_ATTACHMENT_COLOR = 0, /** \ingroup ngf * For attachments containing depth data. */ NGF_ATTACHMENT_DEPTH, /** \ingroup ngf * For attachments containing combined depth and stencil data. */ NGF_ATTACHMENT_DEPTH_STENCIL } ngf_attachment_type; /** * @struct ngf_attachment_description * \ingroup ngf * Describes the type and format of a render target attachment. */ typedef struct ngf_attachment_description { ngf_attachment_type type; /**< What the attachment shall be used for. */ ngf_image_format format; /**< Format of the associated image. Note that it must be valid for the given attachment type. */ ngf_sample_count sample_count; /**< Number of samples per pixel in the associated image. */ bool is_resolve; /**< Whether the image associated with this attachment is used as an MSAA resolve target. */ } ngf_attachment_description; /** * @struct ngf_attachment_descriptions * \ingroup ngf * A list of attachment descriptions. */ typedef struct ngf_attachment_descriptions { /** Pointer to a continuous array of \ref ngf_attachment_descriptions::ndescs \ref * ngf_attachment_description objects. */ const ngf_attachment_description* descs; uint32_t ndescs; /**< The number of attachment descriptions in the list. */ } ngf_attachment_descriptions; /** * @enum ngf_primitive_topology * \ingroup ngf * * Enumerates the available primitive topologies (ways to group vertices into primitives). */ typedef enum ngf_primitive_topology { /** * \ingroup ngf * A list of separate triangles - each three vertices define a separate triangle. 
*/ NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 0, /** * \ingroup ngf * A list of connected triangles, with consecutive triangles sharing an edge like so: * ``` * o---------o-----------o * \ / \ / * \ / \ / ... * o----------o * * ``` */ NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, /** * \ingroup ngf * A list of separate lines. Each two vertices define a separate line. */ NGF_PRIMITIVE_TOPOLOGY_LINE_LIST, /** * \ingroup ngf * A list of connected lines. The end of a line is the beginning of the next line in the list. */ NGF_PRIMITIVE_TOPOLOGY_LINE_STRIP, NGF_PRIMITIVE_TOPOLOGY_COUNT } ngf_primitive_topology; /** * @struct ngf_constant_specialization * \ingroup ngf * * A constant specialization entry, sets the value for a single specialization constant. */ typedef struct ngf_constant_specialization { uint32_t constant_id; /**< ID of the specialization constant used in the shader stage */ uint32_t offset; /**< Offset at which the user-provided value is stored in the specialization buffer. */ ngf_type type; /**< Type of the specialization constant. */ } ngf_constant_specialization; /** * @struct ngf_specialization_info * \ingroup ngf * Sets specialization constant values for a pipeline. * Specialization constants are a kind of shader constant whose values can be set at pipeline * creation time. The shaders that run as part of said pipeline will then see the provided values * during execution. */ typedef struct ngf_specialization_info { const ngf_constant_specialization* specializations; /**< List of specialization entries. */ uint32_t nspecializations; /**< Number of specialization entries. */ const void* value_buffer; /**< Pointer to a buffer containing the values for the specialization constants. 
 */ } ngf_specialization_info; /** * @struct ngf_input_assembly_info * \ingroup ngf * Specifies how vertices are grouped into primitives (see \ref ngf_primitive_topology). */ typedef struct ngf_input_assembly_info { ngf_primitive_topology primitive_topology; bool enable_primitive_restart; } ngf_input_assembly_info; /** * @struct ngf_graphics_pipeline_info * \ingroup ngf * * Contains all information necessary for creating a graphics pipeline object. */ typedef struct ngf_graphics_pipeline_info { ngf_shader_stage shader_stages[5]; /**< The programmable stages for this pipeline. */ uint32_t nshader_stages; /**< The number of programmable stages involved. */ const ngf_rasterization_info* rasterization; /**< Specifies the parameters for the rasterizer. */ const ngf_multisample_info* multisample; /**< Specifies the parameters for multisampling. */ /** * Specifies the parameters for depth and stencil testing. */ const ngf_depth_stencil_info* depth_stencil; /** * Specifies vertex attributes and vertex attribute buffer bindings. */ const ngf_vertex_input_info* input_info; /** * Specifies how primitives are assembled from vertices. */ const ngf_input_assembly_info* input_assembly_info; const ngf_specialization_info* spec_info; /**< Specifies the values for specialization constants (if any) used by the programmable stages. */ /** * Describes which render targets are compatible with this pipeline. * A compatible render target must have the same number of attachments as specified in the list, * with matching type, format and sample count. */ const ngf_attachment_descriptions* compatible_rt_attachment_descs; /** * A pointer to an array of \ref ngf_blend_info structures specifying the parameters for blending. * The array must contain exactly the same number of elements as there are color attachments * specified in \ref ngf_graphics_pipeline_info::compatible_rt_attachment_descs. * If set to NULL, all color attachments will have blending disabled and fully enabled color write * mask. 
 */ const ngf_blend_info* color_attachment_blend_states; float blend_consts[4]; /**< Blend constants used by \ref NGF_BLEND_FACTOR_CONSTANT_COLOR, \ref NGF_BLEND_FACTOR_CONSTANT_ALPHA, \ref NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR and \ref NGF_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA . */ const char* debug_name; } ngf_graphics_pipeline_info; /** * @struct ngf_graphics_pipeline * \ingroup ngf * * An opaque handle to a graphics pipeline object. * * See also: \ref ngf_graphics_pipeline_info, \ref ngf_create_graphics_pipeline and \ref * ngf_destroy_graphics_pipeline. */ typedef struct ngf_graphics_pipeline_t* ngf_graphics_pipeline; /** * @struct ngf_compute_pipeline_info * \ingroup ngf * * Contains all information necessary for creating a compute pipeline object. */ typedef struct ngf_compute_pipeline_info { ngf_shader_stage shader_stage; /**< The (only) stage for this pipeline. */ const ngf_specialization_info* spec_info; /**< Specifies the value of specialization consts used by this pipeline. */ const char* debug_name; } ngf_compute_pipeline_info; /** * @struct ngf_compute_pipeline * \ingroup ngf * * An opaque handle to a compute pipeline object. * * See also: \ref ngf_compute_pipeline_info, \ref ngf_create_compute_pipeline and \ref * ngf_destroy_compute_pipeline. */ typedef struct ngf_compute_pipeline_t* ngf_compute_pipeline; /** * @enum ngf_descriptor_type * \ingroup ngf * * Available descriptor types. * Note that some back-ends may not support all of the listed descriptor types. */ typedef enum ngf_descriptor_type { /** * \ingroup ngf * * A uniform buffer, also known as a constant buffer, can be used to pass * a small to medium sized chunk of data to the shader in a structured way. */ NGF_DESCRIPTOR_UNIFORM_BUFFER = 0, /** * \ingroup ngf * * An \ref ngf_image. */ NGF_DESCRIPTOR_IMAGE, /** * \ingroup ngf * * An \ref ngf_sampler. */ NGF_DESCRIPTOR_SAMPLER, /** * \ingroup ngf * * A combination of an image and sampler in a single object. 
*/ NGF_DESCRIPTOR_IMAGE_AND_SAMPLER, /** * \ingroup ngf * * A texel buffer can be used to pass a large amount of unstructured data * (i.e. a big array of `float4`s) to the shader. */ NGF_DESCRIPTOR_TEXEL_BUFFER, /** * \ingroup ngf * * A storage buffer is a large buffer that can be both read and written in shaders. */ NGF_DESCRIPTOR_STORAGE_BUFFER, /** * An image that can be both read and written to in a shader. */ NGF_DESCRIPTOR_STORAGE_IMAGE, NGF_DESCRIPTOR_ACCELERATION_STRUCTURE, NGF_DESCRIPTOR_TYPE_COUNT } ngf_descriptor_type; /** * @enum ngf_sampler_filter * \ingroup ngf * * Enumerates filters for texture lookups. */ typedef enum ngf_sampler_filter { /** * \ingroup ngf * * When used as the minification (\ref ngf_sampler_info::min_filter) or magnification (\ref * ngf_sampler_info::mag_filter) filter, the result of the filtering operation shall be the * value of the texel whose center is nearest to the sample. * * When used as \ref ngf_sampler_info::mip_filter, makes the selected mip level snap to the one * that is closest to the requested mip level value. */ NGF_FILTER_NEAREST = 0, /** * \ingroup ngf * * When used as the minification (\ref ngf_sampler_info::min_filter) or magnification (\ref * ngf_sampler_info::mag_filter) filter, the result of the filtering operation shall be linearly * interpolated from the values of 4 (in case of 2D images) or 8 (in case of 3D images) texels * whose centers are nearest to the sample. * * When used as \ref ngf_sampler_info::mip_filter, linearly blends the values from two mip levels * closest to the requested mip level value. */ NGF_FILTER_LINEAR, NGF_FILTER_COUNT } ngf_sampler_filter; /** * @enum ngf_sampler_wrap_mode * \ingroup ngf * * Enumerates strategies for dealing with sampling an image out-of-bounds. */ typedef enum ngf_sampler_wrap_mode { /** \ingroup ngf * Clamp the texel value to what's at the edge of the image. */ NGF_WRAP_MODE_CLAMP_TO_EDGE = 0, /** \ingroup ngf * Repeat the image contents. 
*/ NGF_WRAP_MODE_REPEAT, /** \ingroup ngf * Repeat the image contents, mirrored. */ NGF_WRAP_MODE_MIRRORED_REPEAT, NGF_WRAP_MODE_COUNT } ngf_sampler_wrap_mode; /** * @struct ngf_sampler_info * \ingroup ngf * * Information for creating an \ref ngf_sampler object. */ typedef struct ngf_sampler_info { ngf_sampler_filter min_filter; /**< The filter to apply when the sampled image is minified .*/ ngf_sampler_filter mag_filter; /**< The filter to apply when the sampled image is magnified. */ ngf_sampler_filter mip_filter; /**< The filter to use when transitioning between mip levels. */ ngf_sampler_wrap_mode wrap_u; /**< Wrap mode for the U coordinate. */ ngf_sampler_wrap_mode wrap_v; /**< Wrap mode for the V coordinate. */ ngf_sampler_wrap_mode wrap_w; /**< Wrap mode for the W coordinate. */ float lod_max; /**< Maximum mip level that shall be used during the filtering operation. * Note that this refers to the _level itself_ and not the dimensions of data * residing in that level, e.g. level 0 (the smallest possible level) has * the largest dimensions. */ float lod_min; /**< Minimum mip level that shall be used during the filtering operation. * Note that this refers to the _level itself_ and not the dimensions of data * residing in that level, e.g. level 0 (the smallest possible level) has * the largest dimensions. */ float lod_bias; /**< A bias to add to the mip level calculated during the sample operation. */ float max_anisotropy; /**< Max allowed degree of anisotropy. Ignored if \ref * ngf_sampler_info::enable_anisotropy is false. */ bool enable_anisotropy; /**< Whether to allow anisotropic filtering. */ ngf_compare_op compare_op; /**< The comparison to use when comparing depth texture samples to a * reference value. Set to \ref ngf_compare_op::NGF_COMPARE_OP_NEVER to * disable comparison for the sampler. */ } ngf_sampler_info; /** * @struct ngf_sampler * \ingroup ngf * * An opaque handle for a sampler object. 
 * * Samplers encapsulate how to filter an image - what happens when an image is minified or * magnified, whether anisotropic filtering is enabled, etc. See \ref ngf_sampler_info for more * details. * * Samplers can be bound separately from images - in which case the shader code sees them as two * distinct objects, and the same sampler can be used to sample two different images. They can also * be combined into a single descriptor (see \ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER), in which case * the shader code sees only a single image object, which can be sampled in only one certain way. */ typedef struct ngf_sampler_t* ngf_sampler; /** * @enum ngf_image_usage * \ingroup ngf * * Image usage flags. * * A valid image usage mask may be formed by combining one or more of these * values with a bitwise OR operator. */ typedef enum ngf_image_usage { /** \ingroup ngf * The image may be read from in a shader.*/ NGF_IMAGE_USAGE_SAMPLE_FROM = 0x01, /** \ingroup ngf * The image may be used as an attachment for a render target.*/ NGF_IMAGE_USAGE_ATTACHMENT = 0x02, /** \ingroup ngf * The image may be used as a destination for a transfer operation. **/ NGF_IMAGE_USAGE_XFER_DST = 0x04, /** \ingroup ngf * Mipmaps may be generated for the image with \ref ngf_cmd_generate_mipmaps. */ NGF_IMAGE_USAGE_MIPMAP_GENERATION = 0x08, /** \ingroup ngf * The image may be read or written to by a shader. */ NGF_IMAGE_USAGE_STORAGE = 0x10, /** \ingroup ngf * The image may be used as a source for a transfer operation. */ NGF_IMAGE_USAGE_XFER_SRC = 0x20 } ngf_image_usage; /** * @enum ngf_image_type * \ingroup ngf * * Enumerates the possible image types. */ typedef enum ngf_image_type { /** \ingroup ngf * Two-dimensional image. */ NGF_IMAGE_TYPE_IMAGE_2D = 0, /** \ingroup ngf * Three-dimensional image. */ NGF_IMAGE_TYPE_IMAGE_3D, /** \ingroup ngf * Cubemap. 
*/ NGF_IMAGE_TYPE_CUBE, NGF_IMAGE_TYPE_COUNT } ngf_image_type; /** * @struct ngf_image_info * \ingroup ngf * * Information required to create an \ref ngf_image object. */ typedef struct ngf_image_info { ngf_image_type type; /**< The image type. */ ngf_extent3d extent; /**< The width, height and depth. Note that dimensions irrelevant for the specified image type are ignored.*/ uint32_t nmips; /**< The number of mip levels in the image.*/ uint32_t nlayers; /**< Number of layers within the image. */ ngf_image_format format; /**< Internal format.*/ ngf_sample_count sample_count; /**< The number of samples per pixel in the image. **/ uint32_t usage_hint; /**< Specifies how the client intends to use the image. Must be a combination of \ref ngf_image_usage flags.*/ } ngf_image_info; /** * @struct ngf_image * \ingroup ngf * * An opaque handle to an image object. * * Images are multidimensional arrays of data that can be sampled from in shaders, or rendered into. * The individual elements of such arrays shall be referred to as "texels". An \ref ngf_image_format * describes the specific type and layout of data elements within a single texel. Note that * compressed image formats typically don't store values of texels directly, rather they store * enough information that the texel values can be reconstructed (perhaps lossily) by the rendering * device. * * Images can be one of the following types (see \ref ngf_image_type): * - a two-dimensional image, identified by \ref NGF_IMAGE_TYPE_IMAGE_2D and representing a * two-dimensional array of texels; * - a three-dimensional image, identified by \ref NGF_IMAGE_TYPE_IMAGE_3D and representing a * three-dimensional array of texels; * - a cubemap, identified by \ref NGF_IMAGE_TYPE_CUBE and representing a collection of six * two-dimensional texel arrays, each corresponding to a face of a cube. * * An image object may actually contain several images of the same type, format and dimensions. 
* Those are referred to as "layers" and images containing more than a single layer are called * "layered", or "image arrays". Note that a multi-layered 2D image is different from a * single-layered 3D image, because filtering is not performed across levels when sampling it. Also * note that layered cubemaps are not supported by all hardware - see \ref * ngf_device_capabilities::cubemap_arrays_supported. * * Each image layer may contain mip levels. Mip level 0 is the layer itself, and each subsequent * level (1, 2 and so on) is 2x smaller in dimensions, and usually contains the downscaled version * of the preceding level for the purposes of filtering, although the application is free to upload * arbitrary data into any mip level, as long as dimension requirements are respected. */ typedef struct ngf_image_t* ngf_image; /** * @enum ngf_cubemap_face * \ingroup ngf * * Members of this enumeration are used to refer to the different faces of a cubemap. */ typedef enum ngf_cubemap_face { NGF_CUBEMAP_FACE_POSITIVE_X, NGF_CUBEMAP_FACE_NEGATIVE_X, NGF_CUBEMAP_FACE_POSITIVE_Y, NGF_CUBEMAP_FACE_NEGATIVE_Y, NGF_CUBEMAP_FACE_POSITIVE_Z, NGF_CUBEMAP_FACE_NEGATIVE_Z, NGF_CUBEMAP_FACE_COUNT } ngf_cubemap_face; /** * @struct ngf_image_ref * \ingroup ngf * * A reference to a part of an image. */ typedef struct ngf_image_ref { ngf_image image; /**< The image being referred to.*/ uint32_t mip_level; /**< The mip level within the image.*/ uint32_t layer; /**< The layer within the image.*/ ngf_cubemap_face cubemap_face; /**< The face of the cubemap for cubemaps, ignored for non-cubemap images.*/ } ngf_image_ref; /** * @struct ngf_image_view_info * \ingroup ngf * * Information required to create an \ref ngf_image_view. * Contains the definition of the sub-resource represented by the view as well as * the view's corresponding type and format. */ typedef struct ngf_image_view_info { ngf_image src_image; /**< References the source image. 
*/ uint32_t base_mip_level; /**< Specifies the first mip level represented in the view. */ uint32_t nmips; /**< Specifies the number of mip levels represented in the view. */ uint32_t base_layer; /**< Specifies the first image layer represented in the view. */ uint32_t nlayers; /**< Specifies the number of layers represented in the view. */ ngf_image_type view_type; /**< The type to reinterpret the source image as. Must be compatible with the source image type.*/ ngf_image_format view_format; /**< The format to reinterpret the source image as. Must be compatible with the source image format.*/ } ngf_image_view_info; /** * @struct ngf_image_view * \ingroup ngf * * An opaque handle to an image view object. * * Image views provide a way to reinterpret different sub-parts of a source image as having a * particular type and/or format. They can be bound and used in GPU programs just like regular * images. Image views are backed by the memory of their corresponding source images and do not * incur additional GPU allocations. They become invalid if their source image is destroyed. * * Image views can use a different type than the source image, however this is subject to * compatibility rules defined in the table below: * * Source image type | Compatible view types * ------------------------------ | ---------------------------------------------------------- * \ref NGF_IMAGE_TYPE_IMAGE_2D | \ref NGF_IMAGE_TYPE_IMAGE_2D, \ref NGF_IMAGE_TYPE_IMAGE_3D * \ref NGF_IMAGE_TYPE_CUBE | \ref NGF_IMAGE_TYPE_IMAGE_2D * \ref NGF_IMAGE_TYPE_IMAGE_3D | \ref NGF_IMAGE_TYPE_IMAGE_3D * * Attempting to create a view with a type that is not compatible with the source image type will * result in an error. * * Image views can use a different pixel format from the source image (thus "type punning" or * reinterpreting pixel data). However, the format must be compatible with the source image format. * Format compatibility is platform-dependent. 
Attempting to create a view with a format that is not * compatible with the source image format will result in an error. */ typedef struct ngf_image_view_t* ngf_image_view; /** * @struct ngf_render_target_info * \ingroup ngf * Information required to create a render target object. */ typedef struct ngf_render_target_info { /** List of attachment descriptions. */ const ngf_attachment_descriptions* attachment_descriptions; /** Image references, describing what is bound to each attachment. */ const ngf_image_ref* attachment_image_refs; } ngf_render_target_info; /** * @struct ngf_render_target * \ingroup ngf * * An opaque handle to a render target object. * * Render targets are collections of images that can be rendered into. Each image in the collection * is referred to as an "attachment". Some attachments have special meaning, for example the depth * or the combined depth+stencil attachment, the contents of which are used in depth/stencil tests. * A render target is not allowed to have multiple depth or depth+stencil attachments, however it is * allowed to have multiple color attachments (up to a certain limit). */ typedef struct ngf_render_target_t* ngf_render_target; /** * @struct ngf_clear_info * \ingroup ngf * * Specifies a render target clear operation. */ typedef union ngf_clear_info { /** * The color to clear to. Each element corresponds to the red, green, blue and alpha channel * respectively, and is a floating point value within the [0; 1] range, with 0.0 corresponding to * none an 1.0 corresponding to full intensity. If the format of the render target image does not * have a corresponding channel, the value is ignored. * This field is used for color attachments only. */ float clear_color[4]; /** * The depth and stencil values to clear to. This field is used for depth or combined * depth/stencil attachments only. */ struct { float clear_depth; /**< The depth value to clear to. */ uint32_t clear_stencil; /**< The stencil value to clear to. 
*/ } clear_depth_stencil; } ngf_clear; /** * @enum ngf_attachment_load_op * \ingroup ngf * Enumerates actions that can be performed on attachment "load" (at the start of a render pass). */ typedef enum ngf_attachment_load_op { /** \ingroup ngf * Don't care what happens. */ NGF_LOAD_OP_DONTCARE = 0, /** \ingroup ngf * Preserve the prior contents of the attachment. */ NGF_LOAD_OP_KEEP, /** \ingroup ngf * Clear the attachment. */ NGF_LOAD_OP_CLEAR, NGF_LOAD_OP_COUNT } ngf_attachment_load_op; /** * @enum ngf_attachment_store_op * \ingroup ngf * Enumerates actions that can be performed on attachment "store" (at the end of a render pass). */ typedef enum ngf_attachment_store_op { /** * \ingroup ngf * * Don't care what happens. Use this if you don't plan on reading back the * contents of the attachment in any shaders, or presenting it to screen. */ NGF_STORE_OP_DONTCARE = 0, /** * \ingroup ngf * * Use this if you plan on reading the contents of the attachment in any shaders or * presenting it to screen. The contents of the attachment shall be written out to system memory. */ NGF_STORE_OP_STORE, /** * \ingroup ngf * * Use this to resolve a multisampled color attachment to a corresponding resolve attachment. */ NGF_STORE_OP_RESOLVE, NGF_STORE_OP_COUNT } ngf_attachment_store_op; struct ngfi_private_encoder_data { uintptr_t d0; uintptr_t d1; }; /** * @struct ngf_render_encoder * \ingroup ngf * * A render encoder records rendering commands (such as draw calls) into its * corresponding command buffer. */ typedef struct ngf_render_encoder { struct ngfi_private_encoder_data pvt_data_donotuse; } ngf_render_encoder; /** * @struct ngf_xfer_encoder * \ingroup ngf * * A transfer encoder records transfer commands (i.e. copying buffer contents) * into its corresponding command buffer. 
*/ typedef struct ngf_xfer_encoder { struct ngfi_private_encoder_data pvt_data_donotuse; } ngf_xfer_encoder; /** * @struct ngf_compute_encoder * \ingroup ngf * * A compute encoder records compute dispatches into its corresponding command buffer. */ typedef struct ngf_compute_encoder { struct ngfi_private_encoder_data pvt_data_donotuse; } ngf_compute_encoder; /** * @struct ngf_render_pass_info * \ingroup ngf * Information required to begin a render pass. */ typedef struct ngf_render_pass_info { /** * A render target that shall be rendered to during this pass. */ ngf_render_target render_target; /** * A pointer to a buffer of \ref ngf_load_op enumerators specifying the operation to perform at * the start of the render pass for each attachment of \ref ngf_render_pass_info::render_target. * The buffer must have at least the same number of elements as there are attachments in the * render target. The `i`th element of the buffer corresponds to the `i`th attachment. */ const ngf_attachment_load_op* load_ops; /** * A pointer to a buffer of \ref ngf_store_op enumerators specifying the operation to perform at * the end of the render pass for each attachment of \ref ngf_render_pass_info::render_target. The * buffer must have at least the same number of elements as there are attachments in the render * target. The `i`th element of the buffer corresponds to the `i`th attachment. */ const ngf_attachment_store_op* store_ops; /** * If no attachment has a clear as its load op, this field may be NULL. * Otherwise, it shall be a pointer to a buffer of \ref ngf_clear objects. The buffer must contain * at least as many elements as there are attachments in the render target. The `i`th element of * the buffer corresponds to the `i`th attachment. For attachments that are to be cleared at the * beginning of the pass, the clear values from the corresponding element of the buffer are used. * The rest of the buffer's elements are ignored. 
*/ const ngf_clear* clears; } ngf_render_pass_info; /** * @struct ngf_xfer_pass_info * \ingroup ngf * * Information required to begin a transfer pass. */ typedef struct ngf_xfer_pass_info { void* reserved; } ngf_xfer_pass_info; /** * @struct ngf_compute_pass_info * \ingroup ngf * * Information required to begin a compute pass. */ typedef struct ngf_compute_pass_info { void* reserved; } ngf_compute_pass_info; /** * @enum ngf_buffer_storage_type * \ingroup ngf * Enumerates types of memory backing a buffer object. */ typedef enum ngf_buffer_storage_type { /** * \ingroup ngf * Memory that can be read by the host. */ NGF_BUFFER_STORAGE_HOST_READABLE, /** * \ingroup ngf * Memory that can be written to by the host. */ NGF_BUFFER_STORAGE_HOST_WRITEABLE, /** * \ingroup ngf * Memory that can be both read from and written to by the * host. */ NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE, /** * \ingroup ngf * * Memory that is local to the device (GPU). Normally, this type of storage * isn't accessible directly from the host and the contents of a * buffer backed by this type of memory can only be modified by executing a * \ref ngf_cmd_copy_buffer. */ NGF_BUFFER_STORAGE_DEVICE_LOCAL, /** * \ingroup ngf * * Memory that is both local to the device (GPU) and mappable/writeable directly * from host. This type of storage is available only when the capability * \ref ngf_device_capabilities::device_local_memory_is_host_visible is supported. * Examples of systems that may support this type of storage are iGPUs or discrete * GPUs with ReBAR enabled. * Using this type of backing storage allows the host to write bytes directly into * the mapped memory, obviating the need for staging buffers in some cases. */ NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE, /** * \ingroup ngf * * Same as \ref NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABALE, but additionally allows * the host to read directly from mapped memory. 
*/ NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE } ngf_buffer_storage_type; /** * @enum ngf_buffer_usage * \ingroup ngf * Enumerates the buffer usage flags. A valid buffer usage mask may be formed by combining a subset * of these values with a bitwise OR operator. */ typedef enum ngf_buffer_usage { /** \ingroup ngf * The buffer may be used as a source for transfer operations. */ NGF_BUFFER_USAGE_XFER_SRC = 0x01, /** \ingroup ngf * The buffer may be used as a destination for transfer operations. */ NGF_BUFFER_USAGE_XFER_DST = 0x02, /** \ingroup ngf * The buffer may be bound as a uniform buffer. */ NGF_BUFFER_USAGE_UNIFORM_BUFFER = 0x04, /** \ingroup ngf * The buffer may be used as the source of index data for indexed draws. */ NGF_BUFFER_USAGE_INDEX_BUFFER = 0x08, /** \ingroup ngf * The buffer may be used as a source of vertex attribute data. */ NGF_BUFFER_USAGE_VERTEX_BUFFER = 0x10, /** \ingroup ngf * The buffer may be bound as a uniform texel buffer. */ NGF_BUFFER_USAGE_TEXEL_BUFFER = 0x20, /** * \ingroup ngf * The buffer may be bound as a storage buffer. */ NGF_BUFFER_USAGE_STORAGE_BUFFER = 0x40, NGF_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x80, NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT = 0x100, NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT = 0x200 } ngf_buffer_usage; /** * @struct ngf_buffer_info * \ingroup ngf * Information required to create a buffer object. */ typedef struct ngf_buffer_info { size_t size; /**< The size of the buffer in bytes. */ ngf_buffer_storage_type storage_type; /**< Flags specifying the preferred storage type.*/ uint32_t buffer_usage; /**< Flags specifying the intended usage.*/ } ngf_buffer_info; /** * @struct ngf_buffer * \ingroup ngf * * An opaque handle to a buffer object. */ typedef struct ngf_buffer_t* ngf_buffer; /** * @struct ngf_buffer_slice * \ingroup ngf * * A reference to a subregion of a buffer. 
*/ typedef struct ngf_buffer_slice { ngf_buffer buffer; /**< The handle of the buffer being referred to. */ size_t offset; /**< Starting offset of the subregion. */ size_t range; /**< Size of the subregion. */ } ngf_buffer_slice; /** * @struct ngf_texel_buffer_view * \ingroup ngf * * GPU programs have to access texel buffers through special "texel buffer view" objects which * specify the exact format of the data stored in the buffer. * See also: \ref ngf_texel_buffer_view_info, \ref ngf_create_texel_buffer_view. */ typedef struct ngf_texel_buffer_view_t* ngf_texel_buffer_view; /** * @struct ngf_texel_buffer_view_info * * Information required to create a texel buffer view object. */ typedef struct ngf_texel_buffer_view_info { ngf_buffer buffer; /**< The buffer that the view covers. */ size_t offset; /**< Offset within the buffer (in bytes) that the view covers. */ size_t size; /**< The size of the range (in bytes) that the view covers. */ ngf_image_format texel_format; /**< The texel format to interpret the buffer contents as. */ } ngf_texel_buffer_view_info; /** * @struct ngf_buffer_bind_info * \ingroup ngf * Specifies a buffer resource bind operation. */ typedef struct ngf_buffer_bind_info { ngf_buffer buffer; /**< Which buffer to bind.*/ size_t offset; /**< Offset at which to bind the buffer.*/ size_t range; /**< Bound range.*/ } ngf_buffer_bind_info; /** * @struct ngf_image_sampler_bind_info * \ingroup ngf * Specifies an image and/or sampler resource bind operation. To bind a combined image sampler, both * fields have to be set. */ typedef struct ngf_image_sampler_bind_info { bool is_image_view; union { ngf_image image; ngf_image_view view; } resource; /**< The image OR image view to bind. Can be NULL if binding just a sampler. */ ngf_sampler sampler; /**< The sampler to bind. Can be NULL if binding just an image. */ } ngf_image_sampler_bind_info; /** * @struct ngf_resource_bind_op * \ingroup ngf * * Specifies a resource binding operation.
* * The resource binding model in nicegraf is similar to that of Vulkan. Shaders group their * resources into "sets", and individual slots within those sets are referred to as "bindings". * The main difference in nicegraf is that one does not have to explicitly allocate descriptor pools * like in Vulkan. Instead, the application code simply says which set and binding to assign a * particular resource to. Internally, some optimization may be performed to avoid redundant binds. * For backends that don't have a similar resource binding model (e.g. Metal), a special comment * must be added to the shader code that maps the backend's "native" binding model onto this one. * See \ref ngf_shader_stage_info::content for more details on that. */ typedef struct ngf_resource_bind_op { uint32_t target_set; /**< Target set ID. */ uint32_t target_binding; /**< Target binding ID. */ ngf_descriptor_type type; /**< The type of the resource being bound. */ union { ngf_buffer_bind_info buffer; ngf_texel_buffer_view texel_buffer_view; ngf_image_sampler_bind_info image_sampler; uintptr_t acceleration_structure; /**< The opaque handle to the acceleration structure. */ } info; /**< The details about the resource being bound, depending on type. */ uint32_t array_index; /**< Specifies the destination array index for bindings that are arrays. */ } ngf_resource_bind_op; /** * @enum ngf_present_mode * \ingroup ngf * Enumerates possible presentation modes. * "Presentation mode" refers to the particular way the CPU, * GPU and the presentation engine interact. Some of the listed presentation modes * may not be supported on various backend, hardware or OS combinations. If an * unsupported mode is requested, nicegraf silently falls back onto \ref NGF_PRESENTATION_MODE_FIFO.
* In this mode, the presentation requests are queued internally, and the * presentation engine waits for the vertical blanking signal to present * the image at the front of the queue. This mode guarantees no * frame tearing. */ NGF_PRESENTATION_MODE_FIFO, /** * \ingroup ngf * * In this mode, the presentation engine does not wait for the vertical blanking signal, instead * presenting an image immediately. This mode results in lower latency but may induce frame * tearing. It is not recommended to use this mode on mobile targets. */ NGF_PRESENTATION_MODE_IMMEDIATE } ngf_present_mode; /** * Enumerates color spaces for swapchain images. * Check \ref ngf_device_capabilities::colorspace_support to determine whether a particular color * space is supported. */ typedef enum ngf_colorspace { NGF_COLORSPACE_SRGB_NONLINEAR = 0u, NGF_COLORSPACE_EXTENDED_SRGB_NONLINEAR, NGF_COLORSPACE_EXTENDED_SRGB_LINEAR, NGF_COLORSPACE_DISPLAY_P3, NGF_COLORSPACE_DISPLAY_P3_LINEAR, NGF_COLORSPACE_DCI_P3, NGF_COLORSPACE_ITUR_BT2020, NGF_COLORSPACE_ITUR_BT2100_PQ, NGF_COLORSPACE_COUNT } ngf_colorspace; /** * @struct ngf_swapchain_info * \ingroup ngf * Swapchain configuration. */ typedef struct ngf_swapchain_info { ngf_image_format color_format; /**< Swapchain image format. */ ngf_colorspace colorspace; /**< Colorspace that the swapchain image uses. */ ngf_image_format depth_format; /**< Format to use for the depth buffer, if set to NGF_IMAGE_FORMAT_UNDEFINED, no depth buffer will be created. */ ngf_sample_count sample_count; /**< Number of samples per pixel (0 for non-multisampled) */ uint32_t capacity_hint; /**< Number of images in swapchain (may be ignored)*/ uint32_t width; /**< Width of swapchain images in pixels. */ uint32_t height; /**< Height of swapchain images in pixels. */ ngf_present_mode present_mode; /**< Desired present mode. */ uintptr_t native_handle; /**< HWND, ANativeWindow, NSWindow, etc. 
*/ bool enable_compute_access; /**< Whether to enable access to swapchain images from compute stage. */ } ngf_swapchain_info; /** * @struct ngf_context * \ingroup ngf * An opaque handle to a nicegraf rendering context. * * A context represents the internal state of the library that is required for * performing most of the library's functionality. This includes, but is not * limited to: presenting rendered content in a window; creating and managing * resources, such as images, buffers and command buffers; recording and * submitting command buffers. * * Most operations, with the exception of `ngf_initialize` and context management * functions themselves, require a context to be "current" on the calling * thread. * * Invoking `ngf_set_context` will make a context current on the calling * thread. Once a context is made current on a thread, it cannot be migrated to * another thread. * * The results of using resources created within one context, in another * context are undefined, unless the two contexts are explicitly configured to * share data. When contexts are configured as shared, resources created in one * can be used in the other, and vice versa. Notably, command buffers created * and recorded in one context, can be submitted in another, shared context. * * A context maintains exclusive ownership of its swapchain (if it has one), * and even shared contexts cannot acquire, present or render to images from * that swapchain. * * See also: \ref ngf_context_info and \ref ngf_create_context. */ typedef struct ngf_context_t* ngf_context; /** * @struct ngf_context_info * \ingroup ngf * Configures a nicegraf rendering context. */ typedef struct ngf_context_info { /** * Configures the swapchain that the context will be presenting to. This * can be NULL if all rendering is done off-screen and the context never * presents to a window.
*/ const ngf_swapchain_info* swapchain_info; /** * A reference to another context; the newly created context shall be able to use the resources * (such as buffers and images) created within the given context, and vice versa Can be NULL. */ const ngf_context shared_context; } ngf_context_info; /** * @struct ngf_cmd_buffer_info * \ingroup ngf * Information about a command buffer. */ typedef struct ngf_cmd_buffer_info { uint32_t reserved; } ngf_cmd_buffer_info; /** * @struct ngf_cmd_buffer * \ingroup ngf * Encodes a series of rendering commands. * * Internally, a command buffer may be in any of the following five states: * - new; * - ready; * - recording; * - awaiting submission; * - submitted. * * Every newly created command buffer is in the "new" state. It can be * transitioned to the "ready" state by calling \ref ngf_start_cmd_buffer on it. * * When a command buffer is in the "ready" state, you may begin recording a new * series of rendering commands into it. * * Recording commands into a command buffer is performed using command * encoders. There are a few different types of encoders, supporting different * types of commands. * * A new encoder may be created for a command buffer only if the command buffer * is in either the "ready" or the "awaiting submission" state. * * Creating a new encoder for a command buffer transitions that command buffer * to the "recording" state. * * Finishing and disposing of an active encoder transitions its corresponding * command buffer into the "awaiting submission" state. * * The three rules above mean that a command buffer may not have more than * one encoder active at a given time. * * Once all of the desired commands have been recorded, and the command buffer * is in the "awaiting submission" state, the command buffer may be submitted * for execution via a call to \ref ngf_submit_cmd_buffers, which transitions it * into the "submitted" state. 
* * Submission may only be performed on command buffers that are in the * "awaiting submission" state. * * Once a command buffer is in the "submitted" state, it is * impossible to append any new commands to it. * It is, however, possible to begin recording a new, completely separate batch * of commands by calling \ref ngf_start_cmd_buffer which implicitly * transitions the buffer to the "ready" state if it is already "submitted". * This does not affect any previously submitted commands. * * Calling a command buffer function on a buffer that is in a state not * expected by that function will result in an error. For example, calling * \ref ngf_submit_cmd_buffers would produce an error on a buffer that is in * the "ready" state, since, according to the rules outlined above, * \ref ngf_submit_cmd_buffers expects command buffers to be in the "awaiting * submission" state. * */ typedef struct ngf_cmd_buffer_t* ngf_cmd_buffer; /** * @typedef ngf_frame_token * \ingroup ngf * A token identifying a frame of rendering. See \ref ngf_begin_frame and \ref ngf_end_frame for * details. */ typedef uintptr_t ngf_frame_token; /** * This is a special value used within the \ref ngf_device_capabilities structure * to indicate that a limit value (i.e. max texture size) is not known or not * relevant for the current backend. */ #define NGF_DEVICE_LIMIT_UNKNOWN (~0u) /** * @struct ngf_device_capabilities * \ingroup ngf * Contains information about various device features, limits, etc. Clients * shouldn't instantiate this structure. See \ref ngf_get_device_capabilities. */ typedef struct ngf_device_capabilities { /** * When binding uniform buffers, the specified offset must be * a multiple of this number. */ size_t uniform_buffer_offset_alignment; /** * When binding storage buffers, the specified offset must be a multiple of this number. */ size_t storage_buffer_offset_alignment; /** * When binding a uniform buffer, the specified range must not exceed * this value. 
*/ size_t max_uniform_buffer_range; /** * When binding texel buffers, the specified offset must be * a multiple of this number. */ size_t texel_buffer_offset_alignment; /** * The maximum allowed number of vertex attributes per pipeline. */ size_t max_vertex_input_attributes_per_pipeline; /** * The maximum allowed number of sampled images (textures) per single * shader stage. Descriptors with type \ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER * and \ref NGF_DESCRIPTOR_TEXEL_BUFFER do count against this limit. */ size_t max_sampled_images_per_stage; /** * The maximum allowed number of sampler objects per single shader stage. * Descriptors with type \ref NGF_DESCRIPTOR_IMAGE_AND_SAMPLER do count against * this limit. */ size_t max_samplers_per_stage; /** * The maximum allowed number of uniform buffers per single shader stage. */ size_t max_uniform_buffers_per_stage; /** * This is the maximum number of _components_, across all inputs, for the fragment * stage. "Input component" refers to the individual components of an input vector. * For example, if the fragment stage has a single float4 input (vector of 4 floats), * then it has 4 input components. */ size_t max_fragment_input_components; /** * This is the maximum number of inputs for the fragment stage. */ size_t max_fragment_inputs; /** * Maximum allowed width of a 1D image. */ size_t max_1d_image_dimension; /** * Maximum allowed width, or height of a 2D image. */ size_t max_2d_image_dimension; /** * Maximum allowed width, height, or depth of a 3D image. */ size_t max_3d_image_dimension; /** * Maximum allowed width, or height of a cubemap. */ size_t max_cube_image_dimension; /** * Maximum allowed number of layers in an image. */ size_t max_image_layers; /** * Maximum number of color attachments that can be written to * during a render pass. */ size_t max_color_attachments_per_pass; /** * The maximum degree of sampler anisotropy. 
*/ float max_sampler_anisotropy; /** * This flag is set to `true` if the platform supports [0; 1] * range for the clip-space z coordinate. nicegraf enforces clip-space * z to be in this range on all backends that support it. This ensures * better precision for near-field objects. * See the following for an in-depth explanation: * http://web.archive.org/web/20210829130722/https://developer.nvidia.com/content/depth-precision-visualized */ bool clipspace_z_zero_to_one; /** * This flag is set to true if the device supports cubemap arrays. */ bool cubemap_arrays_supported; /** * Bitmap representing multisample count support for framebuffer color attachments * For example, (framebuffer_color_sample_counts & 16) indicates support for 16 samples */ size_t framebuffer_color_sample_counts; /** * The highest supported sample count for framebuffer color attachments. * This value is derived from \ref framebuffer_color_sample_counts. */ ngf_sample_count max_supported_framebuffer_color_sample_count; /** * Bitmap representing multisample count support for framebuffer depth attachments * For example, (framebuffer_depth_sample_counts & 16) indicates support for 16 samples */ size_t framebuffer_depth_sample_counts; /** * The highest supported sample count for framebuffer depth attachments. * This value is derived from \ref framebuffer_depth_sample_counts. */ ngf_sample_count max_supported_framebuffer_depth_sample_count; /** * Bitmap representing multisample count support for color textures * For example, (texture_color_sample_counts & 16) indicates support for 16 samples */ size_t texture_color_sample_counts; /** * The highest supported sample count for color textures. * This value is derived from \ref texture_color_sample_counts. 
*/ ngf_sample_count max_supported_texture_color_sample_count; /** * Bitmap representing multisample count support for depth textures * For example, (texture_depth_sample_counts & 16) indicates support for 16 samples */ size_t texture_depth_sample_counts; /** * The highest supported sample count for depth textures. * This value is derived from \ref texture_depth_sample_counts. */ ngf_sample_count max_supported_texture_depth_sample_count; /** * Indicates whether the device-local storage is also host visible. * Examples of cases where this may be supported are iGPU systems with unified memory, * or discrete GPUs with ReBAR enabled. * On systems with this capability, device-local storage can be mapped directly into * the host address space, removing the need for host-visible staging buffers in certain * cases. */ bool device_local_memory_is_host_visible; /** * Indicates whether the device is capable of inline raytracing. */ bool supports_inline_raytracing; } ngf_device_capabilities; /** * Maximum length of a device's name. * \ingroup ngf */ #define NGF_DEVICE_NAME_MAX_LENGTH (256u) /** * @struct ngf_device * Information about a rendering device. * See also: \ref ngf_get_device_list * \ingroup ngf */ typedef struct ngf_device { ngf_device_performance_tier performance_tier; /**< Device's performance tier. */ ngf_device_handle handle; /**< A handle to be passed to \ref ngf_initialize. */ /** * A string associated with the device. This is _not_ guaranteed to be unique per device. */ char name[NGF_DEVICE_NAME_MAX_LENGTH]; ngf_device_capabilities capabilities; /**< Device capabilities and limits. */ } ngf_device; /** * @struct ngf_image_write * Specifies an operation writing data from a source buffer into a mip level of an image. * * See \ref ngf_cmd_write_image. */ typedef struct ngf_image_write { size_t src_offset; /** < Data offset in the source buffer. */ ngf_offset3d dst_offset; /** < Offset in texels of the subregion to write. 
*/ ngf_extent3d extent; /**< Size in texels of the subregion to write. */ uint32_t dst_level; /**< Destination mip level. */ uint32_t dst_base_layer; /**< Starting destination layer. */ uint32_t nlayers; /**< Number of layers to copy for the specified mip level. */ } ngf_image_write; #ifdef _MSC_VER #pragma endregion #pragma region ngf_function_declarations #endif /** * \ingroup ngf * * Obtains a list of rendering devices available to nicegraf. * * This function is not thread-safe. * The devices are not returned in any particular order, and the order is not guaranteed to be the * same every time the function is called. * @param devices pointer to a pointer to `const` \ref ngf_device. If not `NULL`, this will be * populated with a pointer to an array of \ref ngf_device instances, each containing data about a * rendering device available to the system. Callers should not attempt to free the returned * pointer. * @param ndevices pointer to a `uint32_t`. If not NULL, the number of available rendering devices * shall be written to the memory pointed to by this parameter. */ ngf_error ngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT; /** * \ingroup ngf * * Initializes nicegraf. * * The client should call this function only once during the * entire lifetime of the application. This function is not thread safe. * @param init_info Initialization parameters. */ ngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT; /** * \ingroup ngf * * De-initializes nicegraf. * * The client should call this function only once during the * entire lifetime of the application. Must be called after * \ref ngf_initialize and after \ref ngf_destroy_context has * been called on every initialized \ref ngf_context. */ void ngf_shutdown() NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new \ref ngf_context. * * @param info The context configuration.
*/ ngf_error ngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given \ref ngf_context. * * @param ctx The context to destroy. */ void ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT; /** * \ingroup ngf * * Adjust the images associated with the given context's swapchain. * This function must be called every time that the window the context's presenting to is resized. * It is up to the client application to detect the resize events and call this function. * Not calling this function on resize results in undefined behavior. * * @param ctx The context to operate on * @param new_width New window client area width in pixels * @param new_height New window client area height in pixels */ ngf_error ngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT; /** * \ingroup ngf * * Set the given nicegraf context as current for the calling thread. * * All subsequent rendering operations invoked from the calling thread shall affect * the given context. * * Once a context has been set as current on a thread, it cannot be migrated to * another thread. * * @param ctx The context to set as current. */ ngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT; /** * \ingroup ngf * Get the active nicegraf context associated with the calling thread. * * Returns NULL if no context associated with the calling thread exists. */ ngf_context ngf_get_context() NGF_NOEXCEPT; /** * \ingroup ngf * * Begin a frame of rendering. * * This function starts a frame of rendering in the calling thread's current context. * It generates a special token associated with the frame, which is required for recording * command buffers (see \ref ngf_start_cmd_buffer). * @param token A pointer to a \ref ngf_frame_token. The generated frame token shall be returned * here. 
*/ ngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT; /** * \ingroup ngf * * End the current frame of rendering on the calling thread's context. * * @param token The frame token generated by the corresponding preceding call to \ref * ngf_begin_frame. */ ngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT; /** * \ingroup ngf * * Obtain a handle to the current swapchain image. * * The obtained handle should not be destroyed, or persisted across frames by the calling code. * Only use it to bind the current swapchain image as a resource accessed from the compute stage. * * @param token The frame token generated by the last call to \ref ngf_begin_frame. * @param result The pointer to the swapchain image handle shall be written here. */ ngf_error ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT; /** * \ingroup ngf * * @return A pointer to an \ref ngf_device_capabilities instance, or NULL, if no context is present * on the calling thread. */ const ngf_device_capabilities* ngf_get_device_capabilities(void) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new shader stage object. * * @param stages Information required to construct the shader stage object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given shader stage. * * @param stage The handle to the shader stage object to be destroyed. */ void ngf_destroy_shader_stage(ngf_shader_stage stage) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new graphics pipeline object. * * @param info Information required to construct the graphics pipeline object. * @param result Pointer to where the handle to the newly created object will be returned. 
*/ ngf_error ngf_create_graphics_pipeline( const ngf_graphics_pipeline_info* info, ngf_graphics_pipeline* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given graphics pipeline object. * * @param pipeline The handle to the pipeline object to be destroyed. */ void ngf_destroy_graphics_pipeline(ngf_graphics_pipeline pipeline) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new compute pipeline object. * * @param info Information required to construct the compute pipeline object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_compute_pipeline( const ngf_compute_pipeline_info* info, ngf_compute_pipeline* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given compute pipeline object. * * @param pipeline The handle to the pipeline object to be destroyed. */ void ngf_destroy_compute_pipeline(ngf_compute_pipeline pipeline) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new image object. * * @param info Information required to construct the image object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given image object. * * @param image The handle to the image object to be destroyed. */ void ngf_destroy_image(ngf_image image) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new image view object. * * @param info Information required to construct the image view object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given image view object. * * @param image The handle to the image view object to be destroyed. */ void ngf_destroy_image_view(ngf_image_view image_view) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new sampler object. 
 * * @param info Information required to construct the sampler object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given sampler object. * * @param sampler The handle to the sampler object to be destroyed. */ void ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT; /** * \ingroup ngf * * Create a new rendertarget object. * * @param info Information required to construct the rendertarget object. * @param result Pointer to where the handle to the newly created object will be returned. */ ngf_error ngf_create_render_target(const ngf_render_target_info* info, ngf_render_target* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given render target. * * @param rendertarget The handle to the rendertarget object to be destroyed. */ void ngf_destroy_render_target(ngf_render_target rendertarget) NGF_NOEXCEPT; /** * \ingroup ngf * * Returns the handle to the \ref ngf_render_target associated with the current context's * swapchain (aka the default render target). If the current context does not have a swapchain, the * result shall be null. Otherwise, it shall be a render target that has a color attachment * associated with the context's swapchain. If the swapchain was created with an accompanying depth * buffer, the render target shall have an attachment for that as well. * * The caller should not attempt to destroy the returned render target. It shall * be destroyed automatically, together with the parent context. */ ngf_render_target ngf_default_render_target() NGF_NOEXCEPT; /** * \ingroup ngf * * Returns the attachment descriptions for the default render target. The caller should not attempt * to free the returned pointer or modify the contents of the memory it points to. 
*/ const ngf_attachment_descriptions* ngf_default_render_target_attachment_descs() NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new buffer object. * * @param info Information required to construct the buffer object. * @param result Pointer to where the handle to the newly created object will be written to. */ ngf_error ngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given buffer object. * * @param buffer The handle to the buffer object to be destroyed. */ void ngf_destroy_buffer(ngf_buffer buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Maps a region of a given buffer to host memory. * * It is an error to bind a buffer that is currently mapped using any command. If a buffer that * needs to be bound is mapped, first call \ref ngf_buffer_flush_range to ensure any new data in the * mapped range becomes visible to the subsequent commands, then call \ref ngf_buffer_unmap. Writing * into any region that could be in use by previously submitted commands results in undefined * behavior. * * @param buf The handle to the buffer to be mapped. * @param offset The offset at which the mapped region starts, in bytes. It must * satisfy platform-specific alignment requirements. See, for example, \ref * ngf_device_capabilities::uniform_buffer_offset_alignment and \ref * ngf_device_capabilities::texel_buffer_offet_alignment. * @param size The size of the mapped region, in bytes. * @return A pointer to the mapped memory, or NULL if the buffer could not be mapped. */ void* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT; /** * \ingroup ngf * * Ensures that any writes performed by the CPU into the mapped range are visible to subsequently * submitted rendering commands executed by the rendering device. * @param buf The handle to the buffer that needs to be flushed. 
* @param offset The offset, relative to the start of the mapped range, at which * the flushed region starts, in bytes. * @param size The size of the flushed region, in bytes. */ void ngf_buffer_flush_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT; /** * \ingroup ngf * * Unmaps a previously mapped buffer. * * If multiple regions were mapped, all of them are unmapped. Any pointers returned by prior calls * to \ref ngf_buffer_map_range are invalidated. * * @param buf The buffer that needs to be unmapped. */ void ngf_buffer_unmap(ngf_buffer buf) NGF_NOEXCEPT; /** * \ingroup ngf * Creates a new texel buffer view object. * * @param info Information required to construct the texel buffer view object. * @param result Pointer to where the handle to the newly created object will be written to. */ ngf_error ngf_create_texel_buffer_view( const ngf_texel_buffer_view_info* info, ngf_texel_buffer_view* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given texel buffer view object. * * @param buffer The handle to the texel buffer view object to be destroyed. */ void ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT; /** * \ingroup ngf * * Waits for all pending rendering commands to complete. * * Do not use this function lightly. It is expensive because it introduces a sync point between the * CPU and the rendering device. */ void ngf_finish(void) NGF_NOEXCEPT; /** * \ingroup ngf * Maximum size, in bytes, of the inline data block that may be set on an encoder via * \ref ngf_set_bytes / \ref ngf_set_compute_bytes. Matches Vulkan's portability floor for * push constants. Every pipeline created by nicegraf reserves a push-constant range of * this size, free for any shader to consume via `[[vk::push_constant]]`. */ #define NGF_MAX_ENCODER_INLINE_BYTES 128u /** * \ingroup ngf * Sets a small inline data block visible to subsequent draws in the underlying * command buffer. 
May be called before or after binding a pipeline; pushed * values persist across pipeline binds within the same encoder. * * `size_bytes` must be <= \ref NGF_MAX_ENCODER_INLINE_BYTES and a multiple of 4. * Returns \ref NGF_ERROR_INVALID_SIZE if either constraint is violated. * `data == NULL` or `size_bytes == 0` is a silent no-op. */ ngf_error ngf_set_bytes(ngf_render_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT; /** * \ingroup ngf * Compute counterpart of \ref ngf_set_bytes. */ ngf_error ngf_set_compute_bytes(ngf_compute_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT; /** * \ingroup ngf * * Marks the given resources as "read-only". Once a resource has been marked as read-only, * nicegraf's internal hazard-tracking operations may be omitted for it, improving CPU * performance. Performing any modifying operations on a resource that had previously been * marked as "read-only" results in undefined behaviour. * * @param img A pointer to an array of handles to images, which are to be marked as read-only. * @param nimgs The number of images to be marked as read-only. * @param bufs A pointer to an array of handles to buffers, which are to be marked as read-only. * @param nbufs The number of buffers to be marked as read-only. */ void ngf_mark_read_only(ngf_image* imgs, uint32_t nimgs, ngf_buffer* bufs, uint32_t nbufs) NGF_NOEXCEPT; /** * \ingroup ngf * * Creates a new command buffer. * * @param info The information required to create the new command buffer. * @param result Pointer to where the handle to the newly created command buffer will be returned. */ ngf_error ngf_create_cmd_buffer(const ngf_cmd_buffer_info* info, ngf_cmd_buffer* result) NGF_NOEXCEPT; /** * \ingroup ngf * * Destroys the given command buffer. * * If there is any work submitted via the given command buffer still pending on the rendering * device, it shall be executed asynchronously. 
Therefore, application code doesn't need to wait for * the commands associated with the command buffer to finish before it can safely dispose of the * command buffer. * * @param buffer The handle to the command buffer object to be destroyed. */ void ngf_destroy_cmd_buffer(ngf_cmd_buffer buffer) NGF_NOEXCEPT; /** * \ingroup ngf * * Resets the command buffer. * * Erases all the commands previously recorded into the given command buffer, * and prepares it for recording commands to be submitted within the frame * identified by the specified token. * * The command buffer is required to be in the "ready" state. * * @param buf The handle to the command buffer to operate on * @param token The token for the frame within which the recorded commands are going to be * submitted. */ ngf_error ngf_start_cmd_buffer(ngf_cmd_buffer buf, ngf_frame_token token) NGF_NOEXCEPT; /** * \ingroup ngf * * Submits the commands recorded in the given command buffers for execution. * All command buffers must be in the "awaiting submission" state, and shall be transitioned to the * "submitted" state. * * @param nbuffers The number of command buffers being submitted for execution. * @param bufs A pointer to a contiguous array of \ref nbuffers handles to command buffer objects to * be submitted for execution. */ ngf_error ngf_submit_cmd_buffers(uint32_t nbuffers, ngf_cmd_buffer* bufs) NGF_NOEXCEPT; /** * \ingroup ngf * * Begins a new render pass. * A render pass can be thought of as a sequence of rendering commands associated with a particular * render target. At the start of the pass, an "load operation" is performed for each attachment. The application code * may specify exactly what load operations to perform for each individual attachment. After that, all the * rendering commands are executed. Finally, at the end of the pass, a "store operation" is performed for each attachment. * Again, the application code may specify exactly which store operations to perform for each individual attachment. 
* @param buf The command buffer to operate on. Must be in the "ready" state, shall be transitioned * to the "recording" state. * @param pass_info Specifies the renderpass parameters, such as load and store operations. * @param enc Pointer to memory into which a handle to a render encoder will be returned. All the * commands associated with the renderpass must be recorded using that encoder. */ ngf_error ngf_cmd_begin_render_pass( ngf_cmd_buffer buf, const ngf_render_pass_info* pass_info, ngf_render_encoder* enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Similar to \ref ngf_cmd_begin_render_pass, but with some choices pre-made: * - All color attachments of the render target are cleared to the specified color. * - Depth and stencil attachments are cleared to the specified respective values (if they are * present). * - The store action for any attachment that is not marked as "sampled from" (see \ref * ngf_attachment_description::is_sampled), is set to NGF_STORE_OP_DONTCARE. * - The store action for attachments marked as "sampled from", is set to NGF_STORE_OP_STORE. * @param buf The command buffer to operate on. * @param rt The handle to the render target to use for the pass. * @param clear_color_r The red component of the clear color to be used on color attachments. * Ignored for attachments that don't have that channel. * @param clear_color_g The green component of the clear color to be used on color attachments. * Ignored for attachments that don't have that channel. * @param clear_color_b The blue component of the clear color to be used on color attachments. * Ignored for attachments that don't have that channel. * @param clear_color_a The alpha component of the clear color to be used on color attachments. * Ignored for attachments that don't have that channel. * @param clear_depth The value to clear the depth attachment to (if it is present). * @param clear_stencil The value to clear the stencil attachment to (if it is present). 
*/ ngf_error ngf_cmd_begin_render_pass_simple( ngf_cmd_buffer buf, ngf_render_target rt, float clear_color_r, float clear_color_g, float clear_color_b, float clear_color_a, float clear_depth, uint32_t clear_stencil, ngf_render_encoder* enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Ends a render pass. * * Disposes of the given render command encoder, transitioning its corresponding * command buffer to the "ready" state. */ ngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Begins a transfer pass. * * A transfer pass is a sequence of commands that copy data. * * @param buf The handle to the command buffer to operate on. Must be in the "ready" * state, will be transitioned to the "recording" state. * @param pass_info Pointer to \ref ngf_xfer_pass_info specifying details about this transfer pass. * @param enc Pointer to memory where a handle to a transfer encoder shall be returned. All commands * associated with the transfer pass must be recorded using that encoder. */ ngf_error ngf_cmd_begin_xfer_pass( ngf_cmd_buffer buf, const ngf_xfer_pass_info* pass_info, ngf_xfer_encoder* enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Ends a transfer pass. * * Disposes of the given transfer cmd encoder, transitioning its corresponding * command buffer to the "ready" state. */ ngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Begins a compute pass. * * @param buf The handle of the command buffer to operate on. Must be in the "ready" * state, will be transitioned to the "recording" state. * @param pass_info A pointer to \ref ngf_compute_pass_info specifying details about this compute * pass. * @param enc Pointer to memory where a handle to a compute encoder shall be returned. All commands * associated with the compute pass must be recorded using that encoder. 
*/ ngf_error ngf_cmd_begin_compute_pass( ngf_cmd_buffer buf, const ngf_compute_pass_info* pass_info, ngf_compute_encoder* enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Ends a compute pass. * * Disposes of the given compute cmd encoder, transitioning its corresponding * command buffer to the "ready" state. */ ngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT; /** * \ingroup ngf * * Binds a graphics pipeline. */ void ngf_cmd_bind_gfx_pipeline(ngf_render_encoder buf, ngf_graphics_pipeline pipeline) NGF_NOEXCEPT; /** * \ingroup ngf * * Binds a compute pipeline. */ void ngf_cmd_bind_compute_pipeline(ngf_compute_encoder buf, ngf_compute_pipeline pipeline) NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the viewport to be used in subsequent rendering commands. * The viewport defines a region of the destination framebuffer that the resulting rendering * is scaled to fit into. */ void ngf_cmd_viewport(ngf_render_encoder buf, const ngf_irect2d* r) NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the scissor region to be used in the subsequent rendering commands. * The scissor defines a region of the framebuffer that can be affected by the rendering commands. * Any pixels outside of that region are not written to. */ void ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the reference value to be used in stencil tests. */ void ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT; /** * \ingroup ngf * * Set the compare mask to be used in stencil tests. */ void ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT; /** * \ingroup ngf * * Sets the stencil write mask. */ void ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT; /** * \ingroup ngf * * Configures a bias value to be added to the depth of each rasterized fragment. 
* Unclamped bias `b` is computed as follows: * * `b = const_scale * r + max_slope * slope_scale` * * where: * - `r` is a constant value dependent on the format of the depth buffer and other factors, * representing the minimum absolute difference between two rasterized depth values. * - `max_slope` is ideally the length of the depth function's gradient vector at the point * corresponding to the fragment (but can be approximated by `max(|dZ/dx|, |dZ/dy|)`. * * The final bias `B`, which is added to the fragment depth, is computed as follows: * * `B = clamp > 0.0f ? min(clamp, b) : (clamp < 0.0f ? max(clamp, b) : b)` * * Requires the bound pipeline to have depth bias enabled to have effect. * See \ref ngf_rasterization_info::enable_depth_bias. */ void ngf_cmd_set_depth_bias( ngf_render_encoder enc, float const_scale, float slope_scale, float clamp) NGF_NOEXCEPT; /** * \ingroup ngf * * Bind resources for shaders to access. See ngf_resource_bind_op for more information. * * @param enc The handle to the render encoder object to record the command into. * @param bind_operations A pointer to a contiguous array of \ref ngf_resource_bind_op objects. * @param nbinds The number of elements in the array pointed to by \ref bind_operations. */ void ngf_cmd_bind_resources( ngf_render_encoder enc, const ngf_resource_bind_op* bind_operations, uint32_t nbind_operations) NGF_NOEXCEPT; /** * \ingroup ngf * * Bind resources for shaders to access. See ngf_resource_bind_op for more information. * * @param enc The handle to the render encoder object to record the command into. * @param bind_operations A pointer to a contiguous array of \ref ngf_resource_bind_op objects. * @param nbinds The number of elements in the array pointed to by \ref bind_operations. */ void ngf_cmd_bind_compute_resources( ngf_compute_encoder enc, const ngf_resource_bind_op* bind_operations, uint32_t nbind_operations) NGF_NOEXCEPT; /** * \ingroup ngf * * Binds a vertex attribute buffer to be used in the next draw. 
* * @param enc The handle to the render encoder. * @param vbuf The handle to the vertex buffer to bind. * @param binding The vertex buffer binding ID to bind the buffer to. * @param offset The offset (in bytes) to bind at. */ void ngf_cmd_bind_attrib_buffer( ngf_render_encoder enc, ngf_buffer vbuf, uint32_t binding, size_t offset) NGF_NOEXCEPT; /** * \ingroup ngf * * Binds an index buffer to be used in the next draw. * * @param enc The handle to the render encoder. * @param idxbuf The handle to the index buffer to bind. * @param offset The offset at which to bind the buffer (in bytes). * @param index_type The type of values that are stored in the index buffer. Can be either \ref * NGF_TYPE_UINT32 or \ref NGF_TYPE_UINT16. */ void ngf_cmd_bind_index_buffer( ngf_render_encoder enc, ngf_buffer idxbuf, size_t offset, ngf_type index_type) NGF_NOEXCEPT; /** * \ingroup ngf * * Executes a draw. * This command is not supported by compute-type command buffers. * * @param enc The render encoder to record the command into. * @param indexed Indicates whether the draw uses an index buffer or not. * @param first_element Offset of the first vertex. * @param nelements Number of vertices to process. * @param ninstances Number of instances (use `1` for regular non-instanced draws). */ void ngf_cmd_draw( ngf_render_encoder enc, bool indexed, uint32_t first_element, uint32_t nelements, uint32_t ninstances) NGF_NOEXCEPT; /** * \ingroup ngf * * Encodes a compute shader dispatch. * This command is not supported by draw-type buffers. * * @param enc The encoder to record the command into. * @param x_threadgroups Number of threadgroups along the X dimension of the grid. * @param y_threadgroups Number of threadgroups along the Y dimension of the grid. * @param z_threadgroups Number of threadgroups along the Z dimension of the grid. 
*/ void ngf_cmd_dispatch( ngf_compute_encoder enc, uint32_t x_threadgroups, uint32_t y_threadgroups, uint32_t z_threadgroups) NGF_NOEXCEPT; /** * \ingroup ngf * * Copies data between buffers. * * @param enc The handle to the transfer encoder object to record the command into. * @param src The handle to the buffer object to be copied from. * @param dst The handle to the buffer object to be copied into. * @param size The size of the copied region, in bytes. * @param src_offset The offset in the source buffer to copy from. * @param dst_offset The offset in the destination buffer to copy into. */ void ngf_cmd_copy_buffer( ngf_xfer_encoder enc, ngf_buffer src, ngf_buffer dst, size_t size, size_t src_offset, size_t dst_offset) NGF_NOEXCEPT; /** * \ingroup ngf * * Copies data from a buffer into an image. * * For non-compressed formats, the source data is assumed to be arranged in a simple linear layout. * Cubemap faces and layers are assumed to be stored successively in the source buffer, from first * to last. For each layer, the first texel corresponds to the lower left corner of the image, and * the subsequent texels progress from left to right, through the remainder of the bottom row, and * from then on, through higher rows. * * @param enc The handle to the transfer encoder object to record the command into. * @param src The handle to the buffer object to be copied from. * @param dst The image that the data from the buffer shall be written into. * @param writes A pointer to an array of \ref ngf_image_write objects, each describing a write to a * mip level of the image to be written. * @param nwrites Number of objects in the `writes` array. */ void ngf_cmd_write_image( ngf_xfer_encoder enc, ngf_buffer src, ngf_image dst, const ngf_image_write* writes, uint32_t nwrites) NGF_NOEXCEPT; /** * \ingroup ngf * * Copies data from an image to a buffer. * * @param enc The handle to the transfer encoder object to record the command into. 
* @param src Reference to the image region that shall be copied from. * @param src_offset The offset in the source image from which to start copying. * @param src_extent The size of the region in the source mip level being copied. * @param nlayers The number of layers to be copied. * @param dst The handle to the buffer object that shall be written into. * @param dst_offset The offset in the destination buffer to write into, in bytes. */ void ngf_cmd_copy_image_to_buffer( ngf_xfer_encoder enc, const ngf_image_ref src, ngf_offset3d src_offset, ngf_extent3d src_extent, uint32_t nlayers, ngf_buffer dst, size_t dst_offset) NGF_NOEXCEPT; /** * \ingroup ngf * * Generates mipmaps automatically. * * Mipmaps are generated for all layers of the given image, from level 1 to the maximum level * specified when creating the image, using the data from the preceding level as the source. Level 0 * of each layer is expected to be populated by the application code prior to calling this function. * * @param xfenc A transfer command encoder. * @param img The handle to the image to operate on. */ ngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT; /** * \ingroup ngf * * Records the beginning of a "debug group" into the given command buffer. * * Debug groups are a way to group together related commands for easier viewing in graphics * debugging tools such as RenderDoc. They do not have any other functional impact. Debug groups * have to be enabled during initialization. See \ref ngf_diagnostic_info. * * This command records a marker into the given command buffer indicating that the subsequent * commands recorded into the buffer pertain to a certain debug group. * * @param cmd_buffer The command buffer to record the debug group start marker into. * @param name The name of the debug group that will appear in debugging tools. 
*/ void ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buffer, const char* name) NGF_NOEXCEPT; /** * \ingroup ngf * * Records the end of a "debug group" into the given command buffer. * * This command records a marker into the given command buffer that terminates the current debug * group if there is one. Subsequent commands recorded into the buffer shall not pertain to any * debug group until a new one is started. * * @param cmd_buffer The command buffer to record the debug group end marker into. */ void ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT; /** * \ingroup ngf * Triggers RenderDoc Capture. * * Captures the next frame from the active window in the current context. * If called, subsequent calls to \ref ngf_renderdoc_capture_begin and \ref * ngf_renderdoc_capture_end will do nothing until after the next frame that * ngf_renderdoc_capture_next_frame was called (i.e. you cannot do nested captures). */ void ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT; /** * \ingroup ngf * Begins RenderDoc Capture. * * Begins frame capture for the active window in the current context. * Ended by \ref ngf_renderdoc_capture_end. */ void ngf_renderdoc_capture_begin() NGF_NOEXCEPT; /** * \ingroup ngf * Ends RenderDoc Capture. * * Ends frame capture for the active window in the current context. 
*/ void ngf_renderdoc_capture_end() NGF_NOEXCEPT; #ifdef _MSC_VER #pragma endregion #endif #ifdef __cplusplus } #endif ================================================ FILE: misc/common/CMakeLists.txt ================================================ #[[ Copyright (c) 2023 nicegraf contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
]] cmake_minimum_required(VERSION 3.23.3) project(nicegraf-misc-common) set(CMAKE_C_STANDARD 99) set(CMAKE_CXX_STANDARD 20) include("${CMAKE_CURRENT_LIST_DIR}/../../build-utils.cmake") if (WIN32) set(NICESHADE_PLATFORM win PARENT_SCOPE) set(NGF_BACKEND nicegraf-vk) elseif(APPLE) set(NICESHADE_PLATFORM macos PARENT_SCOPE) if (NGF_USE_MVK STREQUAL "yes") set(NGF_BACKEND nicegraf-vk) else() set(NGF_BACKEND nicegraf-mtl) endif() elseif(UNIX AND NOT APPLE) set(NICESHADE_PLATFORM linux PARENT_SCOPE) set(NGF_BACKEND nicegraf-vk) else() message(FATAL_ERROR "Your platform is not currently supported by nicegraf.") endif() nmk_header_library(NAME nicegraf PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/../../include PUB_DEPS ${NGF_BACKEND}) string(TOUPPER "NGF_BACKEND_${NGF_BACKEND}" NGF_BACKEND_DEFINE) string(REPLACE "-" "_" NGF_BACKEND_DEFINE ${NGF_BACKEND_DEFINE}) if (NGF_BUILD_SAMPLES STREQUAL "yes") nmk_static_library(NAME nicegraf-misc-common SRCS ${CMAKE_CURRENT_LIST_DIR}/shader-loader.h ${CMAKE_CURRENT_LIST_DIR}/shader-loader.cpp ${CMAKE_CURRENT_LIST_DIR}/file-utils.h ${CMAKE_CURRENT_LIST_DIR}/file-utils.cpp ${CMAKE_CURRENT_LIST_DIR}/mesh-loader.cpp ${CMAKE_CURRENT_LIST_DIR}/mesh-loader.h ${CMAKE_CURRENT_LIST_DIR}/targa-loader.cpp ${CMAKE_CURRENT_LIST_DIR}/targa-loader.h ${CMAKE_CURRENT_LIST_DIR}/logging.h DEPS nicegraf PVT_DEFINES ${NGF_BACKEND_DEFINE} PUB_INCLUDES ${CMAKE_CURRENT_LIST_DIR}/) endif() ================================================ FILE: misc/common/check.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, 
subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "logging.h" #include #include #include #pragma warning(disable:26812) namespace ngf_misc { #define NGF_MISC_CHECK_NGF_ERROR(expr) \ { \ const ngf_error err = (expr); \ if (err != NGF_ERROR_OK) { \ ::ngf_misc::loge("nicegraf error %d (file %s line %d), aborting.\n", err, __FILE__, __LINE__); \ fflush(stderr); \ abort(); \ } \ } #define NGF_MISC_ASSERT(expr) \ { \ if (!(expr)) { \ ::ngf_misc::loge("assertion %s failed (file %s line %d)\n", #expr, __FILE__, __LINE__); \ fflush(stderr); \ abort(); \ } \ } } // namespace ngf_misc ================================================ FILE: misc/common/file-utils.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "file-utils.h" #include #include namespace ngf_misc { std::vector load_file(const char* file_name) { std::basic_ifstream fs(file_name, std::ios::binary | std::ios::in); if (!fs.is_open()) { throw std::runtime_error{ file_name }; } return std::vector { std::istreambuf_iterator(fs), std::istreambuf_iterator() }; } } // namespace ngf_common ================================================ FILE: misc/common/file-utils.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include #include #if defined(_WIN64) || defined(_WIN32) #define NGF_MISC_PATH_SEPARATOR "\\" #else #define NGF_MISC_PATH_SEPARATOR "/" #endif namespace ngf_misc { std::vector load_file(const char* file_name); } ================================================ FILE: misc/common/logging.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include #include namespace ngf_misc { inline void vlog_msg(char prefix, const char* fmt, va_list args) { auto file = prefix == 'E' ? 
stderr : stdout; fprintf(file, "\n[%c] ", prefix); vfprintf(file, fmt, args); fprintf(file, "\n"); } inline void vloge(const char* fmt, va_list args) { vlog_msg('E', fmt, args); } inline void vlogi(const char* fmt, va_list args) { vlog_msg('I', fmt, args); } inline void vlogd(const char* fmt, va_list args) { vlog_msg('D', fmt, args); } inline void loge(const char* fmt, ...) { va_list args; va_start(args, fmt); vloge(fmt, args); va_end(args); } inline void logi(const char* fmt, ...) { va_list args; va_start(args, fmt); vlogi(fmt, args); va_end(args); } inline void logd(const char* fmt, ...) { #if !defined(NDEBUG) va_list args; va_start(args, fmt); vlogd(fmt, args); va_end(args); #else (void)fmt; #endif } } // namespace ngf_misc ================================================ FILE: misc/common/mesh-loader.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "mesh-loader.h" #include "check.h" #include #include namespace ngf_misc { static void read_into_mapped_buffer(FILE* f, ngf_buffer buf, size_t data_size) { void* mapped_buffer_mem = ngf_buffer_map_range(buf, 0u, data_size); const size_t read_elements = fread(mapped_buffer_mem, sizeof(char), data_size, f); NGF_MISC_ASSERT(read_elements == data_size); ngf_buffer_flush_range(buf, 0, data_size); ngf_buffer_unmap(buf); } mesh load_mesh_from_file(const char* mesh_file_name, ngf_xfer_encoder xfenc) { mesh result; FILE* mesh_file = fopen(mesh_file_name, "rb"); NGF_MISC_ASSERT(mesh_file != NULL); /* Indicates to skip staging buffers and copy directly to device-local memory if possible. */ const bool skip_staging = ngf_get_device_capabilities()->device_local_memory_is_host_visible; /** * Read the "file header" - 4-byte field with the lowest bit indicating * the presence of normals, and the second-lowest bit indicating the * presence of UV coordinates (position attribute is always assumed). */ uint32_t header = 0u; size_t read_elements = 0u; read_elements = fread(&header, sizeof(header), 1u, mesh_file); NGF_MISC_ASSERT(read_elements == 1u); result.have_normals = header & 1; result.have_uvs = header & 2; /** * Read the total size of the vertex data. Depending on device capabilities, * read it all directly into the GPU buffer, or read into a staging buffer. 
*/ uint32_t vertex_data_size = 0u; read_elements = fread(&vertex_data_size, sizeof(vertex_data_size), 1u, mesh_file); NGF_MISC_ASSERT(read_elements == 1u); const ngf_buffer_info vertex_data_staging_buffer_info = { .size = vertex_data_size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC, }; ngf::buffer vertex_data_staging_buffer; if (!skip_staging) { NGF_MISC_CHECK_NGF_ERROR( vertex_data_staging_buffer.initialize(vertex_data_staging_buffer_info)); } const ngf_buffer_info vertex_data_buffer_info = { .size = vertex_data_staging_buffer_info.size, .storage_type = skip_staging ? NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE : NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST, }; NGF_MISC_CHECK_NGF_ERROR(result.vertex_data.initialize(vertex_data_buffer_info)); read_into_mapped_buffer( mesh_file, skip_staging ? result.vertex_data.get() : vertex_data_staging_buffer.get(), vertex_data_size); /** * Read the number of indices in the mesh. If number of indices is 0, the * mesh is considered to not have an index buffer, and a non-indexed draw call * should be used to render it. */ read_elements = fread(&result.num_indices, sizeof(uint32_t), 1, mesh_file); NGF_MISC_ASSERT(read_elements == 1u); /** * Allocate buffer(s) for the index data, and read the index data. * As before, we try to read directly into the GPU buffer if the device allows it. */ ngf::buffer index_data_staging_buffer; const ngf_buffer_info index_data_staging_buffer_info = { .size = sizeof(uint32_t) * result.num_indices, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC, }; const ngf_buffer_info index_data_buffer_info = { .size = sizeof(uint32_t) * result.num_indices, .storage_type = skip_staging ? 
NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE : NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_INDEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST, }; if (result.num_indices > 0) { NGF_MISC_CHECK_NGF_ERROR(result.index_data.initialize(index_data_buffer_info)); if (!skip_staging) { NGF_MISC_CHECK_NGF_ERROR( index_data_staging_buffer.initialize(index_data_staging_buffer_info)); } read_into_mapped_buffer( mesh_file, skip_staging ? result.index_data.get() : index_data_staging_buffer.get(), index_data_staging_buffer_info.size); } /** * Record commands to upload staging data if we have to. */ if (!skip_staging) { ngf_cmd_copy_buffer( xfenc, vertex_data_staging_buffer.get(), result.vertex_data.get(), vertex_data_buffer_info.size, 0u, 0u); if (result.num_indices > 0) { ngf_cmd_copy_buffer( xfenc, index_data_staging_buffer.get(), result.index_data.get(), index_data_staging_buffer_info.size, 0u, 0u); } } return result; } } // namespace ngf_samples ================================================ FILE: misc/common/mesh-loader.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include namespace ngf_misc { struct mesh { ngf::buffer vertex_data; ngf::buffer index_data; uint32_t num_indices; bool have_normals; bool have_uvs; }; mesh load_mesh_from_file(const char* file_name, ngf_xfer_encoder xfenc); } ================================================ FILE: misc/common/shader-loader.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "shader-loader.h" #include "file-utils.h" #include "check.h" #include #include namespace ngf_misc { #if defined(NGF_BACKEND_NICEGRAF_VK) #define SHADER_EXTENSION ".spv" #elif defined(NGF_BACKEND_NICEGRAF_MTL) || defined(NGF_BACKEND_NICEGRAF_MTL_CPP) #define SHADER_EXTENSION ".21.msl" #else #error "build system needs to define samples backend" #endif ngf::shader_stage load_shader_stage(const char* shader_file_name, const char* entry_point_name, ngf_stage_type type) { constexpr const char* shaders_root_dir = "shaders" NGF_MISC_PATH_SEPARATOR; constexpr const char* stage_to_file_ext_map[] = {"vs", "ps", "cs"}; const std::string file_name = shaders_root_dir + std::string(shader_file_name) + "." + stage_to_file_ext_map[type] + SHADER_EXTENSION; const std::vector content = load_file(file_name.c_str()); ngf_shader_stage_info stage_info = { .type = type, .content = reinterpret_cast(content.data()), .content_length = (uint32_t)content.size(), .debug_name = "", .entry_point_name = entry_point_name}; ngf::shader_stage stage; NGF_MISC_CHECK_NGF_ERROR(stage.initialize(stage_info)); return stage; } } // namespace ngf_misc ================================================ FILE: misc/common/shader-loader.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include namespace ngf_misc { ngf::shader_stage load_shader_stage(const char* shader_file_name, const char* entry_point_name, ngf_stage_type type); } ================================================ FILE: misc/common/targa-loader.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "targa-loader.h" #include #include #include namespace ngf_misc { namespace tga { /* image type constants */ enum class img_type : uint8_t { none = 0, color_mapped = 1, true_color = 2, black_and_white = 3, color_mapped_rle = 9, true_color_rle = 10, black_and_white_rle = 11 }; /* targa structures */ #pragma pack(push, 1) struct cmap { uint16_t first_entry_idx; uint16_t num_entries; uint8_t bits_per_entry; }; struct image { uint16_t x_origin; uint16_t y_origin; uint16_t width; uint16_t height; uint8_t bitsperpel; uint8_t descriptor; }; struct header { uint8_t id_length; uint8_t has_cmap; img_type type; cmap cmap_entry; image img; }; struct footer { uint32_t ext_offset; uint32_t dev_offset; char sig[18]; }; #pragma pack(pop) } // namespace tga namespace { float srgb_to_linear(uint8_t srgb_value) { const float srgb_valuef = (float)srgb_value / 255.0f; return srgb_valuef <= 0.04045f ? (srgb_valuef / 12.92f) : powf(((srgb_valuef + 0.055f) / 1.055f), 2.4f); } uint8_t linear_to_srgb(float linear_value) { const float srgb_valuef = linear_value <= 0.0031308f ? (12.92f * linear_value) : (1.055f * powf(linear_value, 1.0f / 2.4f) - 0.055f); return (uint8_t)(std::min(1.0f, srgb_valuef) * 255.0f); } } // namespace void load_targa( const void* in_buf, size_t in_buf_size, void* out_buf, size_t out_buf_size, uint32_t* width_px, uint32_t* height_px) { auto in_bytes = (const char*)in_buf; auto out_bytes = (char*)out_buf; /* obtain header and footer data. */ auto hdr = (const tga::header*)in_buf; auto ftr = (const tga::footer*)&in_bytes[in_buf_size - sizeof(tga::footer)]; /* write width and height outputs. */ *width_px = hdr->img.width; *height_px = hdr->img.height; /* if the output buffer pointer is null, we're done. */ if (out_buf == nullptr) { return; } /* compute expected output size and check if it fits into the provided output buffer. 
*/ const size_t expected_output_size = 4u * hdr->img.width * hdr->img.height; if (expected_output_size > out_buf_size) { throw std::runtime_error("buffer overflow"); } /* verify that footer is valid. */ const char* expected_sig = "TRUEVISION-XFILE."; for (size_t si = 0; si < sizeof(ftr->sig); ++si) { if (ftr->sig[si] != expected_sig[si]) { throw std::runtime_error("tga signature not found"); } } /* only rle-encoded true-color images are allowed. */ if (hdr->type != tga::img_type::true_color_rle) { throw std::runtime_error("unsupported tga feature detected"); } const bool has_alpha = (hdr->img.descriptor & 0x08) != 0; /* obtain extension data offset. */ const size_t ext_offset = ftr->ext_offset; /* read 'attributes type' field to determine whether alpha is premultiplied. if no extension section is present, assume non-premultiplied alpha. */ const char attr_type = !has_alpha || ext_offset == 0 ? 3 : in_bytes[ext_offset + 494]; if (attr_type != 3 && attr_type != 4) { throw std::runtime_error("invalid attribute type"); } const bool is_premul_alpha = attr_type == 4; /* read and decode image data, writing result to output. */ const char* img_data = in_bytes + sizeof(tga::header) + hdr->id_length; size_t written_pixels = 0; const size_t bytes_per_pel = has_alpha ? 4 : 3; while (written_pixels < hdr->img.width * hdr->img.height && img_data - in_bytes < (ptrdiff_t)in_buf_size) { const char packet_hdr = *img_data; const bool is_rle_packet = packet_hdr & 0x80; const size_t packet_length = 1u + (packet_hdr & 0x7f); ++img_data; /* advance img. data to point to start of packet data. */ for (size_t p = 0u; p < packet_length; ++p) { /* pixel data is stored as BGRA. */ const uint8_t a = has_alpha ? (uint8_t)img_data[3] : 0xff; const float af = (float)a / 255.0f; auto premul = [&](uint8_t v) { if (is_premul_alpha || !has_alpha) return v; else { /* need to convert from sRGB to linear, premultiply then convert back. 
*/ const float linear = srgb_to_linear(v); const float linear_premul = linear * af; return linear_to_srgb(linear_premul); } }; const uint8_t b = premul((uint8_t)img_data[0]), g = premul((uint8_t)img_data[1]), r = premul((uint8_t)img_data[2]); out_bytes[written_pixels * 4u + 0] = (char)r; out_bytes[written_pixels * 4u + 1] = (char)g; out_bytes[written_pixels * 4u + 2] = (char)b; out_bytes[written_pixels * 4u + 3] = (char)a; ++written_pixels; if (!is_rle_packet) img_data += bytes_per_pel; } if (is_rle_packet) img_data += bytes_per_pel; } if (img_data - in_bytes >= (ptrdiff_t)in_buf_size) { throw std::runtime_error("buffer overflow"); } } } // namespace ngf_samples ================================================ FILE: misc/common/targa-loader.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #pragma once #include #include namespace ngf_misc { /** * Decodes an RLE-encoded true color targa file with an optional * alpha channel into the target buffer. * Assumes the source file uses sRGB color space. * If `out_buf` is non-NULL, raw RGBA values, in sRGB, with * premultiplied alpha, will be written to it. The width and * height of the image are returned in the output parameters. * If `out_buf` is NULL, no decoding is performed, however * the width and height of the image are still returned. */ void load_targa( const void* in_buf, size_t in_buf_size, void* out_buf, size_t out_buf_size, uint32_t* width_px, uint32_t* height_px); } // namespace ngf_misc ================================================ FILE: misc/shaders.cmake ================================================ function (ngf_shaders_target) cmake_parse_arguments(SHADERS_TARGET "" "NAME;OUTPUT_DIR;NICESHADE_PATH" "SRCS" ${ARGN}) foreach(source_path ${SHADERS_TARGET_SRCS}) file(STRINGS ${source_path} tech_lines REGEX "// *T *: *([a-zA-Z0-9_]+)") if (tech_lines) set(tech_names "") foreach(tech_line ${tech_lines}) string(REPLACE ":" ";" tmp ${tech_line}) list(GET tmp 1 tmp) string(STRIP "${tmp}" tmp) string(REGEX REPLACE " +" ";" tmp ${tmp}) list(GET tmp 0 tech_name) list(APPEND tech_names "${tech_name}") endforeach(tech_line) set(output_files_list "") get_filename_component(header_file_name ${source_path} NAME_WE) if(NOT ${header_file_name} MATCHES "compute-*") foreach(tech ${tech_names}) list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.vs.21.msl") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.ps.21.msl") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.vs.spv") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.ps.spv") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.pipeline") endforeach(tech) else() foreach(tech ${tech_names}) list(APPEND output_files_list 
"${SHADERS_TARGET_OUTPUT_DIR}/${tech}.cs.21.msl") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.cs.spv") list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${tech}.pipeline") endforeach(tech) endif() list(APPEND output_files_list "${SHADERS_TARGET_OUTPUT_DIR}/${header_file_name}_binding_consts.h") add_custom_command(OUTPUT ${output_files_list} MAIN_DEPENDENCY ${source_path} COMMAND ${SHADERS_TARGET_NICESHADE_PATH}/niceshade ARGS ${source_path} "-t" "msl21" "-t" "spv" "-O" "${SHADERS_TARGET_OUTPUT_DIR}" "-h" "${header_file_name}_binding_consts.h") #WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/samples/shaders) set(generated_shaders_list "${output_files_list};${generated_shaders_list}") endif() endforeach(source_path) add_custom_target(${SHADERS_TARGET_NAME} DEPENDS ${generated_shaders_list}) endfunction() ================================================ FILE: samples/00-template/sample-impl.cpp ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "sample-interface.h" #include namespace ngf_samples { struct sample_data { uint32_t magic_number = 0xdeadbeef; }; void* sample_initialize(uint32_t , uint32_t ) { printf("sample initializing.\n"); auto d = new sample_data{}; d->magic_number = 0xbadf00d; printf("sample initialization complete.\n"); return static_cast(d); } void sample_draw_frame( ngf_frame_token , uint32_t , uint32_t , float , void* ) { //auto data = static_cast(userdata); //printf("drawing frame %d (w %d h %d) at time %f magic number 0x%x\n", frame_token, width, height, time, data->magic_number); } void sample_draw_ui(void*) { } void sample_shutdown(void* userdata) { auto data = static_cast(userdata); delete data; printf("shutting down\n"); } } ================================================ FILE: samples/01-fullscreen-triangle/fullscreen-triangle.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "check.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "sample-interface.h" #include "shader-loader.h" #include using namespace ngf_misc; namespace ngf_samples { namespace fullscreen_triangle { struct state { ngf::graphics_pipeline pipeline; }; } // namespace fullscreen_triangle void* sample_initialize( uint32_t, uint32_t, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /*xfer_encoder*/) { auto state = new fullscreen_triangle::state {}; /** * Load the shader stages. * Note that these are only necessary when creating pipeline objects. * After the pipeline objects have been created, the shader stage objects * can be safely discarded. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("fullscreen-triangle", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("fullscreen-triangle", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Initialize the pipeline object. 
*/ NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = static_cast(userdata); ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline.get()); const ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); /** * Make a drawcall. */ ngf_cmd_draw(main_render_pass, false, 0, 3, 1); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void*) { } void sample_shutdown(void* userdata) { auto state = static_cast(userdata); delete state; } } // namespace ngf_samples ================================================ FILE: samples/02-render-to-texture/render-to-texture.cpp ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "sample-interface.h" #include "nicegraf-wrappers.h" #include "nicegraf-util.h" #include "shader-loader.h" #include "check.h" #include using namespace ngf_misc; namespace ngf_samples { struct render_to_texture_data { ngf::render_target default_rt; ngf::render_target offscreen_rt; ngf::graphics_pipeline blit_pipeline; ngf::graphics_pipeline offscreen_pipeline; ngf::image rt_texture; ngf::sampler sampler; }; void* sample_initialize( uint32_t, uint32_t, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /*xfer_encoder*/) { auto state = new render_to_texture_data{}; /* Create the image to render to. */ const ngf_extent3d img_size { 512u, 512u, 1u }; const ngf_image_info img_info { NGF_IMAGE_TYPE_IMAGE_2D, img_size, 1u, 1u, NGF_IMAGE_FORMAT_BGRA8_SRGB, NGF_SAMPLE_COUNT_1, NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_ATTACHMENT }; NGF_MISC_CHECK_NGF_ERROR(state->rt_texture.initialize(img_info)); /* Create the offscreen render target.*/ const ngf_attachment_description offscreen_color_attachment_description { .type = NGF_ATTACHMENT_COLOR, .format = NGF_IMAGE_FORMAT_BGRA8_SRGB, .sample_count = NGF_SAMPLE_COUNT_1, }; const ngf_attachment_descriptions attachments_list = { .descs = &offscreen_color_attachment_description, .ndescs = 1u, }; const ngf_image_ref img_ref = { .image = state->rt_texture.get(), .mip_level = 0u, .layer = 0u, .cubemap_face = NGF_CUBEMAP_FACE_COUNT }; ngf_render_target_info rt_info { &attachments_list, &img_ref }; NGF_MISC_CHECK_NGF_ERROR(state->offscreen_rt.initialize(rt_info)); /** * Load shader stages. 
*/ const ngf::shader_stage blit_vertex_stage = load_shader_stage("simple-texture", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage blit_fragment_stage = load_shader_stage("simple-texture", "PSMain", NGF_STAGE_FRAGMENT); const ngf::shader_stage offscreen_vertex_stage = load_shader_stage("small-triangle", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage offscreen_fragment_stage = load_shader_stage("small-triangle", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline for blit. */ ngf_util_graphics_pipeline_data blit_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data); blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; ngf_graphics_pipeline_info &blit_pipe_info = blit_pipeline_data.pipeline_info; blit_pipe_info.nshader_stages = 2u; blit_pipe_info.shader_stages[0] = blit_vertex_stage.get(); blit_pipe_info.shader_stages[1] = blit_fragment_stage.get(); blit_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info)); /** * Create pipeline for offscreen pass. 
*/ ngf_util_graphics_pipeline_data offscreen_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&offscreen_pipeline_data); ngf_graphics_pipeline_info &offscreen_pipe_info = offscreen_pipeline_data.pipeline_info; offscreen_pipe_info.nshader_stages = 2u; offscreen_pipe_info.shader_stages[0] = offscreen_vertex_stage.get(); offscreen_pipe_info.shader_stages[1] = offscreen_fragment_stage.get(); offscreen_pipe_info.compatible_rt_attachment_descs = &attachments_list; NGF_MISC_CHECK_NGF_ERROR(state->offscreen_pipeline.initialize(offscreen_pipe_info)); /* Create sampler.*/ const ngf_sampler_info samp_info { NGF_FILTER_LINEAR, NGF_FILTER_LINEAR, NGF_FILTER_NEAREST, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, 0.0f, 0.0f, 0.0f, 1.0f, false }; NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /* time_delta */, ngf_frame_token frame_token, uint32_t w, uint32_t h, float , void* userdata) { auto state = reinterpret_cast(userdata); ngf_irect2d offsc_viewport {0, 0, 512, 512}; ngf_irect2d onsc_viewport {0, 0, w, h}; ngf_cmd_buffer offscr_cmd_buf = nullptr; ngf_cmd_buffer_info cmd_info = {}; ngf_create_cmd_buffer(&cmd_info, &offscr_cmd_buf); ngf_start_cmd_buffer(offscr_cmd_buf, frame_token); { ngf::render_encoder renc {offscr_cmd_buf, state->offscreen_rt, .0f, 0.0f, 0.0f, 0.0f, 1.0, 0u}; ngf_cmd_bind_gfx_pipeline(renc, state->offscreen_pipeline); ngf_cmd_viewport(renc, &offsc_viewport); ngf_cmd_scissor(renc, &offsc_viewport); ngf_cmd_draw(renc, false, 0u, 3u, 1u); } ngf_submit_cmd_buffers(1, &offscr_cmd_buf); ngf_destroy_cmd_buffer(offscr_cmd_buf); ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline); ngf_cmd_viewport(main_render_pass, &onsc_viewport); ngf_cmd_scissor(main_render_pass, &onsc_viewport); ngf::cmd_bind_resources( main_render_pass, 
ngf::descriptor_set<0>::binding<1>::texture(state->rt_texture.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_draw_ui(void*) {} void sample_post_submit(void*){} void sample_shutdown(void* userdata) { auto data = static_cast(userdata); delete data; printf("shutting down\n"); } } ================================================ FILE: samples/03-uniform-buffers/uniform-buffers.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "check.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "sample-interface.h" #include "shader-loader.h" #include #include #include using namespace ngf_misc; namespace ngf_samples { namespace uniform_buffers { struct shader_uniform_values { float scale_a = 0.0f; float scale_b = 0.5f; float time = 0.0f; float aspect = 1.0f; float theta = 0.0f; }; struct state { ngf::graphics_pipeline polygon_pipeline; ngf::buffer uniform_buffer; size_t uniform_buffer_offset = 0u; size_t aligned_uniform_data_size = 0u; shader_uniform_values uniform_values; int n = 6; float growth_speed = 1.f; bool growing = true; }; } // namespace uniform_buffers static float theta_for_n(int n) { return 2.0f * 3.1415926f / static_cast(n); } static float min_scale_for_ngon(int n) { float a = theta_for_n(n); return (1.0f - sinf(a) * tanf(a / 2.0f)); } void* sample_initialize( uint32_t, uint32_t, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /*xfer_encoder*/) { auto state = new uniform_buffers::state {}; /** * Pre-initialize some uniform variables. */ state->uniform_values.scale_a = state->uniform_values.scale_b * min_scale_for_ngon(state->n); state->uniform_values.theta = theta_for_n(state->n); /** * Load shader stages. */ const ngf::shader_stage polygon_vertex_stage = load_shader_stage("polygon", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage polygon_fragment_stage = load_shader_stage("polygon", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline. 
*/ ngf_util_graphics_pipeline_data polygon_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&polygon_pipeline_data); polygon_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; polygon_pipeline_data.rasterization_info.cull_mode = NGF_CULL_MODE_NONE; ngf_graphics_pipeline_info& polygon_pipe_info = polygon_pipeline_data.pipeline_info; polygon_pipe_info.nshader_stages = 2u; polygon_pipe_info.shader_stages[0] = polygon_vertex_stage.get(); polygon_pipe_info.shader_stages[1] = polygon_fragment_stage.get(); polygon_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); polygon_pipeline_data.input_assembly_info.primitive_topology = NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; NGF_MISC_CHECK_NGF_ERROR(state->polygon_pipeline.initialize(polygon_pipe_info)); /** * Create the uniform buffer. * We need to write to the buffer every frame from the CPU. However, as we're preparing the * data for the next frame, the GPU might still be rendering the current frame. Modifying the * buffer at that time would lead to a data race. To avoid it, we employ a triple bufferinga * strategy: * - assume we need N bytes for the uniform buffer * - allocate 3*N bytes for the buffer; * - ensure that while GPU reads data at offset i*N, the CPU writes at ((i + 1) mod 3) * N. * This ensures that, as long as the CPU is no more than 2 frames ahead of the GPU, no * data races will happen. * * Note that the offset at which we read/write must have an alignment that is specific to the * GPU. That alignment can be obtained from ngf_get_device_capabilities(). 
*/ const size_t uniform_buffer_offset_alignment = ngf_get_device_capabilities()->uniform_buffer_offset_alignment; const size_t requested_data_size = sizeof(uniform_buffers::shader_uniform_values); state->aligned_uniform_data_size = requested_data_size + (uniform_buffer_offset_alignment - requested_data_size % uniform_buffer_offset_alignment); const size_t uniform_buffer_size = 3 * state->aligned_uniform_data_size; const ngf_buffer_info uniform_buffer_info = { .size = uniform_buffer_size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_UNIFORM_BUFFER}; NGF_MISC_CHECK_NGF_ERROR(state->uniform_buffer.initialize(uniform_buffer_info)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float time_delta, ngf_frame_token /*frame_token*/, uint32_t w, uint32_t h, float, void* userdata) { auto state = reinterpret_cast(userdata); /** * Update the values for the uniform buffer. */ uniform_buffers::shader_uniform_values& uniforms = state->uniform_values; const float max_scale = uniforms.scale_b; float min_scale = max_scale * min_scale_for_ngon(state->n); const bool growing = state->growing; uniforms.aspect = static_cast(w) / static_cast(h); uniforms.time += time_delta; uniforms.scale_a += (growing ? 1.0f : -1.0f) * time_delta * (state->growth_speed); const bool evolve_ngon = (growing && uniforms.scale_a >= max_scale) || (!growing && uniforms.scale_a <= min_scale); constexpr int max_ngon_sides = 96; constexpr int min_ngon_sides = 6; const bool switch_phase = evolve_ngon && ((growing && state->n == max_ngon_sides) || (!growing && state->n == min_ngon_sides)); if (switch_phase) { state->growing = !state->growing; } else if (evolve_ngon) { state->n = growing ? (state->n << 1) : (state->n >> 1); uniforms.theta = theta_for_n(state->n); state->growth_speed *= (growing ? 0.5f : 2.0f); uniforms.scale_a = growing ? 
max_scale * min_scale_for_ngon(state->n) : max_scale; } /** * Write the updated values to the uniform buffer at current offset. * Map the range, write the data using memcpy, then flush and unmap. */ void* mapped_uniform_buffer_offset = ngf_buffer_map_range( state->uniform_buffer, state->uniform_buffer_offset, state->aligned_uniform_data_size); memcpy(mapped_uniform_buffer_offset, &state->uniform_values, sizeof(state->uniform_values)); ngf_buffer_flush_range(state->uniform_buffer, 0, state->aligned_uniform_data_size); ngf_buffer_unmap(state->uniform_buffer); /** * Record the rendering commands. */ ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->polygon_pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf::cmd_bind_resources( main_render_pass, ngf::descriptor_set<0>::binding<0>::uniform_buffer( state->uniform_buffer, state->uniform_buffer_offset, state->aligned_uniform_data_size)); ngf_cmd_draw(main_render_pass, false, 0u, (uint32_t)(state->n) * 3, 1u); /** * Update the uniform buffer offset so we write there on the next frame. 
*/ state->uniform_buffer_offset = (state->uniform_buffer_offset + state->aligned_uniform_data_size) % (3 * state->aligned_uniform_data_size); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_draw_ui(void*) { } void sample_post_submit(void*) { } void sample_shutdown(void* userdata) { auto state = static_cast(userdata); delete state; } } // namespace ngf_samples ================================================ FILE: samples/04-texture-sampling/texture-sampling.cpp ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "check.h" #include "file-utils.h" #include "imgui.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include "staging-image.h" #include using namespace ngf_misc; namespace ngf_samples { namespace texture_sampling { struct matrices { struct { nm::float4x4 matrix; char _padding[256 - sizeof(nm::float4x4)]; } m[4]{}; }; struct state { ngf::graphics_pipeline pipeline; ngf::image texture; ngf::sampler samplers[4]; ngf::uniform_multibuffer uniforms; float tilt = 0.0f; float dolly = -5.0f; float pan = 0.0f; }; } // namespace texture_sampling void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto s = new texture_sampling::state {}; /* Prepare a staging buffer for the image. */ staging_image texture_staging_image = create_staging_image_from_tga("assets/tiles.tga"); /* Create the image object. */ ngf_image_info texture_image_info = { .type = NGF_IMAGE_TYPE_IMAGE_2D, .extent = { .width = texture_staging_image.width_px, .height = texture_staging_image.height_px, .depth = 1u, }, .nmips = texture_staging_image.nmax_mip_levels, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_SRGBA8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST}; NGF_MISC_CHECK_NGF_ERROR(s->texture.initialize(texture_image_info)); /* Upload the data from the staging buffer into the 0th mip level of the texture. 
*/ const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {.x = 0, .y = 0, .z = 0u}, .extent = {.width = texture_staging_image.width_px, .height = texture_staging_image.height_px, .depth = 1u}, .dst_level = 0u, .dst_base_layer = 0u, .nlayers = 1u}; ngf_cmd_write_image( xfer_encoder, texture_staging_image.staging_buffer.get(), s->texture.get(), &img_write, 1u); /* Populate the rest of the mip levels automatically. */ ngf_cmd_generate_mipmaps(xfer_encoder, s->texture.get()); /* Create the image sampler objects. */ /* Note that with the nearest-neighbor sampler, we constrain the min and max LOD to 0, in order to limit ourselves to mip level 0 only and demonstrate the effect of sampling without mips. */ NGF_MISC_CHECK_NGF_ERROR(s->samplers[0].initialize(ngf_sampler_info { .min_filter = NGF_FILTER_NEAREST, .mag_filter = NGF_FILTER_NEAREST, .mip_filter = NGF_FILTER_NEAREST, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = 0.0f, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false})); /* Same comment as above regarding the min/max LOD applies in case of the bilinear sampler. 
*/ NGF_MISC_CHECK_NGF_ERROR(s->samplers[1].initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_NEAREST, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = 0.0f, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false})); NGF_MISC_CHECK_NGF_ERROR(s->samplers[2].initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_LINEAR, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = (float)texture_staging_image.nmax_mip_levels, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false})); /* note that with anisotropic sampling, mipmaps are still needed because the specific (hardware-dependent) implementation may access them. */ NGF_MISC_CHECK_NGF_ERROR(s->samplers[3].initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_LINEAR, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = (float)texture_staging_image.nmax_mip_levels, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 16.0f, .enable_anisotropy = true})); /** * Load the shader stages. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("textured-quad", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("textured-quad", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. 
*/ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Initialize the pipeline object. */ s->pipeline.initialize(pipeline_data.pipeline_info); /** * Create the uniform buffer. */ s->uniforms.initialize(3); return static_cast(s); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); /* Compute the perspective transform for the current frame. */ const nm::float4x4 camera_to_clip = nm::perspective( nm::deg2rad(72.0f), static_cast(w) / static_cast(h), 0.01f, 100.0f); /* Build the world-to-camera transform for the current frame. */ nm::float4x4 world_to_camera = nm::translation(nm::float3 {state->pan, 0.0f, state->dolly}) * nm::rotation_x(state->tilt); /* Build the final transform matrices for this frame. 
*/ texture_sampling::matrices uniforms_for_this_frame; for (size_t i = 0; i < sizeof(uniforms_for_this_frame.m) / sizeof(uniforms_for_this_frame.m[0]); ++i) { const nm::float4x4 object_to_world = nm::translation(nm::float3 {-3.0f + (float)i * 2.05f, 0.0f, 0.0f}); uniforms_for_this_frame.m[i].matrix = camera_to_clip * world_to_camera * object_to_world; } state->uniforms.write(uniforms_for_this_frame); ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); for (uint32_t i = 0; i < sizeof(state->samplers) / sizeof(state->samplers[0]); ++i) { ngf::cmd_bind_resources( main_render_pass, state->uniforms .bind_op_at_current_offset(0, 0, 256 * i, sizeof(nm::float4x4)), ngf::descriptor_set<0>::binding<1>::sampler(state->samplers[i]), ngf::descriptor_set<1>::binding<0>::texture(state->texture)); ngf_cmd_draw(main_render_pass, false, 0, 6, 1); } } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Camera control"); ImGui::DragFloat("dolly", &data->dolly, 0.01f, -70.0f, 0.11f); ImGui::DragFloat("pan", &data->pan, 0.01f, -70.0f, 70.0f); ImGui::DragFloat("tilt", &data->tilt, 0.01f, -(nm::PI / 2.0f + 0.01f), nm::PI / 2.0f + 0.01f); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/05-cubemap/cubemap.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to 
use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define _CRT_SECURE_NO_WARNINGS #include "check.h" #include "file-utils.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include "targa-loader.h" #include #include using namespace ngf_misc; namespace ngf_samples { namespace cubemap { struct uniforms { nm::float4x4 rotation; float aspect_ratio; }; struct state { ngf::graphics_pipeline pipeline; ngf::image texture; ngf::sampler sampler; ngf::uniform_multibuffer uniforms_multibuf; float yaw = 0.0f; float pitch = 0.0f; }; } // namespace cubemap void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new cubemap::state {}; /* Load contents of cubemap faces into a staging buffer. 
*/ uint32_t face_width = 0, face_height = 0; ngf::buffer staging_buffer; char* mapped_staging_buffer = nullptr; uint32_t staging_buffer_size = 0u; uint32_t bytes_per_face = 0u; for (uint32_t face = NGF_CUBEMAP_FACE_POSITIVE_X; face < NGF_CUBEMAP_FACE_COUNT; face++) { const std::string file_name = std::string("assets/cube0f") + std::to_string(face) + ".tga"; std::vector cubemap_face_tga_data = load_file(file_name.c_str()); uint32_t width, height; load_targa( cubemap_face_tga_data.data(), cubemap_face_tga_data.size(), nullptr, 0, &width, &height); if (face_width == 0 && face_height == 0) { face_width = width; face_height = height; bytes_per_face = face_width * face_height * 4u; staging_buffer_size = bytes_per_face * NGF_CUBEMAP_FACE_COUNT; staging_buffer.initialize(ngf_buffer_info { .size = staging_buffer_size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC}); mapped_staging_buffer = (char*)ngf_buffer_map_range(staging_buffer.get(), 0, staging_buffer_size); } else if (face_width != width || face_height != height) { loge("All faces of the cubemap must have the same dimensions"); return nullptr; } std::vector cubemap_face_rgba_data; cubemap_face_rgba_data.resize(bytes_per_face); load_targa( cubemap_face_tga_data.data(), cubemap_face_tga_data.size(), cubemap_face_rgba_data.data(), cubemap_face_rgba_data.size(), &width, &height); memcpy( mapped_staging_buffer + face * cubemap_face_rgba_data.size(), cubemap_face_rgba_data.data(), face_width * face_height * 4u); } /* Flush and unmap the staging buffer. */ ngf_buffer_flush_range(staging_buffer.get(), 0, staging_buffer_size); ngf_buffer_unmap(staging_buffer.get()); /* Create the cubemap texture. 
*/ NGF_MISC_CHECK_NGF_ERROR(state->texture.initialize(ngf_image_info { .type = NGF_IMAGE_TYPE_CUBE, .extent = ngf_extent3d {.width = face_width, .height = face_height, .depth = 1}, .nmips = 1u, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_SRGBA8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST})); /* Populate the cubemap texture. */ const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = {face_width, face_height, 1u}, .dst_level = 0u, .dst_base_layer = 0u, .nlayers = NGF_CUBEMAP_FACE_COUNT}; ngf_cmd_write_image(xfer_encoder, staging_buffer.get(), state->texture.get(), &img_write, 1u); /* Create the image sampler. */ /* Same comment as above regarding the min/max LOD applies in case of the bilinear sampler. */ NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_NEAREST, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = 0.0f, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false, .compare_op = NGF_COMPARE_OP_NEVER})); /** * Load the shader stages. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("cubemap", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("cubemap", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. 
*/ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Initialize the pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info)); /** * Create the uniform buffer. */ NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); state->uniforms_multibuf.write( {nm::rotation_y(state->yaw) * nm::rotation_x(state->pitch), (float)w / (float)h}); ngf::cmd_bind_resources( main_render_pass, state->uniforms_multibuf.bind_op_at_current_offset(0, 0), ngf::descriptor_set<0>::binding<1>::texture(state->texture.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void* userdata) { auto state = reinterpret_cast(userdata); ImGui::Begin("Cubemap", nullptr, ImGuiWindowFlags_AlwaysAutoResize); ImGui::SliderFloat("Pitch", &state->pitch, -nm::PI, nm::PI); ImGui::SliderFloat("Yaw", &state->yaw, -nm::PI, nm::PI); ImGui::Text("This sample uses textures by Emil Persson.\n" "Licensed under CC BY 3.0\n" "http://humus.name/index.php?page=Textures"); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples 
================================================ FILE: samples/06-vertex-attribs/vertex-attribs.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "check.h" #include "file-utils.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include "targa-loader.h" #include using namespace ngf_misc; namespace ngf_samples { namespace vertex_attribs { struct uniforms { nm::float4x4 world_to_clip; float timestamp; }; struct state { ngf::graphics_pipeline pipeline; ngf::image object_texture; ngf::sampler trilinear_sampler; ngf::uniform_multibuffer uniforms_multibuf; ngf::buffer per_instance_data; ngf::texel_buffer_view per_instance_data_view; ngf::buffer vertex_attrib_buffer; ngf::buffer index_buffer; float dolly = -130.0f; float vfov = 60.0f; }; /** * The model instances are arraged in a (slightly perturbed) grid pattern, * this constant controls the size of the grid. */ constexpr int INSTANCES_GRID_SIZE = 128; constexpr size_t INSTANCE_DATA_SIZE = sizeof(float) * 4u * INSTANCES_GRID_SIZE * INSTANCES_GRID_SIZE; /** * The model's raw vertex data (positions and UVs). * A dodecahedron. 
*/ float vertex_data[] = { //clang-format off 0.577350f, 0.577350f, -0.577350f, 0.727805f, 0.749509f, 0.356822f, 0.000000f, -0.934172f, 0.727805f, 0.868727f, 0.000000f, 0.417775f, -0.675973f, 0.645760f, 0.809118f, 0.000000f, 0.934172f, -0.356822f, 0.614422f, 0.712668f, 0.934172f, 0.356822f, 0.000000f, 0.797880f, 0.653059f, 0.675973f, 0.000000f, -0.417775f, 0.829219f, 0.749509f, 0.934172f, -0.356822f, 0.000000f, 0.911264f, 0.689899f, -0.577350f, -0.577350f, 0.577350f, 0.223582f, 0.285757f, -0.934172f, -0.356822f, 0.000000f, 0.336965f, 0.248917f, -0.417775f, -0.675974f, 0.000000f, 0.305627f, 0.345366f, 0.577350f, -0.577350f, -0.577350f, 0.911264f, 0.809118f, -0.577350f, 0.577350f, 0.577350f, 0.544347f, 0.497000f, -0.356822f, 0.000000f, 0.934172f, 0.614422f, 0.400550f, 0.000000f, 0.417775f, 0.675973f, 0.645760f, 0.497000f, -0.356822f, 0.000000f, -0.934172f, 0.614422f, 0.905567f, -0.356822f, 0.000000f, -0.934172f, 0.520423f, 0.308526f, -0.577350f, -0.577350f, -0.577350f, 0.407040f, 0.345366f, -0.675974f, 0.000000f, -0.417775f, 0.438378f, 0.248917f, 0.356822f, 0.000000f, -0.934172f, 0.797880f, 0.845958f, 0.356822f, 0.000000f, -0.934172f, 0.520423f, 0.501425f, 0.577350f, -0.577350f, -0.577350f, 0.407040f, 0.538266f, 0.000000f, -0.417775f, -0.675974f, 0.438378f, 0.441816f, -0.577350f, 0.577350f, -0.577350f, 0.544347f, 0.809118f, 0.417775f, 0.675973f, 0.000000f, 0.696467f, 0.653059f, -0.356822f, 0.000000f, 0.934172f, 0.153507f, 0.189308f, -0.675974f, 0.000000f, 0.417775f, 0.254920f, 0.189308f, 0.577350f, -0.577350f, 0.577350f, 0.153507f, 0.501425f, 0.000000f, -0.934172f, 0.356822f, 0.223582f, 0.404976f, 0.417775f, -0.675973f, 0.000000f, 0.254920f, 0.501425f, -0.577350f, 0.577350f, 0.577350f, 0.501039f, 0.556609f, 0.000000f, 0.934172f, 0.356822f, 0.614422f, 0.593450f, -0.417775f, 0.675974f, 0.000000f, 0.532377f, 0.653059f, 0.000000f, -0.417775f, 0.675974f, 0.141537f, 0.345366f, 0.577350f, 0.577350f, 0.577350f, 0.727805f, 0.556609f, 0.675974f, 0.000000f, 0.417775f, 
0.829219f, 0.556609f, -0.934172f, 0.356822f, 0.000000f, 0.430964f, 0.653059f, -0.577350f, 0.577350f, -0.577350f, 0.501039f, 0.749508f, 0.000000f, -0.934172f, -0.356822f, 0.336965f, 0.441816f, 0.356822f, 0.000000f, 0.934172f, 0.727805f, 0.437391f, -0.934172f, 0.356822f, 0.000000f, 0.407040f, 0.152467f, -0.577350f, 0.577350f, -0.577350f, 0.520423f, 0.189308f, -0.356822f, 0.000000f, -0.934172f, 0.520423f, 0.382207f, -0.577350f, 0.577350f, 0.577350f, 0.223582f, 0.092858f, -0.934172f, 0.356822f, 0.000000f, 0.336965f, 0.129698f, 0.577350f, -0.577350f, -0.577350f, 0.336965f, 0.561035f, 0.934172f, -0.356822f, 0.000000f, 0.223582f, 0.597875f, 0.577350f, -0.577350f, 0.577350f, 0.110198f, 0.441816f, 0.356822f, 0.000000f, 0.934172f, 0.040124f, 0.345366f, -0.356822f, 0.000000f, 0.934172f, 0.110198f, 0.248917f, 0.356822f, 0.000000f, 0.934172f, 0.797880f, 0.460159f, 0.577350f, -0.577350f, 0.577350f, 0.911264f, 0.497000f, 0.934172f, -0.356822f, 0.000000f, 0.911264f, 0.616218f //clang-format on }; /** * The model's index buffer. 
*/ uint32_t index_data[] = { //clang-format off 0, 1, 2, 3, 0, 2, 0, 4, 5, 4, 6, 5, 7, 8, 9, 6, 10, 5, 11, 12, 13, 1, 14, 2, 15, 16, 17, 18, 0, 5, 19, 20, 21, 22, 3, 2, 0, 3, 23, 14, 22, 2, 7, 24, 25, 10, 18, 5, 26, 27, 28, 29, 30, 31, 7, 27, 32, 30, 3, 31, 4, 33, 34, 35, 29, 31, 3, 36, 31, 36, 35, 31, 8, 16, 9, 16, 37, 9, 37, 27, 9, 27, 7, 9, 12, 38, 13, 38, 33, 13, 33, 30, 13, 30, 11, 13, 16, 8, 17, 8, 39, 17, 39, 40, 17, 40, 15, 17, 20, 37, 21, 37, 16, 21, 16, 41, 21, 41, 19, 21, 3, 30, 23, 30, 33, 23, 33, 4, 23, 4, 0, 23, 24, 42, 25, 42, 43, 25, 43, 8, 25, 8, 7, 25, 27, 37, 28, 37, 44, 28, 44, 45, 28, 45, 26, 28, 27, 46, 32, 46, 47, 32, 47, 48, 32, 48, 7, 32, 33, 49, 34, 49, 50, 34, 50, 51, 34, 51, 4, 34, //clang-format on }; } // namespace vertex_attribs void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new vertex_attribs::state {}; /** * Load the shader stages. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("instancing", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("instancing", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Enable depth testing and depth write. */ pipeline_data.depth_stencil_info.depth_test = true; pipeline_data.depth_stencil_info.depth_write = true; /** * Set the compatible render target description. 
*/ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Set up vertex attributes. */ /* attribute descriptions indicate the location and format of individual vertex attributes. */ const ngf_vertex_attrib_desc vertex_attrib_descriptions[] = { {/* position. */ .location = 0u, .binding = 0u, .offset = 0u, .type = NGF_TYPE_FLOAT, .size = 3u, .normalized = false}, {/* UV coordinate. */ .location = 1u, .binding = 0u, .offset = 3u * sizeof(float), .type = NGF_TYPE_FLOAT, .size = 2u, .normalized = false}}; /* buffer binding descriptions indicate _how_ the attributes are fetched from a buffer. */ const ngf_vertex_buf_binding_desc vertex_buf_binding_descriptions[] = {{ .binding = 0u, .stride = sizeof(float) * (3u + 2u), .input_rate = NGF_INPUT_RATE_VERTEX, }}; pipeline_data.vertex_input_info.nattribs = sizeof(vertex_attrib_descriptions) / sizeof(vertex_attrib_descriptions[0]); pipeline_data.vertex_input_info.attribs = vertex_attrib_descriptions; pipeline_data.vertex_input_info.nvert_buf_bindings = sizeof(vertex_buf_binding_descriptions) / sizeof(vertex_buf_binding_descriptions[0]); pipeline_data.vertex_input_info.vert_buf_bindings = vertex_buf_binding_descriptions; /** * Initialize the pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->pipeline.initialize(pipeline_data.pipeline_info)); /** * Create and populate the vertex and index buffers. 
*/ const ngf_buffer_info vertex_buffer_info = { .size = sizeof(vertex_attribs::vertex_data), .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST, }; const ngf_buffer_info index_buffer_info = { .size = sizeof(vertex_attribs::index_data), .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_INDEX_BUFFER | NGF_BUFFER_USAGE_XFER_DST, }; const ngf_buffer_info vertex_staging_buffer_info = { .size = vertex_buffer_info.size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC, }; const ngf_buffer_info index_staging_buffer_info = { .size = index_buffer_info.size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC, }; ngf::buffer vertex_staging_buffer; NGF_MISC_CHECK_NGF_ERROR(vertex_staging_buffer.initialize(vertex_staging_buffer_info)); ngf::buffer index_staging_buffer; NGF_MISC_CHECK_NGF_ERROR(index_staging_buffer.initialize(index_staging_buffer_info)); NGF_MISC_CHECK_NGF_ERROR(state->vertex_attrib_buffer.initialize(vertex_buffer_info)); NGF_MISC_CHECK_NGF_ERROR(state->index_buffer.initialize(index_buffer_info)); void* mapped_vertex_buffer = ngf_buffer_map_range(vertex_staging_buffer.get(), 0u, vertex_staging_buffer_info.size); void* mapped_index_buffer = ngf_buffer_map_range(index_staging_buffer.get(), 0u, index_staging_buffer_info.size); memcpy(mapped_vertex_buffer, vertex_attribs::vertex_data, vertex_staging_buffer_info.size); memcpy(mapped_index_buffer, vertex_attribs::index_data, index_staging_buffer_info.size); ngf_buffer_flush_range(vertex_staging_buffer.get(), 0, vertex_staging_buffer_info.size); ngf_buffer_flush_range(index_staging_buffer.get(), 0, index_staging_buffer_info.size); ngf_buffer_unmap(vertex_staging_buffer.get()); ngf_buffer_unmap(index_staging_buffer.get()); ngf_cmd_copy_buffer( xfer_encoder, vertex_staging_buffer.get(), state->vertex_attrib_buffer.get(), 
vertex_buffer_info.size, 0, 0); ngf_cmd_copy_buffer( xfer_encoder, index_staging_buffer.get(), state->index_buffer.get(), index_buffer_info.size, 0, 0); /** * Create and populate per-instance data. */ const ngf_buffer_info instance_data_buffer_info = { .size = vertex_attribs::INSTANCE_DATA_SIZE, .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_TEXEL_BUFFER | NGF_BUFFER_USAGE_XFER_DST, }; const ngf_buffer_info instance_data_staging_buffer_info = { .size = instance_data_buffer_info.size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC}; ngf::buffer instance_data_staging_buffer; NGF_MISC_CHECK_NGF_ERROR(instance_data_staging_buffer.initialize(instance_data_staging_buffer_info)); NGF_MISC_CHECK_NGF_ERROR(state->per_instance_data.initialize(instance_data_buffer_info)); const ngf_texel_buffer_view_info instance_data_view_info = { .buffer = state->per_instance_data.get(), .offset = 0u, .size = instance_data_buffer_info.size, .texel_format = NGF_IMAGE_FORMAT_RGBA32F }; NGF_MISC_CHECK_NGF_ERROR(state->per_instance_data_view.initialize(instance_data_view_info)); auto mapped_per_instance_staging_buffer = (float*)ngf_buffer_map_range( instance_data_staging_buffer.get(), 0, instance_data_staging_buffer_info.size); for (uint32_t r = 0; r < vertex_attribs::INSTANCES_GRID_SIZE; ++r) { for (uint32_t c = 0; c < vertex_attribs::INSTANCES_GRID_SIZE; ++c) { const uint32_t idx = r * (vertex_attribs::INSTANCES_GRID_SIZE) + c; assert(idx < instance_data_staging_buffer_info.size); float* p = &mapped_per_instance_staging_buffer[4 * idx]; constexpr float grid_offset = -static_cast(vertex_attribs::INSTANCES_GRID_SIZE >> 1); constexpr float grid_spacing = 4.0f; p[0] = grid_offset * grid_spacing + grid_spacing * (float)c + 0.75f * (2.0f * (float)rand() / static_cast(RAND_MAX) - 1.0f); p[2] = grid_offset * (float)grid_spacing + grid_spacing * (float)r + 0.75f * (2.0f * (float)rand() / static_cast(RAND_MAX) - 1.0f); 
p[1] = (2.0f * (float)rand() / static_cast(RAND_MAX) - 1.0f); } } ngf_buffer_flush_range( instance_data_staging_buffer.get(), 0, instance_data_staging_buffer_info.size); ngf_buffer_unmap(instance_data_staging_buffer.get()); ngf_cmd_copy_buffer( xfer_encoder, instance_data_staging_buffer, state->per_instance_data, instance_data_buffer_info.size, 0, 0); /** * Create the uniform buffer. */ NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3)); /* Load contents of the model's texture into a staging buffer. */ char file_name[] = "assets/dodecahedron.tga"; ngf::buffer staging_buffer; std::vector cubemap_face_tga_data = load_file(file_name); uint32_t texture_width, texture_height; load_targa( cubemap_face_tga_data.data(), cubemap_face_tga_data.size(), nullptr, 0, &texture_width, &texture_height); const uint32_t staging_buffer_size = 4u * texture_width * texture_height; staging_buffer.initialize(ngf_buffer_info { .size = staging_buffer_size, .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC}); auto mapped_staging_buffer = (char*)ngf_buffer_map_range(staging_buffer.get(), 0, staging_buffer_size); std::vector texture_rgba_data; texture_rgba_data.resize(staging_buffer_size); load_targa( cubemap_face_tga_data.data(), cubemap_face_tga_data.size(), texture_rgba_data.data(), texture_rgba_data.size(), &texture_width, &texture_height); memcpy(mapped_staging_buffer, texture_rgba_data.data(), staging_buffer_size); /* Flush and unmap the staging buffer. */ ngf_buffer_flush_range(staging_buffer.get(), 0, staging_buffer_size); ngf_buffer_unmap(staging_buffer.get()); /* Create the texture. 
*/ const uint32_t nmips = 1 + static_cast(std::floor(std::log2(std::max(texture_width, texture_height)))); NGF_MISC_CHECK_NGF_ERROR(state->object_texture.initialize(ngf_image_info { .type = NGF_IMAGE_TYPE_IMAGE_2D, .extent = ngf_extent3d {.width = texture_width, .height = texture_height, .depth = 1}, .nmips = nmips, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_SRGBA8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST | NGF_IMAGE_USAGE_MIPMAP_GENERATION})); /* Populate the texture. */ const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = {texture_width, texture_height, 1u}, .dst_level = 0u, .dst_base_layer = 0u, .nlayers = 1u}; ngf_cmd_write_image( xfer_encoder, staging_buffer.get(), state->object_texture.get(), &img_write, 1u); ngf_cmd_generate_mipmaps(xfer_encoder, state->object_texture); /* Create the image sampler. */ NGF_MISC_CHECK_NGF_ERROR(state->trilinear_sampler.initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_LINEAR, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = (float)nmips, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 16.0f, .enable_anisotropy = true})); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float time_delta, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { static float t = .0f; t += time_delta; auto state = reinterpret_cast(userdata); ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf_cmd_bind_attrib_buffer(main_render_pass, state->vertex_attrib_buffer, 0, 0); ngf_cmd_bind_index_buffer(main_render_pass, state->index_buffer, 0u, NGF_TYPE_UINT32); state->uniforms_multibuf.write( {nm::perspective( 
nm::deg2rad(state->vfov), static_cast(w) / static_cast(h), 0.01f, 1000.0f) * nm::look_at( nm::float3 {0.0f, 50.0f, state->dolly}, nm::float3 {.0f, .0f, .0f}, nm::float3 {.0f, 1.0f, .0f}), t}); ngf::cmd_bind_resources( main_render_pass, state->uniforms_multibuf.bind_op_at_current_offset(0, 0), ngf::descriptor_set<0>::binding<1>::texel_buffer(state->per_instance_data_view.get()), ngf::descriptor_set<0>::binding<2>::texture(state->object_texture.get()), ngf::descriptor_set<0>::binding<3>::sampler(state->trilinear_sampler.get())); ngf_cmd_draw( main_render_pass, true, 0u, sizeof(vertex_attribs::index_data) / sizeof(vertex_attribs::index_data[0]), 128 * 128); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Camera control"); ImGui::DragFloat("dolly", &data->dolly, 0.01f, -500.0f, 1.0f); ImGui::DragFloat("fov", &data->vfov, 0.08f, 25.0f, 90.0f); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/07-blinn-phong/blinn-phong.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define _CRT_SECURE_NO_WARNINGS #include "camera-controller.h" #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include "mesh-loader.h" #include using namespace ngf_misc; namespace ngf_samples { namespace blinn_phong { struct light_data { nm::float4 ambient_light_intensity { 0.01f, 0.02f, 0.03f, 0.0f }; nm::float4 obj_space_point_light_position { 0.0f, 0.0f, 2.0f, 1.0f }; nm::float4 point_light_intensity { 0.6f, 0.5f, 0.3f, 1.0f }; nm::float4 obj_space_directional_light_direction { 0.0f, -1.0f, 0.5f, 0.0f }; nm::float4 directional_light_intensity { 0.2f, 0.3f, 0.5f, 1.0f }; }; struct material_data { nm::float4 diffuse_reflectance { 0.9f, 0.9f, 0.9f, 1.0f}; nm::float4 specular_coefficient { 1.0f, 1.0f, 1.0f, 1.0f }; float shininess = 125.0f; }; struct uniforms { camera_matrices cam_matrices; light_data lights; material_data material; }; struct state { ngf::graphics_pipeline vanilla_pipeline; ngf::graphics_pipeline half_lambert_pipeline; mesh bunny_mesh; light_data lights; material_data material; ngf::uniform_multibuffer uniforms_multibuf; camera_state camera; float dolly = 3.0f; float vfov = 60.0f; bool enable_half_lambert = true; }; } // namespace blinn_phong void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new blinn_phong::state {}; /** 
* Load the shader stages. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("blinn-phong", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("blinn-phong", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Enable depth testing and depth write. */ pipeline_data.depth_stencil_info.depth_test = true; pipeline_data.depth_stencil_info.depth_write = true; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Set up vertex attributes. */ /* attribute descriptions indicate the location and format of individual vertex attributes. */ const ngf_vertex_attrib_desc vertex_attrib_descriptions[] = { {/* position. */ .location = 0u, .binding = 0u, .offset = 0u, .type = NGF_TYPE_FLOAT, .size = 3u, .normalized = false}, {/* normal. */ .location = 1u, .binding = 0u, .offset = 3u * sizeof(float), .type = NGF_TYPE_FLOAT, .size = 3u, .normalized = false}, }; /** * Note that the displayed model has positions, normals and UV coordinates, * however we only use positions and normals in this sample. We still have * to account for the UV coordinates when providing the stride for the vertex * attribute binding. 
*/ const ngf_vertex_buf_binding_desc vertex_buf_binding_descriptions[] = {{ .binding = 0u, .stride = sizeof(float) * (3u + 3u + 2u), .input_rate = NGF_INPUT_RATE_VERTEX, }}; pipeline_data.vertex_input_info.nattribs = sizeof(vertex_attrib_descriptions) / sizeof(vertex_attrib_descriptions[0]); pipeline_data.vertex_input_info.attribs = vertex_attrib_descriptions; pipeline_data.vertex_input_info.nvert_buf_bindings = sizeof(vertex_buf_binding_descriptions) / sizeof(vertex_buf_binding_descriptions[0]); pipeline_data.vertex_input_info.vert_buf_bindings = vertex_buf_binding_descriptions; /** * Initialize the "vanilla" pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->vanilla_pipeline.initialize(pipeline_data.pipeline_info)); /** * Set the appropriate specialization constant and initialize the half-lambert pipeline object. */ const ngf_constant_specialization half_lambert_spec = { .constant_id = 0, .offset = 0, .type = NGF_TYPE_UINT32 }; int half_lambert_spec_value = 1; pipeline_data.spec_info.nspecializations = 1; pipeline_data.spec_info.specializations = &half_lambert_spec; pipeline_data.spec_info.value_buffer = &half_lambert_spec_value; NGF_MISC_CHECK_NGF_ERROR(state->half_lambert_pipeline.initialize(pipeline_data.pipeline_info)); /** * Load the model from a file. */ state->bunny_mesh = load_mesh_from_file("assets/bunny.mesh", xfer_encoder); NGF_MISC_ASSERT(state->bunny_mesh.have_normals); NGF_MISC_ASSERT(state->bunny_mesh.num_indices > 0u); /** * Create the uniform buffer. */ NGF_MISC_CHECK_NGF_ERROR(state->uniforms_multibuf.initialize(3)); /** * Set up some initial viewing parameters. */ state->camera.look_at[1] = 1.0f; return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->enable_half_lambert ? 
state->half_lambert_pipeline : state->vanilla_pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf_cmd_bind_attrib_buffer(main_render_pass, state->bunny_mesh.vertex_data.get(), 0, 0); ngf_cmd_bind_index_buffer(main_render_pass, state->bunny_mesh.index_data.get(), 0, NGF_TYPE_UINT32); blinn_phong::uniforms uniforms; uniforms.cam_matrices = compute_camera_matrices(state->camera, static_cast(w) / static_cast(h)); uniforms.material = state->material; uniforms.lights = state->lights; uniforms.lights.obj_space_point_light_position = uniforms.cam_matrices.world_to_view_transform * uniforms.lights.obj_space_point_light_position; uniforms.lights.obj_space_directional_light_direction = uniforms.cam_matrices.world_to_view_transform * uniforms.lights.obj_space_directional_light_direction; state->uniforms_multibuf.write(uniforms); ngf::cmd_bind_resources( main_render_pass, state->uniforms_multibuf.bind_op_at_current_offset(0, 0)); ngf_cmd_draw( main_render_pass, true, 0u, (uint32_t)state->bunny_mesh.num_indices, 1u); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) {} void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Controls"); ImGui::Separator(); ImGui::Checkbox("enable half-lambert trick", &data->enable_half_lambert); ImGui::Separator(); camera_ui(data->camera, std::make_pair(-5.f, 5.f), .1f, std::make_pair(1.0f, 10.0f), .1f); ImGui::Separator(); ImGui::Text("point light"); ImGui::DragFloat3("position", data->lights.obj_space_point_light_position.data, 0.1f, -2.0f, 2.0f, "%.1f", 0); ImGui::ColorEdit3("intensity##0", data->lights.point_light_intensity.data); ImGui::Text("directional light"); ImGui::DragFloat3("direction", data->lights.obj_space_directional_light_direction.data, 0.1f, -2.0f, 2.0f, "%.1f", 0); ImGui::ColorEdit3("intensity##1", data->lights.directional_light_intensity.data); 
ImGui::Text("ambient light"); ImGui::ColorEdit3("intensity##2", data->lights.ambient_light_intensity.data); ImGui::Separator(); ImGui::Text("material"); ImGui::ColorEdit3("diffuse reflectance", data->material.diffuse_reflectance.data); ImGui::ColorEdit3("specular coefficient", data->material.specular_coefficient.data); ImGui::SliderFloat("shininess", &data->material.shininess, 0.1f, 1000.0f, "%.1f", 0); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/08-image-arrays/image-arrays.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "camera-controller.h" #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include "staging-image.h" #include #include using namespace ngf_misc; namespace ngf_samples { namespace image_arrays { struct img_array_uniforms { nm::float4x4 matrix; float image_array_idx = 0.0f; uint32_t index = 0u; }; struct multiple_imgs_uniforms { nm::float4x4 matrix; uint32_t index = 0u; }; struct cube_array_uniforms { nm::float4x4 matrix; float aspect = 1.0f; float array_idx = 0.0f; }; constexpr int NUM_IMAGE_LAYERS = 4; struct state { ngf::graphics_pipeline img_array_pipeline; ngf::graphics_pipeline cubemap_array_pipeline; ngf::graphics_pipeline multiple_images_pipeline; ngf::image image_array; ngf::image cubemap_array; ngf::image multiple_images[NUM_IMAGE_LAYERS]; ngf::sampler image_sampler; ngf::uniform_multibuffer img_array_uniforms_multibuf; ngf::uniform_multibuffer cube_array_uniforms_multibuf; ngf::uniform_multibuffer multi_img_uniforms_multibuf; float dolly = -5.0f; float image_array_idx = 0.0f; float cubemap_array_idx = 0.0f; uint32_t image_idx = 0; float yaw = 0.0f; float pitch = 0.0f; }; } // namespace image_arrays void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new image_arrays::state {}; /** * Create staging buffers for all the layers in the array. */ staging_image staging_images[image_arrays::NUM_IMAGE_LAYERS]; uint32_t image_array_width = 0, image_array_height = 0, nmips = 0; for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) { const std::string file_name = std::string("assets/imgarr") + std::to_string(i) + ".tga"; staging_images[i] = create_staging_image_from_tga(file_name.c_str()); /** Ensure the dimensions of the image are valid. 
*/ if (i > 0 && (staging_images[i].width_px != image_array_width || staging_images[i].height_px != image_array_height)) { loge("all images in the array must have the same dimensions"); return nullptr; } else { image_array_width = staging_images[i].width_px; image_array_height = staging_images[i].height_px; nmips = staging_images[i].nmax_mip_levels; } } /** * Create the image object with several array layers. */ ngf_image_info image_array_info = { .type = NGF_IMAGE_TYPE_IMAGE_2D, .extent = { .width = image_array_width, .height = image_array_height, .depth = 1u, }, .nmips = nmips, .nlayers = image_arrays::NUM_IMAGE_LAYERS, .format = NGF_IMAGE_FORMAT_SRGBA8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST}; NGF_MISC_CHECK_NGF_ERROR(state->image_array.initialize(image_array_info)); /** * Initialize individual array members for the descriptor with multiple images. */ image_array_info.nlayers = 1u; for (uint32_t i = 0u; i < image_arrays::NUM_IMAGE_LAYERS; ++i) { NGF_MISC_CHECK_NGF_ERROR(state->multiple_images[i].initialize(image_array_info)); } /** * Populate the first mip level for each layer of each image. 
*/ for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) { const ngf_image_write img_array_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = {image_array_width, image_array_height, 1u}, .dst_base_layer = i, .nlayers = 1u}; ngf_cmd_write_image( xfer_encoder, staging_images[i].staging_buffer.get(), state->image_array.get(), &img_array_write, 1u); const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = {image_array_width, image_array_height, 1u}, .dst_base_layer = 0, .nlayers = 1u}; ngf_cmd_write_image( xfer_encoder, staging_images[i].staging_buffer.get(), state->multiple_images[i].get(), &img_write, 1u); ngf_cmd_generate_mipmaps(xfer_encoder, state->multiple_images[i].get()); } /** Populate the rest of the mip levels automatically. **/ ngf_cmd_generate_mipmaps(xfer_encoder, state->image_array.get()); /** Create a cubemap object with several array layers. */ ngf_image_info cubemap_array_info = { .type = NGF_IMAGE_TYPE_CUBE, .extent = { .width = image_array_width, .height = image_array_height, .depth = 1u, }, .nmips = nmips, .nlayers = image_arrays::NUM_IMAGE_LAYERS, .format = NGF_IMAGE_FORMAT_SRGBA8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_MIPMAP_GENERATION | NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST}; NGF_MISC_CHECK_NGF_ERROR(state->cubemap_array.initialize(cubemap_array_info)); /** Upload the first mip level for each layer on each face. */ for (uint32_t i = 0; i < image_arrays::NUM_IMAGE_LAYERS; ++i) { for (uint32_t face = NGF_CUBEMAP_FACE_POSITIVE_X; face < NGF_CUBEMAP_FACE_COUNT; ++face) { const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = {image_array_width, image_array_height, 1u}, .dst_level = 0u, .dst_base_layer = 6u * i + face, .nlayers = 1u}; ngf_cmd_write_image( xfer_encoder, staging_images[i].staging_buffer.get(), state->cubemap_array.get(), &img_write, 1u); } } /** Generate the rest of the mips automatically. 
*/ ngf_cmd_generate_mipmaps(xfer_encoder, state->cubemap_array.get()); /** Create an image sampler. */ NGF_MISC_CHECK_NGF_ERROR(state->image_sampler.initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_LINEAR, .wrap_u = NGF_WRAP_MODE_REPEAT, .wrap_v = NGF_WRAP_MODE_REPEAT, .wrap_w = NGF_WRAP_MODE_REPEAT, .lod_max = (float)nmips, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false})); /** * Load the shader stages for the regular image array pipeline. */ const ngf::shader_stage img_array_vertex_shader_stage = load_shader_stage("textured-quad-image-array", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage img_array_fragment_shader_stage = load_shader_stage("textured-quad-image-array", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = img_array_vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = img_array_fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Initialize the image array pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->img_array_pipeline.initialize(pipeline_data.pipeline_info)); /** * Load the shader stages for the multiple images pipeline. 
*/ const ngf::shader_stage multiple_images_vertex_shader_stage = load_shader_stage("textured-quad-multiple-images", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage multiple_images_fragment_shader_stage = load_shader_stage("textured-quad-multiple-images", "PSMain", NGF_STAGE_FRAGMENT); /** * Set shader stages. */ pipeline_data.pipeline_info.shader_stages[0] = multiple_images_vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = multiple_images_fragment_shader_stage.get(); /** * Initialize the multiple images pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->multiple_images_pipeline.initialize(pipeline_data.pipeline_info)); /** * Load the shader stages for the cubemap array pipeline. */ const ngf::shader_stage cubemap_vertex_shader_stage = load_shader_stage("cubemap-array", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage cubemap_fragment_shader_stage = load_shader_stage("cubemap-array", "PSMain", NGF_STAGE_FRAGMENT); /** * Set shader stages. */ pipeline_data.pipeline_info.shader_stages[0] = cubemap_vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = cubemap_fragment_shader_stage.get(); /** * Initialize the cubemap array pipeline object. */ NGF_MISC_CHECK_NGF_ERROR(state->cubemap_array_pipeline.initialize(pipeline_data.pipeline_info)); /** * Create the uniform buffers. */ NGF_MISC_CHECK_NGF_ERROR(state->img_array_uniforms_multibuf.initialize(3)); NGF_MISC_CHECK_NGF_ERROR(state->cube_array_uniforms_multibuf.initialize(3)); NGF_MISC_CHECK_NGF_ERROR(state->multi_img_uniforms_multibuf.initialize(3)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); /* Compute the perspective transform for the current frame. 
*/ const nm::float4x4 camera_to_clip = nm::perspective( nm::deg2rad(72.0f), static_cast(w) / static_cast(h), 0.01f, 100.0f); /* Build the world-to-camera transform for the current frame. */ nm::float4x4 world_to_camera = nm::translation(nm::float3 {-3.0f, 0.0f, state->dolly}); image_arrays::img_array_uniforms img_arr_uniforms; img_arr_uniforms.matrix = camera_to_clip * world_to_camera; img_arr_uniforms.image_array_idx = state->image_array_idx; state->img_array_uniforms_multibuf.write(img_arr_uniforms); image_arrays::multiple_imgs_uniforms multiimg_uniforms; multiimg_uniforms.matrix = camera_to_clip * nm::translation(nm::float3{3.0f, 0.0f, state->dolly}); multiimg_uniforms.index = state->image_idx; state->multi_img_uniforms_multibuf.write(multiimg_uniforms); image_arrays::cube_array_uniforms cube_arr_uniforms; cube_arr_uniforms.aspect = (float)w / (float)h; cube_arr_uniforms.array_idx = state->cubemap_array_idx; cube_arr_uniforms.matrix = nm::rotation_y(state->yaw) * nm::rotation_x(state->pitch); state->cube_array_uniforms_multibuf.write(cube_arr_uniforms); ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->cubemap_array_pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf::cmd_bind_resources( main_render_pass, state->cube_array_uniforms_multibuf.bind_op_at_current_offset(0, 0), ngf::descriptor_set<0>::binding<1>::texture(state->cubemap_array.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->image_sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); ngf_cmd_bind_gfx_pipeline(main_render_pass, state->multiple_images_pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf::cmd_bind_resources( main_render_pass, state->multi_img_uniforms_multibuf .bind_op_at_current_offset(0, 0, 0, sizeof(image_arrays::multiple_imgs_uniforms)), ngf::descriptor_set<0>::binding<1>::sampler(state->image_sampler), 
ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[0], 0), ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[1], 1), ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[2], 2), ngf::descriptor_set<1>::binding<0>::texture(state->multiple_images[3], 3)); ngf_cmd_draw(main_render_pass, false, 0, 6, 1); ngf_cmd_bind_gfx_pipeline(main_render_pass, state->img_array_pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf::cmd_bind_resources( main_render_pass, state->img_array_uniforms_multibuf .bind_op_at_current_offset(0, 0, 0, sizeof(image_arrays::img_array_uniforms)), ngf::descriptor_set<0>::binding<1>::sampler(state->image_sampler), ngf::descriptor_set<1>::binding<0>::texture(state->image_array)); ngf_cmd_draw(main_render_pass, false, 0, 6, 1); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Image Arrays"); ImGui::DragFloat("dolly", &data->dolly, 0.01f, -70.0f, 0.11f); ImGui::DragFloat("image array index", &data->image_array_idx, 0.1f, 0.0f, 3.0f); ImGui::DragFloat("cubemap array index", &data->cubemap_array_idx, 0.1f, 0.0f, 3.0f); ImGui::DragInt("image index", (int*)&data->image_idx, .1f, 0, 3); ImGui::SliderFloat("cubemap pitch", &data->pitch, -nm::PI, nm::PI); ImGui::SliderFloat("cubemap yaw", &data->yaw, -nm::PI, nm::PI); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/09-volume-rendering/volume-rendering.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the 
"Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define _CRT_SECURE_NO_WARNINGS #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include #include using namespace ngf_misc; namespace ngf_samples { namespace volume_rendering { struct uniforms { nm::float4x4 transform_matrix; float aspect_ratio; }; struct state { ngf::image volume; ngf::sampler sampler; ngf::graphics_pipeline pipeline; ngf::uniform_multibuffer uniforms_multibuffer; uint16_t volume_voxel_dimensions[3]; }; } // namespace volume_rendering void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new volume_rendering::state {}; /** Open the file containing the volume data and read in the dimensions. 
*/ FILE* volume_data_file = fopen("assets/stag-beetle-volume.dat", "rb"); if (volume_data_file == nullptr) { loge("failed to open the volume data file."); return nullptr; } fread(state->volume_voxel_dimensions, sizeof(uint16_t), 3, volume_data_file); /** Prepare a staging buffer. */ const size_t staging_buffer_size = sizeof(uint16_t) * state->volume_voxel_dimensions[0] * state->volume_voxel_dimensions[1] * state->volume_voxel_dimensions[2]; const ngf_buffer_info staging_buffer_info = { .size = staging_buffer_size, .storage_type = NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC, }; ngf::buffer staging_buffer; NGF_MISC_CHECK_NGF_ERROR(staging_buffer.initialize(staging_buffer_info)); /** Map the staging buffer and read the volume data directly into the memory. */ void* mapped_staging_buffer_ptr = ngf_buffer_map_range(staging_buffer, 0, staging_buffer_size); const uint64_t read_bytes = fread(mapped_staging_buffer_ptr, 1, staging_buffer_size, volume_data_file); if (ferror(volume_data_file)) { loge("error reading volume data file: %d", errno); return nullptr; } if (read_bytes != staging_buffer_size) { loge("failed to read the entire volume data. EOF: %d", feof(volume_data_file)); return nullptr; } fclose(volume_data_file); /** Flush and unmap the staging buffer to prepare it for the upcoming transfer. */ ngf_buffer_flush_range(staging_buffer, 0, staging_buffer_size); ngf_buffer_unmap(staging_buffer); /** Prepare a 3D image. */ const ngf_image_info img_info = { .type = NGF_IMAGE_TYPE_IMAGE_3D, .extent = {.width = state->volume_voxel_dimensions[0], .height = state->volume_voxel_dimensions[1], .depth = state->volume_voxel_dimensions[2]}, .nmips = 1u, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_R16_UNORM, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_XFER_DST | NGF_IMAGE_USAGE_SAMPLE_FROM, }; NGF_MISC_CHECK_NGF_ERROR(state->volume.initialize(img_info)); /** Upload the volume data into the image. 
*/ const ngf_image_write img_write = { .src_offset = 0u, .dst_offset = {0, 0, 0}, .extent = img_info.extent, .dst_level = 0u, .dst_base_layer = 0u, .nlayers = 1u}; ngf_cmd_write_image(xfer_encoder, staging_buffer, state->volume.get(), &img_write, 1u); /** * Initialize the sampler. */ NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(ngf_sampler_info { .min_filter = NGF_FILTER_LINEAR, .mag_filter = NGF_FILTER_LINEAR, .mip_filter = NGF_FILTER_NEAREST, .wrap_u = NGF_WRAP_MODE_CLAMP_TO_EDGE, .wrap_v = NGF_WRAP_MODE_CLAMP_TO_EDGE, .wrap_w = NGF_WRAP_MODE_CLAMP_TO_EDGE, .lod_max = 0.0f, .lod_min = 0.0f, .lod_bias = 0.0f, .max_anisotropy = 0.0f, .enable_anisotropy = false})); /** * Load the shader stages. */ const ngf::shader_stage vertex_shader_stage = load_shader_stage("volume-renderer", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage fragment_shader_stage = load_shader_stage("volume-renderer", "PSMain", NGF_STAGE_FRAGMENT); /** * Prepare a template with some default values for pipeline initialization. */ ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); /** * Set shader stages. */ pipeline_data.pipeline_info.nshader_stages = 2; pipeline_data.pipeline_info.shader_stages[0] = vertex_shader_stage.get(); pipeline_data.pipeline_info.shader_stages[1] = fragment_shader_stage.get(); /** * Set multisampling state. */ pipeline_data.multisample_info.sample_count = main_render_target_sample_count; /** * Set the compatible render target description. */ pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); /** * Set up blending. 
*/ ngf_blend_info blend_info; blend_info.enable = true; blend_info.blend_op_color = NGF_BLEND_OP_ADD; blend_info.dst_color_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; blend_info.src_color_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA; blend_info.blend_op_alpha = NGF_BLEND_OP_ADD; blend_info.src_alpha_blend_factor = NGF_BLEND_FACTOR_ZERO; blend_info.dst_alpha_blend_factor = NGF_BLEND_FACTOR_ONE; blend_info.color_write_mask = NGF_COLOR_MASK_WRITE_BIT_R | NGF_COLOR_MASK_WRITE_BIT_G | NGF_COLOR_MASK_WRITE_BIT_B | NGF_COLOR_MASK_WRITE_BIT_A; pipeline_data.pipeline_info.color_attachment_blend_states = &blend_info; /** * Initialize the pipeline object. */ state->pipeline.initialize(pipeline_data.pipeline_info); /** * Initialize uniforms multibuffer. */ state->uniforms_multibuffer.initialize(3); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float time_delta, ngf_frame_token /*token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { static float t = 0.0; t += time_delta; auto state = reinterpret_cast(userdata); volume_rendering::uniforms u { nm::rotation_x(-1.620f) * nm::rotation_y(t) * nm::translation(nm::float3(0.0, -0.5, 0.0)), (float)w / (float)h }; state->uniforms_multibuffer.write(u); const ngf_irect2d viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->pipeline); ngf_cmd_viewport(main_render_pass, &viewport); ngf_cmd_scissor(main_render_pass, &viewport); ngf::cmd_bind_resources( main_render_pass, ngf::descriptor_set<0>::binding<0>::texture(state->volume), ngf::descriptor_set<0>::binding<1>::sampler(state->sampler), state->uniforms_multibuffer.bind_op_at_current_offset(1, 0, 0, sizeof(volume_rendering::uniforms))); ngf_cmd_draw(main_render_pass, false, 0, 6, state->volume_voxel_dimensions[2]); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_submit(void*) { } void sample_draw_ui(void* /*userdata*/) { } void 
sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/0a-compute-demo/compute-demo.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include #include namespace ngf_samples { namespace compute_demo { struct state { ngf::image image; ngf::compute_pipeline compute_pipeline; ngf::graphics_pipeline blit_pipeline; ngf::sampler sampler; ngf_compute_encoder prev_compute_enc; ngf_image_ref image_ref; uint32_t frame; }; } // namespace compute_demo void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /* xfer_encoder*/) { auto state = new compute_demo::state {}; /** * Load the shader stages. */ const ngf::shader_stage compute_shader = load_shader_stage("compute-demo", "CSMain", NGF_STAGE_COMPUTE); /** * Create the compute pipeline. */ ngf_compute_pipeline_info pipeline_info; pipeline_info.shader_stage = compute_shader.get(); pipeline_info.spec_info = nullptr; NGF_SAMPLES_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info)); /** * Load shader stages. */ const ngf::shader_stage blit_vertex_stage = load_shader_stage("simple-texture", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage blit_fragment_stage = load_shader_stage("simple-texture", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline for blit. 
*/ ngf_util_graphics_pipeline_data blit_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data); blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; ngf_graphics_pipeline_info& blit_pipe_info = blit_pipeline_data.pipeline_info; blit_pipe_info.nshader_stages = 2u; blit_pipe_info.shader_stages[0] = blit_vertex_stage.get(); blit_pipe_info.shader_stages[1] = blit_fragment_stage.get(); blit_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); NGF_SAMPLES_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info)); /** * Initialize the image. */ ngf_image_info image_info; image_info.format = NGF_IMAGE_FORMAT_RGBA8; image_info.extent.depth = 1; image_info.extent.width = 4 * 128; image_info.extent.height = 4 * 128; image_info.nlayers = 1u; image_info.nmips = 1u; image_info.sample_count = NGF_SAMPLE_COUNT_1; image_info.type = NGF_IMAGE_TYPE_IMAGE_2D; image_info.usage_hint = NGF_IMAGE_USAGE_STORAGE | NGF_IMAGE_USAGE_SAMPLE_FROM; NGF_SAMPLES_CHECK_NGF_ERROR(state->image.initialize(image_info)); state->image_ref.image = state->image; state->image_ref.layer = 0u; state->image_ref.mip_level = 0u; /* Create sampler.*/ const ngf_sampler_info samp_info { NGF_FILTER_LINEAR, NGF_FILTER_LINEAR, NGF_FILTER_NEAREST, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, 0.0f, 0.0f, 0.0f, 1.0f, false}; NGF_SAMPLES_CHECK_NGF_ERROR(state->sampler.initialize(samp_info)); state->frame = 0u; return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /* token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); if (state->frame > 0u) { ngf_irect2d onsc_viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline); ngf_cmd_viewport(main_render_pass, &onsc_viewport); ngf_cmd_scissor(main_render_pass, &onsc_viewport); 
ngf::cmd_bind_resources( main_render_pass, ngf::descriptor_set<0>::binding<1>::texture(state->image.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); } } void sample_pre_draw_frame(ngf_cmd_buffer, ngf_sync_op* sync_op, void* userdata) { auto state = reinterpret_cast(userdata); if (state->frame > 0u) { sync_op->nwait_compute_encoders = 1u; sync_op->wait_compute_encoders = &state->prev_compute_enc; sync_op->nimage_refs = 1u; sync_op->image_refs = &state->image_ref; } } void sample_post_draw_frame( ngf_cmd_buffer cmd_buffer, ngf_render_encoder prev_render_encoder, void* userdata) { auto state = reinterpret_cast(userdata); const ngf_sync_op compute_sync_op { .nwait_render_encoders = 1u, .wait_render_encoders = &prev_render_encoder, .nimage_refs = 1u, .image_refs = &state->image_ref}; ngf_compute_encoder compute_enc; NGF_SAMPLES_CHECK_NGF_ERROR( ngf_cmd_begin_compute_pass(cmd_buffer, &compute_sync_op, &compute_enc)); ngf_resource_bind_op bind_op; bind_op.info.image_sampler.image = state->image; bind_op.target_set = 0; bind_op.target_binding = 0; bind_op.type = NGF_DESCRIPTOR_STORAGE_IMAGE; ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline.get()); ngf_cmd_bind_compute_resources(compute_enc, &bind_op, 1); ngf_cmd_dispatch(compute_enc, 128, 128, 1); ngf_cmd_end_compute_pass(compute_enc); state->prev_compute_enc = compute_enc; state->frame++; } void sample_post_submit(void*) { } void sample_draw_ui(void* /*userdata*/) { } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/0a-compute-mandelbrot/compute-mandelbrot.cpp ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * 
deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define _CRT_SECURE_NO_WARNINGS #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include #include using namespace ngf_misc; namespace ngf_samples { namespace compute_demo { struct state { ngf::image image; ngf::compute_pipeline compute_pipeline; ngf::graphics_pipeline blit_pipeline; ngf::sampler sampler; ngf_compute_encoder prev_compute_enc; ngf_image_ref image_ref; uint32_t frame; }; } // namespace compute_demo void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /* xfer_encoder*/) { auto state = new compute_demo::state {}; /** * Load the shader stages. */ const ngf::shader_stage compute_shader = load_shader_stage("compute-demo", "CSMain", NGF_STAGE_COMPUTE); /** * Create the compute pipeline. 
*/ ngf_compute_pipeline_info pipeline_info{}; pipeline_info.shader_stage = compute_shader.get(); pipeline_info.spec_info = nullptr; NGF_MISC_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info)); /** * Load shader stages. */ const ngf::shader_stage blit_vertex_stage = load_shader_stage("simple-texture", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage blit_fragment_stage = load_shader_stage("simple-texture", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline for blit. */ ngf_util_graphics_pipeline_data blit_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data); blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; ngf_graphics_pipeline_info& blit_pipe_info = blit_pipeline_data.pipeline_info; blit_pipe_info.nshader_stages = 2u; blit_pipe_info.shader_stages[0] = blit_vertex_stage.get(); blit_pipe_info.shader_stages[1] = blit_fragment_stage.get(); blit_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info)); /** * Initialize the image. 
*/ ngf_image_info image_info; image_info.format = NGF_IMAGE_FORMAT_RGBA8; image_info.extent.depth = 1; image_info.extent.width = 4 * 128; image_info.extent.height = 4 * 128; image_info.nlayers = 1u; image_info.nmips = 1u; image_info.sample_count = NGF_SAMPLE_COUNT_1; image_info.type = NGF_IMAGE_TYPE_IMAGE_2D; image_info.usage_hint = NGF_IMAGE_USAGE_STORAGE | NGF_IMAGE_USAGE_SAMPLE_FROM; NGF_MISC_CHECK_NGF_ERROR(state->image.initialize(image_info)); state->image_ref.image = state->image; state->image_ref.layer = 0u; state->image_ref.mip_level = 0u; /* Create sampler.*/ const ngf_sampler_info samp_info { NGF_FILTER_LINEAR, NGF_FILTER_LINEAR, NGF_FILTER_NEAREST, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, 0.0f, 0.0f, 0.0f, 1.0f, false}; NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info)); state->frame = 0u; return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /*time_delta*/, ngf_frame_token /* token*/, uint32_t w, uint32_t h, float /*time*/, void* userdata) { auto state = reinterpret_cast(userdata); if (state->frame > 0u) { ngf_irect2d onsc_viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline); ngf_cmd_viewport(main_render_pass, &onsc_viewport); ngf_cmd_scissor(main_render_pass, &onsc_viewport); ngf::cmd_bind_resources( main_render_pass, ngf::descriptor_set<0>::binding<1>::texture(state->image.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); } } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame( ngf_cmd_buffer cmd_buffer, void* userdata) { auto state = reinterpret_cast(userdata); const ngf_compute_pass_info pass_info {}; ngf_compute_encoder compute_enc; NGF_MISC_CHECK_NGF_ERROR( ngf_cmd_begin_compute_pass(cmd_buffer, &pass_info, &compute_enc)); ngf_resource_bind_op bind_op{}; bind_op.info.image_sampler.is_image_view = false; 
bind_op.info.image_sampler.resource.image = state->image; bind_op.target_set = 0; bind_op.target_binding = 0; bind_op.type = NGF_DESCRIPTOR_STORAGE_IMAGE; ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline.get()); ngf_cmd_bind_compute_resources(compute_enc, &bind_op, 1); ngf_cmd_dispatch(compute_enc, 128, 128, 1); ngf_cmd_end_compute_pass(compute_enc); state->prev_compute_enc = compute_enc; state->frame++; } void sample_post_submit(void*) { } void sample_draw_ui(void* /*userdata*/) { } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/0b-compute-vertices/compute-vertices.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "camera-controller.h" #include "check.h" #include "imgui.h" #include "logging.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "nicemath.h" #include "sample-interface.h" #include "shader-loader.h" #include #include using namespace ngf_misc; namespace ngf_samples { namespace compute_verts { constexpr int nverts_per_side = 512; constexpr int ntotal_verts = nverts_per_side * nverts_per_side; constexpr int nindices_per_strip = 2 * nverts_per_side + 1u; constexpr int nstrips = nverts_per_side - 1; constexpr int ntotal_indices = nstrips * nindices_per_strip; struct render_uniforms { camera_matrices cam_matrices; }; struct compute_uniforms { float time; float pad[3]; }; struct state { ngf::compute_pipeline compute_pipeline; ngf::graphics_pipeline render_pipeline; ngf::uniform_multibuffer render_uniforms_multibuf; ngf::uniform_multibuffer compute_uniforms_multibuf; ngf::buffer index_buffer; ngf::buffer vertex_buffer; ngf_buffer_slice compute_buffer_slice; ngf_compute_encoder prev_compute_encoder; camera_state camera; uint32_t frame = 0u; }; } // namespace compute_verts void* sample_initialize( uint32_t /*width*/, uint32_t /*height*/, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder) { auto state = new compute_verts::state {}; /** * Load the shader stages. */ const ngf::shader_stage compute_shader = load_shader_stage("compute-vertices", "CSMain", NGF_STAGE_COMPUTE); /** * Create the compute pipeline. */ ngf_compute_pipeline_info pipeline_info{}; pipeline_info.shader_stage = compute_shader.get(); pipeline_info.spec_info = nullptr; NGF_MISC_CHECK_NGF_ERROR(state->compute_pipeline.initialize(pipeline_info)); /** * Load shader stages. 
*/ const ngf::shader_stage render_vertex_stage = load_shader_stage("render-vertices", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage render_fragment_stage = load_shader_stage("render-vertices", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline for rendering vertex data. */ const ngf_vertex_attrib_desc position_attrib_desc { .location = 0u, .binding = 0u, .offset = 0u, .type = NGF_TYPE_FLOAT, .size = 4u, .normalized = false}; const ngf_vertex_buf_binding_desc vert_buf_binding_desc { .binding = 0u, .stride = 4u * sizeof(float), .input_rate = NGF_INPUT_RATE_VERTEX}; ngf_util_graphics_pipeline_data render_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&render_pipeline_data); render_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; render_pipeline_data.input_assembly_info.enable_primitive_restart = true; render_pipeline_data.input_assembly_info.primitive_topology = NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; render_pipeline_data.depth_stencil_info.depth_test = true; render_pipeline_data.depth_stencil_info.depth_write = true; render_pipeline_data.depth_stencil_info.depth_compare = NGF_COMPARE_OP_LESS; render_pipeline_data.rasterization_info.cull_mode = NGF_CULL_MODE_NONE; render_pipeline_data.vertex_input_info.nattribs = 1u; render_pipeline_data.vertex_input_info.attribs = &position_attrib_desc; render_pipeline_data.vertex_input_info.nvert_buf_bindings = 1u; render_pipeline_data.vertex_input_info.vert_buf_bindings = &vert_buf_binding_desc; ngf_graphics_pipeline_info& render_pipe_info = render_pipeline_data.pipeline_info; render_pipe_info.nshader_stages = 2u; render_pipe_info.shader_stages[0] = render_vertex_stage.get(); render_pipe_info.shader_stages[1] = render_fragment_stage.get(); render_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); NGF_MISC_CHECK_NGF_ERROR(state->render_pipeline.initialize(render_pipe_info)); /** * Initialize the index buffer. 
*/
  /* CPU-visible staging buffer used to fill in the index data before it is
     copied into the device-local index buffer. */
  const ngf_buffer_info staging_index_buffer_info {
      .size         = compute_verts::ntotal_indices * sizeof(uint32_t),
      .storage_type = NGF_BUFFER_STORAGE_HOST_WRITEABLE,
      .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC};
  const ngf_buffer_info index_buffer_info {
      .size         = compute_verts::ntotal_indices * sizeof(uint32_t),
      .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL,
      .buffer_usage = NGF_BUFFER_USAGE_XFER_DST | NGF_BUFFER_USAGE_INDEX_BUFFER};
  ngf::buffer staging_index_buffer;
  NGF_MISC_CHECK_NGF_ERROR(staging_index_buffer.initialize(staging_index_buffer_info));
  NGF_MISC_CHECK_NGF_ERROR(state->index_buffer.initialize(index_buffer_info));
  auto mapped_staging_index_buffer = (uint32_t*)
      ngf_buffer_map_range(staging_index_buffer.get(), 0u, staging_index_buffer_info.size);
  /* Generate indices for a grid of triangle strips: each strip zig-zags between
     two adjacent rows of the vertex grid (two indices per column — one from the
     next row, one from the current row), then is terminated with the
     primitive-restart sentinel. This matches enable_primitive_restart = true
     set on the render pipeline above. */
  uint32_t idx = 0u;
  for (uint32_t strip = 0u; strip < compute_verts::nverts_per_side - 1; ++strip) {
    for (uint32_t v = 0u; v < compute_verts::nverts_per_side; ++v) {
      NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);
      mapped_staging_index_buffer[idx++] = (strip + 1u) * compute_verts::nverts_per_side + v;
      NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);
      mapped_staging_index_buffer[idx++] = strip * compute_verts::nverts_per_side + v;
    }
    NGF_MISC_ASSERT(idx < compute_verts::ntotal_indices);
    /* Primitive-restart marker (all bits set for a 32-bit index type) ends the strip. */
    mapped_staging_index_buffer[idx++] = ~0u;
  }
  ngf_buffer_flush_range(staging_index_buffer.get(), 0, staging_index_buffer_info.size);
  ngf_buffer_unmap(staging_index_buffer.get());
  /* Record the copy from the staging buffer into the device-local index buffer
     on the provided transfer encoder. */
  ngf_cmd_copy_buffer(
      xfer_encoder,
      staging_index_buffer.get(),
      state->index_buffer.get(),
      staging_index_buffer_info.size,
      0u,
      0u);

  /**
   * Create the vertex buffer.
*/ const ngf_buffer_info vertex_buffer_info { .size = compute_verts::ntotal_verts * (4u * sizeof(float)) * 2, .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_VERTEX_BUFFER | NGF_BUFFER_USAGE_STORAGE_BUFFER}; NGF_MISC_CHECK_NGF_ERROR(state->vertex_buffer.initialize(vertex_buffer_info)); state->compute_buffer_slice.buffer = state->vertex_buffer.get(); state->compute_buffer_slice.range = compute_verts::ntotal_verts * (4u * sizeof(float)); /** * Set up some initial viewing parameters. */ state->camera.look_at[1] = 1.0f; state->render_uniforms_multibuf.initialize(3); state->compute_uniforms_multibuf.initialize(3); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /* time_delta */, ngf_frame_token /* token */, uint32_t w, uint32_t h, float /* time */, void* userdata) { auto state = reinterpret_cast(userdata); const uint32_t f_prev = (state->frame + 1u) % 2; if (state->frame > 0u) { compute_verts::render_uniforms render_uniforms; render_uniforms.cam_matrices = compute_camera_matrices(state->camera, static_cast(w) / static_cast(h)); state->render_uniforms_multibuf.write(render_uniforms); ngf_irect2d onsc_viewport {0, 0, w, h}; ngf_cmd_bind_gfx_pipeline(main_render_pass, state->render_pipeline); ngf::cmd_bind_resources( main_render_pass, state->render_uniforms_multibuf.bind_op_at_current_offset(0, 0)); ngf_cmd_viewport(main_render_pass, &onsc_viewport); ngf_cmd_scissor(main_render_pass, &onsc_viewport); ngf_cmd_bind_index_buffer(main_render_pass, state->index_buffer, 0u, NGF_TYPE_UINT32); ngf_cmd_bind_attrib_buffer( main_render_pass, state->vertex_buffer, 0u, f_prev * sizeof(float) * 4u * compute_verts::ntotal_verts); ngf_cmd_draw(main_render_pass, true, 0u, compute_verts::ntotal_indices, 1u); } } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame( ngf_cmd_buffer cmd_buffer, void* userdata) { static float time = 0.f; auto state = reinterpret_cast(userdata); const 
uint32_t f_curr = (state->frame) % 2; time += 0.01f; compute_verts::compute_uniforms compute_uniforms; compute_uniforms.time = time; state->compute_uniforms_multibuf.write(compute_uniforms); ngf_compute_pass_info pass_info {}; ngf_compute_encoder compute_enc; ngf_cmd_begin_compute_pass(cmd_buffer, &pass_info, &compute_enc); ngf_cmd_bind_compute_pipeline(compute_enc, state->compute_pipeline); ngf::cmd_bind_resources( compute_enc, state->compute_uniforms_multibuf.bind_op_at_current_offset(1, 1), ngf_resource_bind_op { .target_set = 1u, .target_binding = 0u, .type = NGF_DESCRIPTOR_STORAGE_BUFFER, .info = { .buffer = { .buffer = state->vertex_buffer.get(), .offset = f_curr * 4u * sizeof(float) * compute_verts::ntotal_verts, .range = compute_verts::ntotal_verts * (4u * sizeof(float))}}}); ngf_cmd_dispatch( compute_enc, compute_verts::nverts_per_side / 2, compute_verts::nverts_per_side / 2, 1u); ngf_cmd_end_compute_pass(compute_enc); state->prev_compute_encoder = compute_enc; state->frame += 1u; } void sample_post_submit(void*) { } void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Controls"); camera_ui(data->camera, std::make_pair(-5.f, 5.f), .1f, std::make_pair(1.0f, 10.0f), .1f); ImGui::End(); } void sample_shutdown(void* userdata) { delete reinterpret_cast(userdata); } } // namespace ngf_samples ================================================ FILE: samples/0c-render-to-multisample-texture/render-to-multisample-texture.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject 
to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "check.h" #include "imgui.h" #include "nicegraf-util.h" #include "nicegraf-wrappers.h" #include "sample-interface.h" #include "shader-loader.h" #include using namespace ngf_misc; namespace ngf_samples { struct render_to_multisample_texture_data { ngf::render_target default_rt; ngf::render_target offscreen_rt; ngf::render_target offscreen_multisample_rt; ngf::graphics_pipeline blit_pipeline; ngf::graphics_pipeline offscreen_pipeline; ngf::graphics_pipeline offscreen_multisample_pipeline; ngf::image rt_texture; ngf::image resolve_texture; ngf::sampler sampler; bool is_multisample = true; }; void* sample_initialize( uint32_t, uint32_t, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder /*xfer_encoder*/) { auto state = new render_to_multisample_texture_data {}; /* Create the image to render to. 
*/ const ngf_extent3d img_size {512u, 512u, 1u}; const ngf_image_info img_info { NGF_IMAGE_TYPE_IMAGE_2D, img_size, 1u, 1u, NGF_IMAGE_FORMAT_BGRA8_SRGB, main_render_target_sample_count, NGF_IMAGE_USAGE_ATTACHMENT}; NGF_MISC_CHECK_NGF_ERROR(state->rt_texture.initialize(img_info)); const ngf_image_info resolve_img_info { NGF_IMAGE_TYPE_IMAGE_2D, img_size, 1u, 1u, NGF_IMAGE_FORMAT_BGRA8_SRGB, NGF_SAMPLE_COUNT_1, NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_ATTACHMENT}; NGF_MISC_CHECK_NGF_ERROR(state->resolve_texture.initialize(resolve_img_info)); const ngf_attachment_description offscreen_attachments = { .type = NGF_ATTACHMENT_COLOR, .format = NGF_IMAGE_FORMAT_BGRA8_SRGB, .sample_count = NGF_SAMPLE_COUNT_1, .is_resolve = false}; const ngf_image_ref offscreen_img_ref = { .image = state->resolve_texture.get(), .mip_level = 0u, .layer = 0u, .cubemap_face = NGF_CUBEMAP_FACE_COUNT}; const ngf_attachment_descriptions offscreen_attachments_list = { .descs = &offscreen_attachments, .ndescs = 1u, }; ngf_render_target_info rt_info {&offscreen_attachments_list, &offscreen_img_ref}; NGF_MISC_CHECK_NGF_ERROR(state->offscreen_rt.initialize(rt_info)); const ngf_attachment_description offscreen_multisample_attachments[2] = { {.type = NGF_ATTACHMENT_COLOR, .format = NGF_IMAGE_FORMAT_BGRA8_SRGB, .sample_count = main_render_target_sample_count, .is_resolve = false}, {.type = NGF_ATTACHMENT_COLOR, .format = NGF_IMAGE_FORMAT_BGRA8_SRGB, .sample_count = NGF_SAMPLE_COUNT_1, .is_resolve = true}}; const ngf_attachment_descriptions offscreen_multisample_attachments_list = { .descs = offscreen_multisample_attachments, .ndescs = 2u, }; const ngf_image_ref offscreen_multisample_img_refs[2] = { {.image = state->rt_texture.get(), .mip_level = 0u, .layer = 0u, .cubemap_face = NGF_CUBEMAP_FACE_COUNT}, {.image = state->resolve_texture.get(), .mip_level = 0u, .layer = 0u, .cubemap_face = NGF_CUBEMAP_FACE_COUNT}}; ngf_render_target_info multisample_rt_info { &offscreen_multisample_attachments_list, 
offscreen_multisample_img_refs}; NGF_MISC_CHECK_NGF_ERROR(state->offscreen_multisample_rt.initialize(multisample_rt_info)); /** * Load shader stages. */ const ngf::shader_stage blit_vertex_stage = load_shader_stage("simple-texture", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage blit_fragment_stage = load_shader_stage("simple-texture", "PSMain", NGF_STAGE_FRAGMENT); const ngf::shader_stage offscreen_vertex_stage = load_shader_stage("small-triangle", "VSMain", NGF_STAGE_VERTEX); const ngf::shader_stage offscreen_fragment_stage = load_shader_stage("small-triangle", "PSMain", NGF_STAGE_FRAGMENT); /** * Create pipeline for blit. */ ngf_util_graphics_pipeline_data blit_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&blit_pipeline_data); blit_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; ngf_graphics_pipeline_info& blit_pipe_info = blit_pipeline_data.pipeline_info; blit_pipe_info.nshader_stages = 2u; blit_pipe_info.shader_stages[0] = blit_vertex_stage.get(); blit_pipe_info.shader_stages[1] = blit_fragment_stage.get(); blit_pipe_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); NGF_MISC_CHECK_NGF_ERROR(state->blit_pipeline.initialize(blit_pipe_info)); /** * Create pipeline for offscreen pass. */ ngf_util_graphics_pipeline_data offscreen_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&offscreen_pipeline_data); ngf_graphics_pipeline_info& offscreen_pipe_info = offscreen_pipeline_data.pipeline_info; offscreen_pipe_info.nshader_stages = 2u; offscreen_pipe_info.shader_stages[0] = offscreen_vertex_stage.get(); offscreen_pipe_info.shader_stages[1] = offscreen_fragment_stage.get(); offscreen_pipe_info.compatible_rt_attachment_descs = &offscreen_attachments_list; NGF_MISC_CHECK_NGF_ERROR(state->offscreen_pipeline.initialize(offscreen_pipe_info)); /** * Create pipeline for multisample offscreen pass. 
*/ ngf_util_graphics_pipeline_data offscreen_multisample_pipeline_data; ngf_util_create_default_graphics_pipeline_data(&offscreen_multisample_pipeline_data); offscreen_multisample_pipeline_data.multisample_info.sample_count = main_render_target_sample_count; ngf_graphics_pipeline_info& offscreen_multisample_pipe_info = offscreen_multisample_pipeline_data.pipeline_info; offscreen_multisample_pipe_info.nshader_stages = 2u; offscreen_multisample_pipe_info.shader_stages[0] = offscreen_vertex_stage.get(); offscreen_multisample_pipe_info.shader_stages[1] = offscreen_fragment_stage.get(); offscreen_multisample_pipe_info.compatible_rt_attachment_descs = &offscreen_multisample_attachments_list; NGF_MISC_CHECK_NGF_ERROR( state->offscreen_multisample_pipeline.initialize(offscreen_multisample_pipe_info)); /* Create sampler.*/ const ngf_sampler_info samp_info { NGF_FILTER_LINEAR, NGF_FILTER_LINEAR, NGF_FILTER_NEAREST, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, 0.0f, 0.0f, 0.0f, 1.0f, false}; NGF_MISC_CHECK_NGF_ERROR(state->sampler.initialize(samp_info)); return static_cast(state); } void sample_draw_frame( ngf_render_encoder main_render_pass, float /* time_delta */, ngf_frame_token frame_token, uint32_t w, uint32_t h, float, void* userdata) { auto state = reinterpret_cast(userdata); ngf_irect2d offsc_viewport {0, 0, 512, 512}; ngf_irect2d onsc_viewport {0, 0, w, h}; ngf_cmd_buffer offscr_cmd_buf = nullptr; ngf_cmd_buffer_info cmd_info = {}; ngf_create_cmd_buffer(&cmd_info, &offscr_cmd_buf); ngf_start_cmd_buffer(offscr_cmd_buf, frame_token); { ngf::render_encoder renc { offscr_cmd_buf, state->is_multisample ? state->offscreen_multisample_rt : state->offscreen_rt, .0f, 0.0f, 0.0f, 0.0f, 1.0, 0u}; ngf_cmd_bind_gfx_pipeline( renc, state->is_multisample ? 
state->offscreen_multisample_pipeline : state->offscreen_pipeline); ngf_cmd_viewport(renc, &offsc_viewport); ngf_cmd_scissor(renc, &offsc_viewport); ngf_cmd_draw(renc, false, 0u, 3u, 1u); } ngf_submit_cmd_buffers(1, &offscr_cmd_buf); ngf_destroy_cmd_buffer(offscr_cmd_buf); ngf_cmd_bind_gfx_pipeline(main_render_pass, state->blit_pipeline); ngf_cmd_viewport(main_render_pass, &onsc_viewport); ngf_cmd_scissor(main_render_pass, &onsc_viewport); ngf::cmd_bind_resources( main_render_pass, ngf::descriptor_set<0>::binding<1>::texture(state->resolve_texture.get()), ngf::descriptor_set<0>::binding<2>::sampler(state->sampler.get())); ngf_cmd_draw(main_render_pass, false, 0u, 3u, 1u); } void sample_pre_draw_frame(ngf_cmd_buffer, void*) { } void sample_post_draw_frame(ngf_cmd_buffer, void*) { } void sample_draw_ui(void* userdata) { auto data = reinterpret_cast(userdata); ImGui::Begin("Multisampling"); ImGui::Checkbox("On/Off", &data->is_multisample); ImGui::End(); } void sample_post_submit(void*) { } void sample_shutdown(void* userdata) { auto data = static_cast(userdata); delete data; printf("shutting down\n"); } } // namespace ngf_samples ================================================ FILE: samples/common/camera-controller.cpp ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "camera-controller.h" #include "check.h" #include "imgui.h" #define _USE_MATH_DEFINES #include namespace ngf_samples { camera_matrices compute_camera_matrices(const camera_state& state, float aspect_ratio) { const float r = state.radius, azimuth = state.azimuth, incline = state.inclination; const nm::float3 point_on_sphere { r * sinf(azimuth) * sinf(incline), r * cosf(incline), r * sinf(incline) * cosf(azimuth)}; return { nm::look_at(state.look_at + point_on_sphere, state.look_at, nm::float3 {0.0f, 1.0f, 0.0f}), nm::perspective(nm::deg2rad(state.vfov), aspect_ratio, 0.01f, 1000.0f)}; } void camera_ui( camera_state& state, std::pair look_at_range, float look_at_speed, std::pair radius_range, float radius_speed) { NGF_MISC_ASSERT(look_at_range.first < look_at_range.second); NGF_MISC_ASSERT(radius_range.first < radius_range.second); ImGui::Text("camera"); ImGui::DragFloat3( "look at", state.look_at.data, look_at_speed, look_at_range.first, look_at_range.second, "%.1f", 0); ImGui::SliderFloat("azimuth", &state.azimuth, 0.0f, (float)M_PI * 2.0f, "%.1f", ImGuiSliderFlags_NoRoundToFormat); ImGui::SliderFloat("inclination", &state.inclination, 0.0f, (float)M_PI, "%.1f", ImGuiSliderFlags_NoRoundToFormat); ImGui::DragFloat( "radius", &state.radius, radius_speed, radius_range.first, radius_range.second, "%.1f", 0); ImGui::SliderFloat("fov", &state.vfov, 25.0f, 90.0f, "%.1f", 0); } } // namespace ngf_samples ================================================ FILE: samples/common/camera-controller.h 
================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "nicemath.h" #include namespace ngf_samples { struct camera_state { nm::float3 look_at {0.0f, 0.0f, 0.0f}; float radius = 3.0f; float azimuth = 0.0f; float inclination = 3.14f / 2.0f; float vfov = 60.0f; }; struct camera_matrices { nm::float4x4 world_to_view_transform; nm::float4x4 view_to_clip_transform; }; camera_matrices compute_camera_matrices(const camera_state& state, float aspect_ratio); void camera_ui( camera_state& state, std::pair look_at_range, float look_at_speed, std::pair radius_range, float radius_speed); } // namespace ngf_samples ================================================ FILE: samples/common/diagnostic-callback.cpp ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "diagnostic-callback.h" #include "logging.h" #include namespace ngf_samples { void sample_diagnostic_callback(ngf_diagnostic_message_type msg_type, void*, const char* fmt, ...) { va_list args; va_start(args, fmt); switch (msg_type) { case NGF_DIAGNOSTIC_ERROR: case NGF_DIAGNOSTIC_WARNING: ngf_misc::vloge(fmt, args); break; case NGF_DIAGNOSTIC_INFO: ngf_misc::vlogi(fmt, args); break; default:; } va_end(args); } } // namespace ngf_samples ================================================ FILE: samples/common/diagnostic-callback.h ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf.h" namespace ngf_samples { /** * A sample diagnostic callback implementation, which forwards received messages to the log. 
*/ void sample_diagnostic_callback( ngf_diagnostic_message_type msg_type, void* userdata, const char* fmt, ...); } // namespace ngf_samples ================================================ FILE: samples/common/imgui-backend.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "imgui-backend.h" #include "check.h" #include "nicegraf-util.h" #include "shader-loader.h" #include using namespace ngf_misc; namespace ngf_samples { ngf_imgui::ngf_imgui( ngf_xfer_encoder enc, ngf_sample_count main_render_target_sample_count, const unsigned char* font_atlas_bytes, uint32_t font_atlas_width, uint32_t font_atlas_height) { #if !defined(NGF_NO_IMGUI) vertex_stage_ = load_shader_stage("imgui", "VSMain", NGF_STAGE_VERTEX); fragment_stage_ = load_shader_stage("imgui", "PSMain", NGF_STAGE_FRAGMENT); ngf_error err = NGF_ERROR_OK; // Initialize the streamed uniform object. 
uniform_data_.initialize(3); // Initial pipeline configuration with OpenGL-style defaults. ngf_util_graphics_pipeline_data pipeline_data; ngf_util_create_default_graphics_pipeline_data(&pipeline_data); // Set up blend state. ngf_blend_info blend_info; blend_info.enable = true; blend_info.src_color_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA; blend_info.dst_color_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; blend_info.src_alpha_blend_factor = NGF_BLEND_FACTOR_SRC_ALPHA; blend_info.dst_alpha_blend_factor = NGF_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; blend_info.blend_op_color = NGF_BLEND_OP_ADD; blend_info.blend_op_alpha = NGF_BLEND_OP_ADD; blend_info.color_write_mask = NGF_COLOR_MASK_WRITE_BIT_R | NGF_COLOR_MASK_WRITE_BIT_G | NGF_COLOR_MASK_WRITE_BIT_B | NGF_COLOR_MASK_WRITE_BIT_A; pipeline_data.pipeline_info.color_attachment_blend_states = &blend_info; memset( pipeline_data.pipeline_info.blend_consts, 0, sizeof(pipeline_data.pipeline_info.blend_consts)); // Set up depth & stencil state. pipeline_data.depth_stencil_info.depth_test = false; pipeline_data.depth_stencil_info.stencil_test = false; // Set up multisampling. pipeline_data.multisample_info.sample_count = main_render_target_sample_count; // Assign programmable stages. ngf_graphics_pipeline_info& pipeline_info = pipeline_data.pipeline_info; pipeline_info.nshader_stages = 2u; pipeline_info.shader_stages[0] = vertex_stage_.get(); pipeline_info.shader_stages[1] = fragment_stage_.get(); // Disable backface culling. pipeline_data.rasterization_info.cull_mode = NGF_CULL_MODE_NONE; // Configure vertex input. 
ngf_vertex_attrib_desc vertex_attribs[] = { {0u, 0u, offsetof(ImDrawVert, pos), NGF_TYPE_FLOAT, 2u, false}, {1u, 0u, offsetof(ImDrawVert, uv), NGF_TYPE_FLOAT, 2u, false}, {2u, 0u, offsetof(ImDrawVert, col), NGF_TYPE_UINT8, 4u, true}, }; pipeline_data.vertex_input_info.attribs = vertex_attribs; pipeline_data.vertex_input_info.nattribs = 3u; ngf_vertex_buf_binding_desc binding_desc = { 0u, // binding sizeof(ImDrawVert), // stride NGF_INPUT_RATE_VERTEX // input rate }; pipeline_data.vertex_input_info.nvert_buf_bindings = 1u; pipeline_data.vertex_input_info.vert_buf_bindings = &binding_desc; pipeline_data.pipeline_info.compatible_rt_attachment_descs = ngf_default_render_target_attachment_descs(); err = pipeline_.initialize(pipeline_data.pipeline_info); NGF_MISC_ASSERT(err == NGF_ERROR_OK); // Create and populate font texture. const ngf_image_info font_texture_info = { NGF_IMAGE_TYPE_IMAGE_2D, // type {(uint32_t)font_atlas_width, (uint32_t)font_atlas_height, 1u}, // extent 1u, // nmips 1u, // nlayers NGF_IMAGE_FORMAT_RGBA8, // image_format NGF_SAMPLE_COUNT_1, // samples NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_XFER_DST // usage_hint }; err = font_texture_.initialize(font_texture_info); NGF_MISC_ASSERT(err == NGF_ERROR_OK); ImGui::GetIO().Fonts->TexID = (ImTextureID)(uintptr_t)font_texture_.get(); const ngf_buffer_info pbuffer_info { 4u * (size_t)font_atlas_width * (size_t)font_atlas_height, NGF_BUFFER_STORAGE_HOST_WRITEABLE, NGF_BUFFER_USAGE_XFER_SRC}; err = texture_data_.initialize(pbuffer_info); NGF_MISC_ASSERT(err == NGF_ERROR_OK); void* mapped_texture_data = ngf_buffer_map_range( texture_data_.get(), 0, 4 * (size_t)font_atlas_width * (size_t)font_atlas_height); memcpy( mapped_texture_data, font_atlas_bytes, 4 * (size_t)font_atlas_width * (size_t)font_atlas_height); ngf_buffer_flush_range( texture_data_.get(), 0, 4 * (size_t)font_atlas_width * (size_t)font_atlas_height); ngf_buffer_unmap(texture_data_.get()); const ngf_image_write img_write { .src_offset = 0u, 
.dst_offset = {0, 0, 0}, .extent = {.width = font_atlas_width, .height = font_atlas_height, .depth = 1u}, .dst_level = 0u, .dst_base_layer = 0u, .nlayers = 1u}; ngf_cmd_write_image(enc, texture_data_.get(), font_texture_.get(), &img_write, 1u); // Create a sampler for the font texture. ngf_sampler_info sampler_info { NGF_FILTER_NEAREST, NGF_FILTER_NEAREST, NGF_FILTER_NEAREST, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, NGF_WRAP_MODE_CLAMP_TO_EDGE, 0.0f, 0.0f, 0.0f, 1.0f, false, NGF_COMPARE_OP_NEVER}; tex_sampler_.initialize(sampler_info); #endif } void ngf_imgui::record_rendering_commands(ngf_render_encoder enc) { ImGui::Render(); ImDrawData* data = ImGui::GetDrawData(); if (data->TotalIdxCount <= 0) return; // Compute effective viewport width and height, apply scaling for // retina/high-dpi displays. ImGuiIO& io = ImGui::GetIO(); int fb_width = (int)(data->DisplaySize.x * io.DisplayFramebufferScale.x); int fb_height = (int)(data->DisplaySize.y * io.DisplayFramebufferScale.y); data->ScaleClipRects(io.DisplayFramebufferScale); // Avoid rendering when minimized. if (fb_width <= 0 || fb_height <= 0) { return; } // Build projection matrix. const ImVec2& pos = data->DisplayPos; const float L = pos.x; const float R = pos.x + data->DisplaySize.x; const float T = pos.y; const float B = pos.y + data->DisplaySize.y; const uniform_data ortho_projection = {{ {2.0f / (R - L), 0.0f, 0.0f, 0.0f}, {0.0f, 2.0f / (B - T), 0.0f, 0.0f}, {0.0f, 0.0f, -1.0f, 0.0f}, {(R + L) / (L - R), (T + B) / (T - B), 0.0f, 1.0f}, }}; uniform_data_.write(ortho_projection); // Bind the ImGui rendering pipeline. ngf_cmd_bind_gfx_pipeline(enc, pipeline_); // Bind resources. ngf::cmd_bind_resources( enc, uniform_data_.bind_op_at_current_offset(0u, 0u), ngf::descriptor_set<0>::binding<1>::texture(font_texture_.get()), ngf::descriptor_set<0>::binding<2>::sampler(tex_sampler_.get())); // Set viewport. 
ngf_irect2d viewport_rect = {0u, 0u, (uint32_t)fb_width, (uint32_t)fb_height}; ngf_cmd_viewport(enc, &viewport_rect); ngf_cmd_scissor(enc, &viewport_rect); // These vectors will store vertex and index data for the draw calls. // Later this data will be transferred to GPU buffers. std::vector vertex_data((size_t)data->TotalVtxCount, ImDrawVert()); std::vector index_data((size_t)data->TotalIdxCount, 0u); struct draw_data { ngf_irect2d scissor; uint32_t first_elem; uint32_t nelem; }; std::vector draw_data; uint32_t last_vertex = 0u; uint32_t last_index = 0u; // Process each ImGui command list and translate it into the nicegraf // command buffer. for (int i = 0u; i < data->CmdListsCount; ++i) { // Append vertex data. const ImDrawList* imgui_cmd_list = data->CmdLists[i]; memcpy( &vertex_data[last_vertex], imgui_cmd_list->VtxBuffer.Data, sizeof(ImDrawVert) * (size_t)imgui_cmd_list->VtxBuffer.Size); // Append index data. for (int a = 0u; a < imgui_cmd_list->IdxBuffer.Size; ++a) { // ImGui uses separate index buffers, but we'll use just one. We will // update the index values accordingly. index_data[last_index + (size_t)a] = (ImDrawIdx)(last_vertex + imgui_cmd_list->IdxBuffer[a]); } last_vertex += (uint32_t)imgui_cmd_list->VtxBuffer.Size; // Process each ImGui command in the draw list. 
uint32_t idx_buffer_sub_offset = 0u; for (int j = 0u; j < imgui_cmd_list->CmdBuffer.Size; ++j) { const ImDrawCmd& cmd = imgui_cmd_list->CmdBuffer[j]; if (cmd.UserCallback != nullptr) { cmd.UserCallback(imgui_cmd_list, &cmd); } else { ImVec4 clip_rect = ImVec4( cmd.ClipRect.x - pos.x, cmd.ClipRect.y - pos.y, cmd.ClipRect.z - pos.x, cmd.ClipRect.w - pos.y); if (clip_rect.x < (float)fb_width && clip_rect.y < (float)fb_height && clip_rect.z >= 0.0f && clip_rect.w >= 0.0f) { const ngf_irect2d scissor_rect { (int32_t)clip_rect.x, (int32_t)clip_rect.y, (uint32_t)(clip_rect.z - clip_rect.x), (uint32_t)(clip_rect.w - clip_rect.y)}; draw_data.push_back( {scissor_rect, last_index + idx_buffer_sub_offset, (uint32_t)cmd.ElemCount}); idx_buffer_sub_offset += (uint32_t)cmd.ElemCount; } } } last_index += (uint32_t)imgui_cmd_list->IdxBuffer.Size; } // Create new vertex and index buffers. ngf_buffer_info attrib_buffer_info { sizeof(ImDrawVert) * vertex_data.size(), // data size NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE, NGF_BUFFER_USAGE_VERTEX_BUFFER}; ngf_buffer attrib_buffer = nullptr; ngf_create_buffer(&attrib_buffer_info, &attrib_buffer); attrib_buffer_.reset(attrib_buffer); void* mapped_attrib_buffer = ngf_buffer_map_range(attrib_buffer, 0, attrib_buffer_info.size); NGF_MISC_ASSERT(mapped_attrib_buffer != nullptr); memcpy(mapped_attrib_buffer, vertex_data.data(), attrib_buffer_info.size); ngf_buffer_flush_range(attrib_buffer, 0, attrib_buffer_info.size); ngf_buffer_unmap(attrib_buffer); ngf_buffer_info index_buffer_info { sizeof(ImDrawIdx) * index_data.size(), NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE, NGF_BUFFER_USAGE_INDEX_BUFFER}; ngf_buffer index_buffer = nullptr; ngf_create_buffer(&index_buffer_info, &index_buffer); index_buffer_.reset(index_buffer); void* mapped_index_buffer = ngf_buffer_map_range(index_buffer, 0, index_buffer_info.size); NGF_MISC_ASSERT(mapped_index_buffer != nullptr); memcpy(mapped_index_buffer, index_data.data(), index_buffer_info.size); 
ngf_buffer_flush_range(index_buffer, 0, index_buffer_info.size); ngf_buffer_unmap(index_buffer); ngf_cmd_bind_index_buffer( enc, index_buffer, 0u, sizeof(ImDrawIdx) < 4 ? NGF_TYPE_UINT16 : NGF_TYPE_UINT32); ngf_cmd_bind_attrib_buffer(enc, attrib_buffer, 0u, 0u); for (const auto& draw : draw_data) { ngf_cmd_scissor(enc, &draw.scissor); ngf_cmd_draw(enc, true, draw.first_elem, draw.nelem, 1u); } } } // namespace ngf_samples ================================================ FILE: samples/common/imgui-backend.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "imgui.h" #include "nicegraf-wrappers.h" #include "nicegraf.h" namespace ngf_samples { /** * This is a nicegraf-based rendering backend for ImGui. * It's used to render the UI for samples. 
*/ class ngf_imgui { public: /** * Initializes the internal state of the ImGui rendering backend, and uploads * the font texture by recording the appropriate commands into the given * transfer encoder. */ ngf_imgui( ngf_xfer_encoder font_xfer_encoder, ngf_sample_count main_render_target_sample_count, const unsigned char* font_atlast_bytes, uint32_t font_atlas_width, uint32_t font_atlas_height); /** * Records commands for rendering the contents ofteh current ImGui draw data into the * given render encoder. */ void record_rendering_commands(ngf_render_encoder enc); private: struct uniform_data { float ortho_projection[4][4]; }; #if !defined(NGF_NO_IMGUI) ngf::graphics_pipeline pipeline_; ngf::uniform_multibuffer uniform_data_; ngf::image font_texture_; ngf::sampler tex_sampler_; ngf::buffer attrib_buffer_; ngf::buffer index_buffer_; ngf::buffer texture_data_; ngf::shader_stage vertex_stage_; ngf::shader_stage fragment_stage_; ngf::render_target default_rt_; #endif }; } // namespace ngf_samples ================================================ FILE: samples/common/main.cpp ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "nicegraf-mtl-handles.h" #define GLFW_INCLUDE_NONE #include #if defined(_WIN32) || defined(_WIN64) #define GLFW_EXPOSE_NATIVE_WIN32 #elif defined(__APPLE__) #define GLFW_EXPOSE_NATIVE_COCOA #include "platform/macos/glfw-cocoa-contentview.h" #else #define GLFW_EXPOSE_NATIVE_X11 #endif #include "check.h" #include "diagnostic-callback.h" #include "imgui-backend.h" #include "imgui_impl_glfw.h" #include "logging.h" #include "nicegraf-wrappers.h" #include "sample-interface.h" #include #include #include #include int main(int, char**) { /** * We prefer a more verbose diagnostic output from nicegraf in debug builds. */ #if defined(NDEBUG) constexpr ngf_diagnostic_log_verbosity diagnostics_verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT; #else constexpr ngf_diagnostic_log_verbosity diagnostics_verbosity = NGF_DIAGNOSTICS_VERBOSITY_DETAILED; #endif /** * Select a rendering device to be used by nicegraf. */ uint32_t ndevices = 0u; const ngf_device* devices = NULL; NGF_MISC_CHECK_NGF_ERROR(ngf_get_device_list(&devices, &ndevices)); const char* device_perf_tier_names[NGF_DEVICE_PERFORMANCE_TIER_COUNT] = { "high", "low", "unknown"}; /** * For the sample code, we try to select a high-perf tier device. If one isn't available, we just * fall back on the first device in the list. You may want to choose a different strategy for your * specific application, or allow the user to pick. */ size_t high_power_device_idx = (~0u); ngf_misc::logi("available rendering devices: "); for (uint32_t i = 0; i < ndevices; ++i) { /** * If no preferred index has been selected yet, and the current device is high-power, pick it as * preferred. otherwise, just log the device details. 
*/ ngf_misc::logi( " device %d : %s (perf tier : `%s`)", i, devices[i].name, device_perf_tier_names[devices[i].performance_tier]); if (high_power_device_idx == (~0u) && devices[i].performance_tier == NGF_DEVICE_PERFORMANCE_TIER_HIGH) { high_power_device_idx = i; } } /* Fall back to 1st device if no high-power device was found. */ const size_t preferred_device_idx = (high_power_device_idx == ~0u) ? 0 : high_power_device_idx; const ngf_device_handle device_handle = devices[preferred_device_idx].handle; ngf_misc::logi("selected device %d", preferred_device_idx); /* * Initialize RenderDoc. * Allows capturing of frame data to be opened in the RenderDoc debugger. * To enable RenderDoc functionality, fill in the below struct with the path * to the RenderDoc library (renderdoc.dll on Windows, librenderdoc.so on Linux, * N/A on Mac OSX) and a file path template for where the captures should be stored. * * For example, if your library is saved in C:\example\dir\renderdoc.dll and you want to save * your captures as C:\capture\dir\test. You would fill out the struct as such: * * const ngf_renderdoc_info renderdoc_info = { * .renderdoc_lib_path = "C:\\example\\dir\\renderdoc.dll", * .renderdoc_destination_template = "C:\\capture\\dir\\test"}; * * Provided that the above steps are completed, captures can be taken by pressing the * "C" key while a sample is running. Captures will be saved to the specified directory. * Custom instrumenting within the samples can also be done by making calls to * ngf_capture_begin and ngf_capture_end, respectively. */ const ngf_renderdoc_info renderdoc_info = { .renderdoc_lib_path = NULL, .renderdoc_destination_template = NULL}; /* * Initialize nicegraf. * Set our rendering device preference to "discrete" to pick a high-power GPU if one is available, * and install a diagnostic callback.
*/ const ngf_diagnostic_info diagnostic_info { .verbosity = diagnostics_verbosity, .userdata = nullptr, .callback = ngf_samples::sample_diagnostic_callback, .enable_debug_groups = true }; const ngf_init_info init_info { .diag_info = &diagnostic_info, .allocation_callbacks = NULL, .device = device_handle, .renderdoc_info = (renderdoc_info.renderdoc_lib_path != NULL) ? &renderdoc_info : NULL}; NGF_MISC_CHECK_NGF_ERROR(ngf_initialize(&init_info)); ngf_misc::logi( "device-local memory is host-visible: %s", ngf_get_device_capabilities()->device_local_memory_is_host_visible ? "YES" : "NO"); /** * Initialize imgui and generate its font atlas. */ ImGuiContext* imgui_ctx = ImGui::CreateContext(); ImGui::SetCurrentContext(imgui_ctx); unsigned char* imgui_font_atlas_bytes; int imgui_font_atlas_width, imgui_font_atlas_height; ImGui::GetIO().Fonts->GetTexDataAsRGBA32( &imgui_font_atlas_bytes, &imgui_font_atlas_width, &imgui_font_atlas_height); /** * Initialize glfw. */ glfwInit(); /** * Create a window. * The `width` and `height` here refer to the dimensions of the window's "client area", i.e. the * area that can actually be rendered to (excludes borders and any other decorative elements). The * dimensions we request are a hint, we need to get the actual dimensions after the window is * created. * Note that we deliberately create the window before setting up the nicegraf context. This is * done so that when the destructors are invoked, the context is destroyed before the window - * changing this sequence of events might lead to misbehavior. * Also note that we set a special window hint to make sure GLFW does _not_ attempt to create * an OpenGL (or other API) context for us - this is nicegraf's job. 
*/ constexpr uint32_t window_width_hint = 800, window_height_hint = 600; glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); GLFWwindow* window = glfwCreateWindow(window_width_hint, window_height_hint, "nicegraf sample", nullptr, nullptr); if (window == nullptr) { ngf_misc::loge("Failed to create a window, exiting."); return 0; } int fb_width, fb_height; glfwGetFramebufferSize(window, &fb_width, &fb_height); ngf_misc::logi("created a window with client area of size size %d x %d.", fb_width, fb_height); /** * Make sure keyboard/mouse work with imgui. */ ImGui_ImplGlfw_InitForOther(window, true); /** * Retrieve the native window handle to pass on to nicegraf. */ uintptr_t native_window_handle = 0; #if defined(_WIN32) || defined(_WIN64) native_window_handle = (uintptr_t)glfwGetWin32Window(window); #elif defined(__APPLE__) native_window_handle = (uintptr_t)ngf_samples::get_glfw_contentview(window); #else native_window_handle = (uintptr_t)glfwGetX11Window(window); #endif // Begin Context Scope { /** * Configure the swapchain and create a nicegraf context. * Use an sRGB color attachment and a 32-bit float depth attachment. Enable MSAA with * the highest supported framebuffer sample count. */ const ngf_sample_count main_render_target_sample_count = ngf_get_device_capabilities()->max_supported_framebuffer_color_sample_count; const ngf_swapchain_info swapchain_info = { .color_format = NGF_IMAGE_FORMAT_BGRA8_SRGB, .colorspace = NGF_COLORSPACE_SRGB_NONLINEAR, .depth_format = NGF_IMAGE_FORMAT_DEPTH32, .sample_count = main_render_target_sample_count, .capacity_hint = 3u, .width = (uint32_t)fb_width, .height = (uint32_t)fb_height, .present_mode = NGF_PRESENTATION_MODE_FIFO, .native_handle = native_window_handle}; const ngf_context_info ctx_info = { .swapchain_info = &swapchain_info, .shared_context = nullptr}; ngf::context context; NGF_MISC_CHECK_NGF_ERROR(context.initialize(ctx_info)); /** * Make the newly created context current on this thread. 
* Once a context has been made current on a thread, it cannot be switched to another thread, * and another context cannot be made current on that thread. */ NGF_MISC_CHECK_NGF_ERROR(ngf_set_context(context)); /** * This is the nicegraf-based rendering backend for ImGui - we will initialize it * on first frame. */ std::optional imgui_backend; /** * Main command buffer that samples will record rendering commands into. */ ngf::cmd_buffer main_cmd_buffer; NGF_MISC_CHECK_NGF_ERROR(main_cmd_buffer.initialize(ngf_cmd_buffer_info {})); /** * Pointer to sample-specific data, returned by sample_initialize. * It shall be passed to the sample on every frame. */ void* sample_opaque_data = nullptr; /** * Main loop. Exit when either the window closes or `poll_events` returns false, indicating that * the application has received a request to exit. */ bool first_frame = true; auto prev_frame_start = std::chrono::system_clock::now(); while (!glfwWindowShouldClose(window)) { glfwPollEvents(); auto frame_start = std::chrono::system_clock::now(); const std::chrono::duration time_delta = frame_start - prev_frame_start; float time_delta_f = time_delta.count(); prev_frame_start = frame_start; if (glfwGetKey(window, GLFW_KEY_C) == GLFW_PRESS) { ngf_renderdoc_capture_next_frame(); } /** * Query the updated size of the window and handle resize events. */ const int old_fb_width = fb_width, old_fb_height = fb_height; glfwGetFramebufferSize(window, &fb_width, &fb_height); bool resize_successful = true; const bool need_resize = (fb_width != old_fb_width || fb_height != old_fb_height); if (need_resize) { ngf_misc::logd( "window resizing detected, calling ngf_resize context. " "old size: %d x %d; new size: %d x %d", old_fb_width, old_fb_height, fb_width, fb_height); resize_successful &= (NGF_ERROR_OK == ngf_resize_context(context, (uint32_t)fb_width, (uint32_t)fb_height)); } if (resize_successful) { /** * Begin the frame and start the main command buffer. 
*/ ngf_frame_token frame_token; if (ngf_begin_frame(&frame_token) != NGF_ERROR_OK) continue; NGF_MISC_CHECK_NGF_ERROR(ngf_start_cmd_buffer(main_cmd_buffer, frame_token)); /** * On first frame, initialize the sample and the ImGui rendering backend. */ if (first_frame) { ngf_cmd_begin_debug_group(main_cmd_buffer, "Initial GPU uploads"); /** * Start a new transfer command encoder for uploading resources to the GPU. */ ngf_xfer_encoder xfer_encoder {}; ngf_xfer_pass_info xfer_pass_info {}; NGF_MISC_CHECK_NGF_ERROR( ngf_cmd_begin_xfer_pass(main_cmd_buffer, &xfer_pass_info, &xfer_encoder)); /** * Initialize the sample, and save the opaque data pointer. */ ngf_misc::logi("Initializing sample"); sample_opaque_data = ngf_samples::sample_initialize( (uint32_t)fb_width, (uint32_t)fb_height, main_render_target_sample_count, xfer_encoder); /** * Exit if sample failed to initialize. */ if (sample_opaque_data == nullptr) { ngf_misc::loge("Sample failed to initialize"); break; } ngf_misc::logi("Sample initialized"); /** * Initialize the ImGui rendering backend. */ imgui_backend.emplace( xfer_encoder, main_render_target_sample_count, imgui_font_atlas_bytes, imgui_font_atlas_width, imgui_font_atlas_height); /** * Finish the transfer encoder. */ NGF_MISC_CHECK_NGF_ERROR(ngf_cmd_end_xfer_pass(xfer_encoder)); ngf_cmd_end_current_debug_group(main_cmd_buffer); } /** * Let the sample code record any commands prior to the main render pass. */ ngf_cmd_begin_debug_group(main_cmd_buffer, "Sample pre-draw frame"); ngf_samples::sample_pre_draw_frame( main_cmd_buffer, sample_opaque_data); ngf_cmd_end_current_debug_group(main_cmd_buffer); /** * Record the commands for the main render pass. */ ngf_cmd_begin_debug_group(main_cmd_buffer, "Main render pass"); { /** * Begin the main render pass. */ ngf::render_encoder main_render_pass_encoder( main_cmd_buffer, ngf_default_render_target(), 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0); /** * Call into the sample code to draw a single frame. 
*/ static float t = 0.0; ngf_samples::sample_draw_frame( main_render_pass_encoder, time_delta_f, frame_token, (uint32_t)fb_width, (uint32_t)fb_height, t, sample_opaque_data); t += 0.008f; /** * Begin a new ImGui frame. */ ImGui_ImplGlfw_NewFrame(); ImGui::NewFrame(); /** * Call into the sample-specific code to execute ImGui UI commands, and end ImGui frame. */ ngf_samples::sample_draw_ui(sample_opaque_data); ImGui::EndFrame(); /** * Draw the UI on top of everything else. */ imgui_backend->record_rendering_commands(main_render_pass_encoder); } ngf_cmd_end_current_debug_group(main_cmd_buffer); /** * Let the sample record commands after the main render pass. */ ngf_cmd_begin_debug_group(main_cmd_buffer, "Sample post-draw frame"); ngf_samples::sample_post_draw_frame(main_cmd_buffer, sample_opaque_data); ngf_cmd_end_current_debug_group(main_cmd_buffer); /** * Submit the main command buffer and end the frame. */ ngf_cmd_buffer submitted_cmd_bufs[] = {main_cmd_buffer.get()}; NGF_MISC_CHECK_NGF_ERROR(ngf_submit_cmd_buffers(1, submitted_cmd_bufs)); ngf_samples::sample_post_submit(sample_opaque_data); if (ngf_end_frame(frame_token) != NGF_ERROR_OK) { ngf_misc::loge("failed to present image to swapchain!"); } } else { ngf_misc::loge("failed to handle window resize!"); } first_frame = false; } /** * De-initialize any sample-specific data, shut down ImGui. 
*/ ngf_misc::logi("Finishing execution"); ngf_samples::sample_shutdown(sample_opaque_data); ImGui::DestroyContext(imgui_ctx); } // End Context Scope ngf_shutdown(); return 0; } ================================================ FILE: samples/common/platform/macos/glfw-cocoa-contentview.h ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include namespace ngf_samples { void* get_glfw_contentview(GLFWwindow *win); } ================================================ FILE: samples/common/platform/macos/glfw-cocoa-contentview.mm ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "platform/macos/glfw-cocoa-contentview.h" #define GLFW_EXPOSE_NATIVE_COCOA #include namespace ngf_samples { /** * On Mac, the NSWindow's ContentView needs to be * passed to nicegraf as the native window handle. 
*/ void* get_glfw_contentview(GLFWwindow *win) { NSWindow* w = glfwGetCocoaWindow(win); return (void*)CFBridgingRetain(w.contentView); } } ================================================ FILE: samples/common/sample-interface.h ================================================ /** * Copyright (c) 2023 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #pragma warning(disable:26812) #include "nicegraf.h" #include /** * Each sample has to implement the functions declared in this header. * They are called by the common sample code. */ namespace ngf_samples { /** * This function is called once at startup, to let the sample set up whatever it needs. * This function may assume that a nicegraf context has already been created and made current on * the calling thread. * It gets passed the dimensions of the window to be rendered to, as well as the sample count * of the main rendertarget. 
* It also gets a transfer encoder, which samples can use to upload some resources to the * GPU. * The function shall return a pointer that will be passed in to other callbacks. */ void* sample_initialize( uint32_t initial_window_width, uint32_t initial_window_height, ngf_sample_count main_render_target_sample_count, ngf_xfer_encoder xfer_encoder); /** * This function gets called every frame before beginning the main render pass. * It receives the command buffer that the main render pass will eventually be * recorded into. */ void sample_pre_draw_frame(ngf_cmd_buffer cmd_buffer, void* userdata); /** * This function gets called every frame, to render the frame contents. * It gets passed a token identifying the frame, the current window dimensions, and a (monotonically * increasing) timestamp. Window resizes are generally handled in the common code, but it's up to * the specific sample to monitor for size changes and e.g. resize any rendertargets that have to * match screen resolution. `userdata` is the pointer returned previously by `sample_initialize`. */ void sample_draw_frame( ngf_render_encoder main_render_pass, float time_delta_ms, ngf_frame_token frame_token, uint32_t width, uint32_t height, float time, void* userdata); /** * This function gets called every frame after finishing the main render pass. * It receives the command buffer that the main render pass was previously * recorded into. */ void sample_post_draw_frame(ngf_cmd_buffer cmd_buffer, void* userdata); void sample_post_submit(void* userdata); /** * This function gets called every frame, to render the UI of the sample. It should mostly consist * of ImGui calls. `userdata` is the pointer returned previously by `sample_initialize`. */ void sample_draw_ui(void* userdata); /** * This function gets called once, before the sample ceases execution, to perform any cleanup * actions. This function may assume that a nicegraf context is still present and current on the * calling thread.
`userdata` is the pointer returned previously by `sample_initialize`. */ void sample_shutdown(void* userdata); } // namespace ngf_samples ================================================ FILE: samples/common/staging-image.cpp ================================================ #include "staging-image.h" #include "check.h" #include "file-utils.h" #include "targa-loader.h" #include #include using namespace ngf_misc; namespace ngf_samples { staging_image create_staging_image_from_tga(const char* file_name) { /* Read in the texture image file.*/ std::vector texture_tga_data = load_file(file_name); /* this call does nothing but quickly get the width & height. */ uint32_t texture_width, texture_height; load_targa( texture_tga_data.data(), texture_tga_data.size(), nullptr, 0u, &texture_width, &texture_height); /* Create an appropriately sized staging buffer for the texture upload. */ const size_t texture_size_bytes = texture_width * texture_height * 4u; ngf::buffer staging_buf; NGF_MISC_CHECK_NGF_ERROR(staging_buf.initialize(ngf_buffer_info { .size = texture_size_bytes, .storage_type = NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE, .buffer_usage = NGF_BUFFER_USAGE_XFER_SRC})); void* mapped_staging_buf = ngf_buffer_map_range(staging_buf.get(), 0, texture_size_bytes); /* Decode the loaded targa file, writing RGBA values directly into mapped memory. */ load_targa( texture_tga_data.data(), texture_tga_data.size(), mapped_staging_buf, texture_size_bytes, &texture_width, &texture_height); /* Flush and unmap the staging buffer. */ ngf_buffer_flush_range(staging_buf.get(), 0, texture_size_bytes); ngf_buffer_unmap(staging_buf.get()); /* Count the number of mipmaps we'll have to generate for trilinear filtering. Note that we keep generating mip levels until both dimensions are reduced to 1. 
*/ uint32_t nmips = 1 + static_cast(std::floor(std::log2(std::max(texture_width, texture_height)))); return staging_image { .staging_buffer = std::move(staging_buf), .width_px = texture_width, .height_px = texture_height, .nmax_mip_levels = nmips}; } } // namespace ngf_samples ================================================ FILE: samples/common/staging-image.h ================================================ #pragma once #include "nicegraf-wrappers.h" namespace ngf_samples { /** * This is a helper type used by the samples to upload image data to the rendering device. * Usually the samples create a staging buffer that is just enough to upload a given image. The raw * RGBA data is loaded directly into the staging buffer, which the sample can then use to populate * an image. After that, the staging buffer is discarded. This simple method works for the sample * code, but more advanced applications will require a different approach. */ struct staging_image { ngf::buffer staging_buffer; /** Staging buffer containing raw image data. */ uint32_t width_px; /** Image width in pixels. */ uint32_t height_px; /** Image height in pixels. */ uint32_t nmax_mip_levels; /** Maximum number of mip levels that may be generated for this image. */ }; /** * Creates a staging_image populated with the raw RGBA data from the given Targa file.
*/ staging_image create_staging_image_from_tga(const char* file_name); } // namespace ngf_samples ================================================ FILE: samples/shaders/blinn-phong.hlsl ================================================ //T: blinn-phong vs:VSMain ps:PSMain [[vk::constant_id(0)]] const uint enableHalfLambert = 0; struct PixelShaderInput { float4 clipSpacePosition : SV_Position; float4 viewSpaceInterpNormal : ATTR0; float4 viewSpacePosition : ATTR1; }; struct VertexShaderInput { float3 objSpacePosition : SV_Position; float3 objSpaceNormal : ATTR0; }; struct ShaderUniforms { float4x4 objToViewTransform; float4x4 viewToClipTransform; float4 ambientLightIntensity; float4 viewSpacePointLightPosition; float4 pointLightIntensity; float4 viewSpaceDirectionalLightDirection; float4 directionalLightIntensity; float4 diffuseReflectance; float4 specularCoefficient; float shininess; }; [[vk::binding(0, 0)]] ConstantBuffer shaderUniforms; PixelShaderInput VSMain(VertexShaderInput vertexAttrs) { float4 viewSpacePosition = mul(shaderUniforms.objToViewTransform, float4(vertexAttrs.objSpacePosition, 1.0)); float4 viewSpaceNormal = normalize(mul(shaderUniforms.objToViewTransform, float4(vertexAttrs.objSpaceNormal, 0.0))); // TODO inverse transpose. 
float4 clipSpacePosition = mul(shaderUniforms.viewToClipTransform, viewSpacePosition); clipSpacePosition.y *= -1.0; PixelShaderInput result = { clipSpacePosition, viewSpaceNormal, viewSpacePosition, }; return result; } float computeCosineFactor(float3 direction, float3 normal) { float cosineFactor = dot(direction, normal); if (enableHalfLambert == 0) { return max(0.0, cosineFactor); } else { cosineFactor = 0.5 * cosineFactor + 0.5; cosineFactor *= cosineFactor; return cosineFactor; } } float3 computeIrradiance(float3 intensity, float3 direction, float3 normal, float distSquared) { return intensity * computeCosineFactor(direction, normal) / distSquared; } float3 computeSpecular(float3 position, float3 lightDirection, float3 normal, float shininess) { float3 directionToObserver = normalize(-position); float3 halfwayVector = normalize(directionToObserver + lightDirection); return pow(max(0.0, dot(normal, halfwayVector)), shininess); } float4 PSMain(PixelShaderInput fragmentAttribs) : SV_Target { float4 viewSpaceNormal = normalize(fragmentAttribs.viewSpaceInterpNormal); float4 viewSpaceVectorToPointLight = shaderUniforms.viewSpacePointLightPosition - fragmentAttribs.viewSpacePosition; float distanceToPointLightSquared = dot(viewSpaceVectorToPointLight, viewSpaceVectorToPointLight); float4 viewSpaceDirectionToPointLight = normalize(viewSpaceVectorToPointLight); float3 pointLightIrradiance = computeIrradiance( shaderUniforms.pointLightIntensity.rgb, viewSpaceDirectionToPointLight.xyz, viewSpaceNormal.xyz, distanceToPointLightSquared); float3 directionalLightIrradiance = computeIrradiance( shaderUniforms.directionalLightIntensity.rgb, normalize(shaderUniforms.viewSpaceDirectionalLightDirection.xyz), viewSpaceNormal.xyz, 1.0f); float3 specularReflectanceFromPointLight = shaderUniforms.specularCoefficient.rgb * computeSpecular( fragmentAttribs.viewSpacePosition.xyz, viewSpaceDirectionToPointLight.xyz, viewSpaceNormal.xyz, shaderUniforms.shininess); float3 
specularReflectanceFromDirectionalLight = shaderUniforms.specularCoefficient.rgb * computeSpecular( fragmentAttribs.viewSpacePosition.xyz, normalize(shaderUniforms.viewSpaceDirectionalLightDirection.xyz), viewSpaceNormal.xyz, shaderUniforms.shininess); float3 pointLightContribution = (shaderUniforms.diffuseReflectance.rgb + specularReflectanceFromPointLight) * pointLightIrradiance; float3 directionalLightContribution = (shaderUniforms.diffuseReflectance.rgb + specularReflectanceFromDirectionalLight) * directionalLightIrradiance; return float4(pointLightContribution + directionalLightContribution + shaderUniforms.ambientLightIntensity.rgb, 1.0); } ================================================ FILE: samples/shaders/compute-demo.hlsl ================================================ // T: compute-demo cs:CSMain [[vk::binding(0, 0)]] RWTexture2D outputImage; float2 f(float2 x, float2 c) { return mul(x, float2x2(x.x, x.y, -x.y, x.x)) + c; } float3 palette(float t, float3 a, float3 b, float3 c, float3 d) { return a + b * cos(6.28318 * (c * t + d)); /* thanks, iq */ } [numthreads(4, 4, 1)] void CSMain(uint3 tid : SV_DispatchThreadID) { float2 uv = float2 ((float)tid.x / 512.0f, (float)tid.y / 512.0f); float2 c = float2(-0.6, 0.0) + (2.0*uv - 1.0); float2 x = float2(0.0, 0.0); bool escaped = false; int iterations = 0; for (int i = 0; i < 50; i++) { iterations = i; x = f(x, c); if (length(x) > 2.0) { escaped = true; break; } } outputImage[tid.xy] = (escaped ? 
float4( palette( float(iterations) / 50.0, float3(0.3, 0.2, 0.4), float3(0.2, 0.1, 0.0), float3(1.0, 1.0, 1.0), float3(0.3, 0.5, 0.2)), 1.0) : float4(0.0, 0.0, 0.0, 1.0)); } ================================================ FILE: samples/shaders/compute-vertices.hlsl ================================================ struct VertexData { float3 position; float3 normal; }; struct VertexOutput { float4 position : SV_Position; float height : ATTR0; }; // T: render-vertices vs:VSMain ps:PSMain struct VertexShaderUniforms { float4x4 objToViewTransform; float4x4 viewToClipTransform; }; [[vk::binding(0, 0)]] ConstantBuffer vertShaderUniforms; #define maxAmplitude 0.05 VertexOutput VSMain(uint vertID : SV_VertexID, float4 pos : SV_Position) { VertexOutput result; result.height = pos.y; result.position = mul(vertShaderUniforms.viewToClipTransform, mul(vertShaderUniforms.objToViewTransform, float4(pos.xyz, 1.0))); return result; } float4 PSMain(VertexOutput pxIn) : SV_Target { float shade = saturate(-(pxIn.height / (maxAmplitude + 0.04)) * 0.5 + 0.5); shade = shade * shade; return shade * float4(1., 1., 1.0, 1.0); } // T: compute-vertices cs:CSMain struct ComputeShaderUniforms { float4 time; }; [[vk::binding(0, 1)]] RWStructuredBuffer outputBuffer; [[vk::binding(1, 1)]] ConstantBuffer computeShaderUniforms; [numthreads(2, 2, 1)] void CSMain(uint3 tid : SV_DispatchThreadID) { const uint vertsPerSide = 512; uint vertID = tid.y * vertsPerSide + tid.x; uint2 vertRowColumn = tid.xy; float2 vertUV = float2( (float)vertRowColumn.x / (float)(vertsPerSide - 1), (float)vertRowColumn.y / (float)(vertsPerSide - 1)); float2 vertXZ = vertUV * 2.0 - float2(1.0, 1.0); float height = maxAmplitude * sin(cos(computeShaderUniforms.time.x * 2.0 + vertRowColumn.x * 0.1) + vertRowColumn.y * 0.1); float4 position = float4(vertXZ.x, height, vertXZ.y, 1.0); outputBuffer[vertID] = position; } ================================================ FILE: samples/shaders/cubemap.hlsl 
================================================ // T: cubemap vs:VSMain ps:PSMain define:GENERIC_FS_INPUT_HAS_CLIPSPACE_POS=1 // T: cubemap-array vs:VSMain ps:PSMain define:GENERIC_FS_INPUT_HAS_CLIPSPACE_POS=1 define:USE_CUBEMAP_ARRAY=1 #include "triangle.hlsl" struct ShaderUniforms { float4x4 cameraTransform; float aspectRatio; #if defined(USE_CUBEMAP_ARRAY) float cubemapArrayIndex; #endif }; #if defined(USE_CUBEMAP_ARRAY) #define TEXTURE_IMAGE_TYPE TextureCubeArray #else #define TEXTURE_IMAGE_TYPE TextureCube #endif [[vk::binding(0, 0)]] ConstantBuffer shaderUniforms; [[vk::binding(1, 0)]] uniform TEXTURE_IMAGE_TYPE cubemapImage; [[vk::binding(2, 0)]] uniform sampler imageSampler; float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target { float3 direction = -mul(shaderUniforms.cameraTransform, float4(vertexAttribs.clipSpacePosition.x * shaderUniforms.aspectRatio, vertexAttribs.clipSpacePosition.y, 1.0, 0.0)).xyz; #if defined(USE_CUBEMAP_ARRAY) float4 cubemapSampleCoords = float4(direction, shaderUniforms.cubemapArrayIndex); #else float3 cubemapSampleCoords = direction; #endif return cubemapImage.Sample(imageSampler, cubemapSampleCoords); } GenericFragShaderInput VSMain(uint vertexId : SV_VertexID) { return TriangleVertex(vertexId, 1.0, 0.0, 0.0); } ================================================ FILE: samples/shaders/fullscreen-triangle.hlsl ================================================ //T: fullscreen-triangle ps:PSMain vs:VSMain //T: small-triangle ps:PSMain vs:VSMain define:SCALE=0.25 #define GENERIC_FS_INPUT_HAS_COLOR #include "triangle.hlsl" #ifndef SCALE #define SCALE 1.0 #endif float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_TARGET { return vertexAttribs.color; } GenericFragShaderInput VSMain(uint vertexId : SV_VertexID) { return TriangleVertex(vertexId, SCALE, 0.0, 0.0); } ================================================ FILE: samples/shaders/generic-frag-shader-input.hlsl ================================================ struct 
GenericFragShaderInput { float4 position : SV_Position; #if defined(GENERIC_FS_INPUT_HAS_COLOR) float4 color : NGF_COLOR; #endif #if defined(GENERIC_FS_INPUT_HAS_CLIPSPACE_POS) float4 clipSpacePosition : NGF_CLIP_SPACE_POSITION; #endif #if defined(GENERIC_FS_INPUT_HAS_UV) float2 textureUv : NGF_UV; #endif }; ================================================ FILE: samples/shaders/imgui.hlsl ================================================ //T: imgui ps:PSMain vs:VSMain #define GENERIC_FS_INPUT_HAS_UV #define GENERIC_FS_INPUT_HAS_COLOR #include "generic-frag-shader-input.hlsl" struct ImGuiVSInput { float2 position : ATTR0; float2 uv : TEXCOORD0; float4 color : COLOR0; }; struct VertShaderUniforms { float4x4 projectionTransform; }; [[vk::binding(0, 0)]] ConstantBuffer vertShaderUniforms; GenericFragShaderInput VSMain(ImGuiVSInput input) { GenericFragShaderInput vertexData = { mul(vertShaderUniforms.projectionTransform, float4(input.position, 0.0, 1.0)), input.color, input.uv }; return vertexData; } [[vk::binding(1, 0)]] uniform Texture2D textureImage; [[vk::binding(2, 0)]] uniform sampler imageSampler; float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target { return vertexAttribs.color * textureImage.Sample(imageSampler, vertexAttribs.textureUv); } ================================================ FILE: samples/shaders/instancing.hlsl ================================================ //T: instancing ps:PSMain vs:VSMain #include "quat.hlsl" #define GENERIC_FS_INPUT_HAS_UV #include "generic-frag-shader-input.hlsl" struct VertexShaderInput { float3 objSpacePosition : SV_Position; float2 textureUv : TEXCOORD0; }; struct ShaderUniforms { float4x4 worldToClipTransform; float timestamp; }; [[vk::binding(0, 0)]] ConstantBuffer shaderUniforms; [[vk::binding(1, 0)]] Buffer perInstanceData; GenericFragShaderInput VSMain(VertexShaderInput vertexAttrs, int instanceIdx : SV_InstanceID) { float4 worldSpaceTranslation = float4(perInstanceData.Load(instanceIdx), 0.0); const float 
oscillationFrequency = 5.0; float oscillationPhase = worldSpaceTranslation.x * worldSpaceTranslation.y; float4 oscillationOffset = float4(0.0, sin(oscillationFrequency * (shaderUniforms.timestamp + oscillationPhase)), 0.0, 0.0); float4 rotationQuat = quatFromAxisAngle(worldSpaceTranslation.xyz, shaderUniforms.timestamp); float4 worldSpacePosition = rotateByQuat(float4(vertexAttrs.objSpacePosition, 1.0), rotationQuat) + worldSpaceTranslation + oscillationOffset; float4 clipSpacePosition = mul(shaderUniforms.worldToClipTransform, worldSpacePosition); clipSpacePosition.y *= -1.0; GenericFragShaderInput result = { clipSpacePosition, vertexAttrs.textureUv }; return result; } [[vk::binding(2, 0)]] uniform Texture2D modelTexture; [[vk::binding(3, 0)]] uniform sampler textureSampler; float4 PSMain(GenericFragShaderInput fragmentAttribs) : SV_Target { return modelTexture.Sample(textureSampler, fragmentAttribs.textureUv); } ================================================ FILE: samples/shaders/polygon.hlsl ================================================ //T: polygon ps:PSMain vs:VSMain #define GENERIC_FS_INPUT_HAS_COLOR #include "generic-frag-shader-input.hlsl" struct VertShaderUniforms { float scaleA; float scaleB; float time; float aspectRatio; float theta; }; [[vk::binding(0, 0)]] ConstantBuffer vertShaderUniforms; float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target { return vertexAttribs.color; } GenericFragShaderInput VSMain(uint vertexId : SV_VertexID) { GenericFragShaderInput polygonVertexData; if (vertexId % 3 == 0) { polygonVertexData.position = float4(0.0, 0.0, 0.0, 1.0); polygonVertexData.color = float4(0.8, 0.7, 0.8, 1.0); } else { float rotationAngle = vertShaderUniforms.time; float2x2 rotationMatrix = { cos(rotationAngle), -sin(rotationAngle), sin(rotationAngle), cos(rotationAngle) }; float effectiveScale = (vertexId % 2 ? 
vertShaderUniforms.scaleB : vertShaderUniforms.scaleA);
    int outerVertexId = int(round(float(vertexId)/3.0));
    float theta = vertShaderUniforms.theta;
    // Place the outer vertex on a circle, spin it by the rotation matrix,
    // then correct for the viewport aspect ratio and apply the scale.
    float2 vertexPosition =
        mul(rotationMatrix, float2(sin(outerVertexId * theta), cos(outerVertexId * theta))) *
        float2(1.0, vertShaderUniforms.aspectRatio) * effectiveScale;
    polygonVertexData.position = float4(vertexPosition, 0.0, 1.0);
    // Color is derived from the vertex position (components remapped to [0, 1]).
    polygonVertexData.color = float4(
        0.5 * (vertexPosition.x + 1.0),
        0.5 * (vertexPosition.y + 1.0),
        abs(1.0 - vertexPosition.x),
        1.0);
    polygonVertexData.position.y *= -1.0;
  }
  return polygonVertexData;
}

================================================ FILE: samples/shaders/quad.hlsl ================================================

================================================ FILE: samples/shaders/quat.hlsl ================================================

// Helper functions for quaternions.

// Builds a unit quaternion (x, y, z, w) representing a rotation of `angle`
// radians about `axis`. The axis is normalized here, so callers may pass a
// non-unit axis.
float4 quatFromAxisAngle(float3 axis, float angle) {
  float3 n = normalize(axis);
  return float4(sin(angle/2.0) * n, cos(angle/2.0));
}

// Hamilton product lhs * rhs of two quaternions stored as (x, y, z, w).
float4 quatMul(float4 lhs, float4 rhs) {
  const float x1 = lhs[0], x2 = rhs[0],
              y1 = lhs[1], y2 = rhs[1],
              z1 = lhs[2], z2 = rhs[2],
              w1 = lhs[3], w2 = rhs[3];
  // Fix: the x component previously used `x1 * w1`; the Hamilton product
  // requires `x1 * w2` (x = x1*w2 + y1*z2 - z1*y2 + x2*w1). The other three
  // components were already correct.
  return float4(
      x1 * w2 + y1 * z2 - z1 * y2 + x2 * w1,
      y1 * w2 - x1 * z2 + z1 * x2 + y2 * w1,
      x1 * y2 - y1 * x2 + z1 * w2 + z2 * w1,
      w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2);
}

// Rotates the xyz part of `a` by the quaternion `q` (q * a * conj(q),
// expanded into scalar form); the w component of `a` is passed through
// unchanged.
float4 rotateByQuat(float4 a, float4 q) {
  float x = a[0], y = a[1], z = a[2];
  float qx = q[0], qy = q[1], qz = q[2], qw = q[3];
  float ix =  qw * x + qy * z - qz * y;
  float iy =  qw * y + qz * x - qx * z;
  float iz =  qw * z + qx * y - qy * x;
  float iw = -qx * x - qy * y - qz * z;
  return float4(
      ix * qw + iw * -qx + iy * -qz - iz * -qy,
      iy * qw + iw * -qy + iz * -qx - ix * -qz,
      iz * qw + iw * -qz + ix * -qy - iy * -qx,
      a[3]);
}

================================================ FILE: samples/shaders/simple-texture.hlsl ================================================

// T: simple-texture vs:VSMain ps:PSMain
#define GENERIC_FS_INPUT_HAS_UV
#include
"triangle.hlsl" [[vk::binding(1, 0)]] uniform Texture2D textureImage; [[vk::binding(2, 0)]] uniform sampler imageSampler; float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target { return textureImage.Sample(imageSampler, vertexAttribs.textureUv); } GenericFragShaderInput VSMain(uint vertexId : SV_VertexID) { return TriangleVertex(vertexId, 1.0, 0.0, 0.0); } ================================================ FILE: samples/shaders/textured-quad.hlsl ================================================ //T: textured-quad ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1 //T: textured-quad-image-array ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1 define:USE_IMAGE_ARRAY=1 //T: textured-quad-multiple-images ps:PSMain vs:VSMain define:GENERIC_FS_INPUT_HAS_UV=1 define:NUM_IMAGES=4 #include "generic-frag-shader-input.hlsl" struct ShaderUniforms { float4x4 transformMatrix; #if defined(USE_IMAGE_ARRAY) float imageArrayIdx; #endif #if defined(NUM_IMAGES) uint imageIdx; #endif }; [[vk::binding(0, 0)]] ConstantBuffer shaderUniforms; GenericFragShaderInput VSMain(uint vertexId : SV_VertexID) { const float2 vertices[] = { float2(1.0, -1.0), float2(-1.0, -1.0), float2(1.0, 1.0), float2(1.0, 1.0), float2(-1.0, -1.0), float2(-1.0, 1.0) }; const float2 uvs[] = { float2(1.0, 1.0), float2(0.0, 1.0), float2(1.0, 0.0), float2(1.0, 0.0), float2(0.0, 1.0), float2(0.0, 0.0) }; vertexId = vertexId % 6; GenericFragShaderInput result = { mul(shaderUniforms.transformMatrix, float4(vertices[vertexId], 0.0, 1.0)), 2 * uvs[vertexId] }; return result; } #if defined(USE_IMAGE_ARRAY) #define TEXTURE_IMAGE_TYPE Texture2DArray #else #define TEXTURE_IMAGE_TYPE Texture2D #endif #if !defined(NUM_IMAGES) #define NUM_IMAGES (1) #endif [[vk::binding(0, 1)]] uniform TEXTURE_IMAGE_TYPE textureImage[NUM_IMAGES]; [[vk::binding(1, 0)]] uniform sampler imageSampler; float4 PSMain(GenericFragShaderInput vertexAttribs) : SV_Target { #if defined(USE_IMAGE_ARRAY) float3 sampleCoords = 
float3(vertexAttribs.textureUv, shaderUniforms.imageArrayIdx); #else float2 sampleCoords = vertexAttribs.textureUv; #endif #if NUM_IMAGES > 1 uint i = shaderUniforms.imageIdx % NUM_IMAGES; #else uint i = 0u; #endif return textureImage[i].Sample(imageSampler, sampleCoords); } ================================================ FILE: samples/shaders/triangle.hlsl ================================================ #include "generic-frag-shader-input.hlsl" GenericFragShaderInput TriangleVertex(uint vertexId, float scale, float2 offset, float depth) { float4 pos[] = { float4(-1.0, 1.0, 0.0, 1.0), float4( 3.0, 1.0, 0.0, 1.0), float4(-1.0, -3.0, 0.0, 1.0) }; const float2 texcoords[] = { float2(0.0, 1.0), float2(2.0, 1.0), float2(0.0, -1.0) }; const float4 colors[] = { float4(1.0, 0.0, 0.0, 1.0), float4(0.0, 1.0, 0.0, 1.0), float4(0.0, 0.0, 1.0, 1.0) }; GenericFragShaderInput triangleVertexData; vertexId = vertexId % 3; triangleVertexData.position = float4(pos[vertexId].xyz * scale, 1.0) + float4(offset, depth, 0.0); #if defined(GENERIC_FS_INPUT_HAS_UV) triangleVertexData.textureUv = texcoords[vertexId]; #endif #if defined(GENERIC_FS_INPUT_HAS_COLOR) triangleVertexData.color = colors[vertexId]; #endif #if defined(GENERIC_FS_INPUT_HAS_CLIPSPACE_POS) triangleVertexData.clipSpacePosition = triangleVertexData.position; #endif return triangleVertexData; } ================================================ FILE: samples/shaders/volume-renderer.hlsl ================================================ // T: volume-renderer vs:VSMain ps:PSMain struct VertexShaderInput { float4 position : SV_Position; float3 textureCoordinate : TexCoord; }; struct VolumeRendererUniforms { float4x4 transformMatrix; float aspectRatio; }; [[vk::binding(0,1)]] ConstantBuffer shaderUniforms; [[vk::binding(0,0)]] Texture3D volumeImage; VertexShaderInput VSMain(uint vertexId: SV_VertexID, uint instanceId : SV_InstanceID) { const float2 vertices[] = { float2(1.0, -1.0), float2(-1.0, -1.0), float2(1.0, 1.0), 
float2(1.0, 1.0), float2(-1.0, -1.0), float2(-1.0, 1.0) }; vertexId = vertexId % 6; float w, h, d; volumeImage.GetDimensions(w, h, d); float3 xyz = float3(vertices[vertexId], 2.0 * (instanceId/d) - 1.0); float3 uvw = xyz * float3(1.0, -1.0, 1.0); xyz.y *= shaderUniforms.aspectRatio; uvw = mul(shaderUniforms.transformMatrix, float4(uvw, 1.0)).xyz; uvw.xy = 0.5 * uvw.xy + 0.5; VertexShaderInput result = { float4(xyz.xy, 0.0, 1.0), uvw, }; return result; } [[vk::binding(1,0)]] sampler volumeSampler; float4 PSMain(VertexShaderInput input) : SV_Target { float alpha = volumeImage.Sample(volumeSampler, input.textureCoordinate).r; return float4(1., 1., 1., alpha); } ================================================ FILE: source/ngf-common/arena.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "arena.h" #include "macros.h" #include "util.h" #include #include namespace ngfi { // Align a pointer to the given alignment (must be power of two) static void* align_ptr(void* ptr, size_t alignment) noexcept { uintptr_t addr = reinterpret_cast(ptr); uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1); return (void*)(aligned); } // Internal block structure - header followed by data in single allocation struct arena::block { size_t capacity; size_t used; arena::block* next; uint8_t* data() noexcept { uintptr_t header_end = reinterpret_cast(this) + sizeof(arena::block); uintptr_t aligned = (header_end + NGFI_MAX_ALIGNMENT - 1) & ~(NGFI_MAX_ALIGNMENT - 1); return reinterpret_cast(aligned); } const uint8_t* data() const noexcept { return const_cast(this)->data(); } // Calculate total allocation size for a block with given data capacity static size_t alloc_size(size_t data_capacity) noexcept { constexpr size_t header_size = sizeof(arena::block); const size_t align_mask = NGFI_MAX_ALIGNMENT - 1; const size_t aligned_header = (header_size + align_mask) & ~align_mask; return aligned_header + data_capacity; } static arena::block* create(size_t data_capacity) noexcept { size_t total_size = block::alloc_size(data_capacity); void* raw = malloc(total_size); if (!raw || total_size < sizeof(arena::block)) return nullptr; auto* block = new (raw) arena::block {}; block->capacity = data_capacity; block->used = 0; block->next = block; return block; } static void destroy(arena::block* block) noexcept { block->~block(); ::free(block); } static size_t destroy_chain(arena::block* blks, bool destroy_self) noexcept { size_t result = 0; if (blks) { arena::block* cur = blks->next; while (cur != blks) { auto prev = cur; cur = cur->next; result += alloc_size(prev->capacity); destroy(prev); } if (destroy_self) { result += alloc_size(blks->capacity); destroy(blks); } else { blks->next = blks; } } return result; } void* alloc(size_t size, size_t alignment, size_t* 
out_total_used) noexcept { uint8_t* data_start = data(); uint8_t* current_ptr = data_start + used; void* aligned_ptr = align_ptr(current_ptr, alignment); size_t padding = static_cast(static_cast(aligned_ptr) - current_ptr); size_t total_needed = padding + size; if (used + total_needed > capacity) { return nullptr; } used += total_needed; *out_total_used += total_needed; return aligned_ptr; } }; arena::arena(size_t initial_capacity) noexcept : block_capacity_ { initial_capacity } {} arena::arena(arena&& other) noexcept : current_block_(other.current_block_) , block_capacity_(other.block_capacity_) , total_allocated_(other.total_allocated_) , total_used_(other.total_used_) { other.current_block_ = nullptr; other.total_allocated_ = 0; other.total_used_ = 0; other.block_capacity_ = 0; } arena::~arena() noexcept { block::destroy_chain(current_block_, true); } void* arena::alloc(size_t size) noexcept { return alloc_aligned(size, NGFI_MAX_ALIGNMENT); } void* arena::alloc_aligned(size_t size, size_t alignment) noexcept { size_t min_capacity = size + alignment; // Enough for alignment + allocation if (size == 0 || block_capacity_ == 0 || (!current_block_ && !grow(min_capacity))) { return nullptr; } // Try to allocate from current block void* result = current_block_->alloc(size, alignment, &total_used_); if (result) { return result; } // Need to grow - allocate new block if (!grow(min_capacity)) { return nullptr; } // Allocate from new block (should always succeed) return current_block_->alloc(size, alignment, &total_used_); } void arena::reset() noexcept { if (current_block_) { total_allocated_ -= block::destroy_chain(current_block_, false); current_block_->used = 0; current_block_->next = current_block_; total_used_ = 0; } } size_t arena::total_allocated() const noexcept { return total_allocated_; } size_t arena::total_used() const noexcept { return total_used_; } bool arena::grow(size_t min_capacity) noexcept { // New block size is at least default_block_size_ or 
min_capacity size_t new_capacity = block_capacity_; if (new_capacity < min_capacity) { new_capacity = min_capacity; } block* new_block = block::create(new_capacity); if (!new_block) { return false; } // Chain to current block if (current_block_) { new_block->next = current_block_->next; current_block_->next = new_block; } current_block_ = new_block; total_allocated_ += block::alloc_size(new_capacity); return true; } } // namespace ngfi ================================================ FILE: source/ngf-common/arena.h ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #pragma once #include #include namespace ngfi { class arena { private: struct block; block* current_block_ = nullptr; size_t block_capacity_ = 0; size_t total_allocated_ = 0; size_t total_used_ = 0; public: arena() = default; explicit arena(size_t initial_capacity) noexcept; arena(arena&& other) noexcept; ~arena() noexcept; arena(const arena&) = delete; arena& operator=(const arena&) = delete; arena& operator=(arena&&) = delete; void reset() noexcept; void* alloc(size_t size) noexcept; void* alloc_aligned(size_t size, size_t alignment) noexcept; template T* alloc() noexcept { return (T*)(alloc_aligned(sizeof(T), alignof(T))); } template T* alloc(size_t n) noexcept { // Check for overflow before computing sizeof(T) * n if (n != 0 && SIZE_MAX / sizeof(T) < n) { return nullptr; } return (T*)(alloc_aligned(sizeof(T) * n, alignof(T))); } size_t total_allocated() const noexcept; size_t total_used() const noexcept; void set_block_size(size_t size) { block_capacity_ = size; } private: bool grow(size_t min_capacity) noexcept; }; } // namespace ngfi ================================================ FILE: source/ngf-common/array.h ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "macros.h" #include "util.h" #include #include namespace ngfi { /** * A simple dynamic array for trivially-copyable types. * Similar to std::vector but uses NGFI allocation callbacks. */ template class array { private: T* data_ = nullptr; size_t size_ = 0; size_t capacity_ = 0; static constexpr size_t MIN_CAPACITY = 8; public: using value_type = T; using iterator = T*; using const_iterator = const T*; using reference = T&; using const_reference = const T&; array() noexcept = default; explicit array(size_t size) : data_ {ngfi::allocn(size)}, size_ {size}, capacity_ {size} { } array(const T* src, size_t count) : array{count} { if (data_) { memcpy (data_, src, sizeof(T)*size_); } } array(array&& other) noexcept { *this = ngfi::move(other); } ~array() noexcept { destroy(); } array& operator=(array&& other ) noexcept { destroy(); data_ = other.data_; size_ = other.size_; capacity_ = other.capacity_; other.data_ = nullptr; other.size_ = 0; other.capacity_ = 0; return *this; } size_t size() const noexcept { return size_; } size_t capacity() const noexcept { return capacity_; } bool empty() const noexcept { return size_ == 0; } T& operator[](size_t idx) noexcept { return data_[idx]; } const T& operator[](size_t idx) const noexcept { return data_[idx]; } T& front() noexcept { return data_[0]; } const T& front() const noexcept { return data_[0]; } T& back() noexcept { return data_[size_ - 1]; } const T& back() const noexcept { return data_[size_ - 1]; } T* data() noexcept { return 
data_; } const T* data() const noexcept { return data_; } T* push_back(const T& value) noexcept { static_assert(!FixedSize); if (!ensure_capacity(size_ + 1)) { return nullptr; } if constexpr (__is_trivially_copyable(T)) { memcpy(&data_[size_], &value, sizeof(T)); } else { data_[size_] = value; } ++size_; return &data_[size_ - 1]; } template T* emplace_back(Args... args) { static_assert(!FixedSize); if (!ensure_capacity(size_ + 1)) { return nullptr; } new (&data_[size_]) T {ngfi::forward(args)...}; ++size_; return &data_[size_ - 1]; } void pop_back() noexcept { static_assert(!FixedSize); if (size_ > 0) { --size_; } } void clear() noexcept { static_assert(!FixedSize); size_ = 0; } bool resize(size_t new_size) noexcept { static_assert(!FixedSize); if (new_size > capacity_) { if (!reserve(new_size)) { return false; } } size_ = new_size; return true; } bool reserve(size_t new_capacity) noexcept { static_assert(!FixedSize); if (new_capacity <= capacity_) { return true; } return grow_to(new_capacity); } iterator begin() noexcept { return data_; } const_iterator begin() const noexcept { return data_; } iterator end() noexcept { return data_ + size_; } const_iterator end() const noexcept { return data_ + size_; } private: void destroy() noexcept { if (data_ != nullptr) { AllocT::freen(data_, capacity_); data_ = nullptr; size_ = 0; capacity_ = 0; } } array(const array&) = delete; array& operator=(const array&) = delete; bool ensure_capacity(size_t required) noexcept { if (required <= capacity_) { return true; } size_t new_capacity = capacity_ == 0 ? 
MIN_CAPACITY : capacity_ * 2; while (new_capacity < required) { new_capacity *= 2; } return grow_to(new_capacity); } bool grow_to(size_t new_capacity) noexcept { T* new_data = AllocT::template allocn(new_capacity); if (new_data == nullptr) { return false; } if (data_ != nullptr) { if (size_ > 0) { if constexpr (__is_trivially_copyable(T)) { memcpy(new_data, data_, size_ * sizeof(T)); } else { for (size_t i = 0u; i < size_; ++i) { new (&new_data[i]) T {ngfi::move(data_[i])}; } } } AllocT::freen(data_, capacity_); } data_ = new_data; capacity_ = new_capacity; return true; } }; template using fixed_array = array; } // namespace ngfi ================================================ FILE: source/ngf-common/chunked-list.h ================================================ #pragma once #include "arena.h" namespace ngfi { template class chunked_list { static_assert(__is_trivially_copyable(T)); private: struct chunk { chunk* next; T* free_slot; T slots[ChunkCapacity]; }; chunk* last_chunk_ = nullptr; public: class iterator { friend class chunked_list; chunk* first_chunk_ = nullptr; chunk* curr_chunk_ = nullptr; T* curr_slot_ = nullptr; iterator() = default; explicit iterator(chunk* first_chunk) : first_chunk_{first_chunk}, curr_chunk_{first_chunk}, curr_slot_ { first_chunk->slots } {} public: iterator& operator++() { ++curr_slot_; if (curr_chunk_->free_slot == curr_slot_) { if (curr_chunk_->next == first_chunk_) { first_chunk_ = nullptr; curr_chunk_ = nullptr; curr_slot_ = nullptr; } else { curr_chunk_ = curr_chunk_->next; curr_slot_ = curr_chunk_->slots; } } return *this; } const T& operator*() { return *curr_slot_; } bool operator==(const iterator& it) const { return curr_slot_ == it.curr_slot_ && first_chunk_ == it.first_chunk_ && curr_chunk_ == it.curr_chunk_; } }; iterator begin() noexcept { return !last_chunk_ ? end() : iterator{last_chunk_->next}; } iterator begin() const noexcept { return !last_chunk_ ? 
end() : iterator{last_chunk_->next}; } iterator end() const noexcept { return iterator{}; } T* append(const T& element, arena& a) noexcept { const bool need_new_chunk = !last_chunk_ || last_chunk_->free_slot == &last_chunk_->slots[ChunkCapacity]; if (need_new_chunk) { chunk* new_chunk = a.alloc(); if (!new_chunk) return nullptr; new_chunk->free_slot = new_chunk->slots; if (!last_chunk_) { last_chunk_ = new_chunk; last_chunk_->next = last_chunk_; } else { new_chunk->next = last_chunk_->next; last_chunk_->next = new_chunk; last_chunk_ = new_chunk; } } T* result = last_chunk_->free_slot++; *result = element; return result; } void clear() { last_chunk_ = nullptr; } }; } ================================================ FILE: source/ngf-common/cmdbuf-state.h ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #pragma once #include "ngf-common/macros.h" #include "nicegraf.h" namespace ngfi { enum cmd_buffer_state { CMD_BUFFER_STATE_NEW, CMD_BUFFER_STATE_READY, CMD_BUFFER_STATE_RECORDING, CMD_BUFFER_STATE_READY_TO_SUBMIT, CMD_BUFFER_STATE_PENDING, CMD_BUFFER_STATE_SUBMITTED }; template bool transition_cmd_buf(CmdBufT cmd_buf, cmd_buffer_state new_state) { cmd_buffer_state cur_state = cmd_buf->state; bool has_active_pass = (cmd_buf)->renderpass_active || (cmd_buf)->compute_pass_active || (cmd_buf)->xfer_pass_active; bool is_recordable = cur_state == ::ngfi::CMD_BUFFER_STATE_READY || cur_state == ::ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT; switch (new_state) { case ngfi::CMD_BUFFER_STATE_NEW: NGFI_DIAG_ERROR("command buffer cannot go back to a `new` state"); return false; case ngfi::CMD_BUFFER_STATE_READY: if (cur_state != ngfi::CMD_BUFFER_STATE_SUBMITTED && cur_state != ngfi::CMD_BUFFER_STATE_READY && cur_state != ngfi::CMD_BUFFER_STATE_NEW) { NGFI_DIAG_ERROR("command buffer not in a startable state."); return false; } break; case ngfi::CMD_BUFFER_STATE_RECORDING: if (!is_recordable) { NGFI_DIAG_ERROR("command buffer not in a recordable state."); return false; } break; case ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT: if (cur_state != ngfi::CMD_BUFFER_STATE_RECORDING) { NGFI_DIAG_ERROR("command buffer is not actively recording."); return false; } if (has_active_pass) { NGFI_DIAG_ERROR("cannot finish render encoder with an unterminated pass."); return false; } break; case ngfi::CMD_BUFFER_STATE_PENDING: if (cur_state != ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT && cur_state != ngfi::CMD_BUFFER_STATE_READY) { NGFI_DIAG_ERROR("command buffer not ready to be submitted"); return false; } break; case ngfi::CMD_BUFFER_STATE_SUBMITTED: if (cur_state != ngfi::CMD_BUFFER_STATE_PENDING) { NGFI_DIAG_ERROR("command buffer not in a submittable state"); return false; } break; } cmd_buf->state = new_state; return true; } } // namespace ngfi #define NGFI_TRANSITION_CMD_BUF(b, new_state) \ if 
(!ngfi::transition_cmd_buf(b, new_state)) { return NGF_ERROR_INVALID_OPERATION; } ================================================ FILE: source/ngf-common/create-destroy.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ // NOTE: this file is meant to be included from the backend implementation file. namespace ngfi { template ngf_error generic_create(const InfoT& info, T** result) { auto maybe_t = T::make(info); if (!maybe_t.has_error()) result[0] = maybe_t.value().release(); return maybe_t.has_error() ? 
maybe_t.error() : NGF_ERROR_OK; } } // namespace ngfi ngf_error ngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT { // TODO: unset current context assert(ctx); NGFI_FREE(ctx); } ngf_error ngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_shader_stage(ngf_shader_stage stage) NGF_NOEXCEPT { if (stage != nullptr) { NGFI_FREE(stage); } } ngf_error ngf_create_render_target(const ngf_render_target_info* info, ngf_render_target* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_render_target(ngf_render_target rt) NGF_NOEXCEPT { if (rt != nullptr) { if (rt->is_default) { NGFI_DIAG_ERROR("default RT can only be destroyed by owning context\n"); return; } NGFI_FREE(rt); } } ngf_error ngf_create_compute_pipeline( const ngf_compute_pipeline_info* info, ngf_compute_pipeline* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } ngf_error ngf_create_graphics_pipeline( const ngf_graphics_pipeline_info* info, ngf_graphics_pipeline* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_graphics_pipeline(ngf_graphics_pipeline pipe) NGF_NOEXCEPT { if (pipe != nullptr) { NGFI_FREE(pipe); } } void ngf_destroy_compute_pipeline(ngf_compute_pipeline pipe) NGF_NOEXCEPT { if (pipe != nullptr) { NGFI_FREE(pipe); } } ngf_error ngf_create_texel_buffer_view( const ngf_texel_buffer_view_info* info, ngf_texel_buffer_view* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT { if (buf_view) { 
NGFI_FREE(buf_view); } } ngf_error ngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_buffer(ngf_buffer buf) NGF_NOEXCEPT { if (buf != nullptr) { NGFI_FREE(buf); } } ngf_error ngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT { if (sampler) { NGFI_FREE(sampler); } } ngf_error ngf_create_cmd_buffer(const ngf_cmd_buffer_info* info, ngf_cmd_buffer* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } ngf_error ngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_image_view(ngf_image_view view) NGF_NOEXCEPT { if (view != nullptr) { NGFI_FREE(view); } } ngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT { assert(info); assert(result); return ngfi::generic_create(*info, result); } void ngf_destroy_image(ngf_image image) NGF_NOEXCEPT { if (image != nullptr) { NGFI_FREE(image); } } void ngf_destroy_cmd_buffer(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT { if (cmd_buffer != nullptr) { NGFI_FREE(cmd_buffer); } } ================================================ FILE: source/ngf-common/default-arenas.cpp ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * 
furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "default-arenas.h" namespace ngfi { arena& tmp_arena() noexcept { static thread_local arena a = arena{100u * 1024u}; // 100KB return a; } arena& frame_arena() noexcept { static thread_local arena a = arena{4u * 1024u}; // 4KB return a; } } // namespace ngfi ================================================ FILE: source/ngf-common/default-arenas.h ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "arena.h" namespace ngfi { /** * Get the thread-local temporary arena. * This arena is reset frequently within operations. */ arena& tmp_arena() noexcept; /** * Get the thread-local frame arena. * This arena is reset only at frame boundaries. */ arena& frame_arena() noexcept; /** * Allocate a single element from the temporary arena. */ template inline T* tmp_alloc() noexcept { return tmp_arena().alloc(); } /** * Allocate an array of n elements from the temporary arena. */ template inline T* tmp_alloc(size_t n) noexcept { return tmp_arena().alloc(n); } /** * Allocate a single element from the frame arena. */ template inline T* frame_alloc() noexcept { return frame_arena().alloc(); } /** * Allocate an array of n elements from the frame arena. */ template inline T* frame_alloc(size_t n) noexcept { return frame_arena().alloc(n); } } // namespace ngfi ================================================ FILE: source/ngf-common/frame-token.h ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
#pragma once

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * A frame token packs frame-tracking state into one integer:
 *   bits [16..31] context id, bits [8..15] max in-flight frames,
 *   bits [0..7] frame id.
 */

/* Packs context id, max in-flight frame count and frame id into a token. */
static inline uintptr_t
ngfi_encode_frame_token(uint16_t ctx_id, uint8_t max_inflight_frames, uint8_t frame_id) {
  /* Widen to 32 bits up-front so the shifts are unaffected by integer
     promotion of the narrower parameter types. */
  const uint32_t ctx   = ctx_id;
  const uint32_t maxif = max_inflight_frames;
  const uint32_t frame = frame_id;
  return (uintptr_t)((ctx << 16) | (maxif << 8) | frame);
}

/* Extracts the 16-bit context id from a frame token. */
static inline uint16_t ngfi_frame_ctx_id(uintptr_t frame_token) {
  return (uint16_t)((frame_token >> 16) & 0xffffu);
}

/* Extracts the max in-flight frame count from a frame token. */
static inline uint8_t ngfi_frame_max_inflight_frames(uintptr_t frame_token) {
  return (uint8_t)((frame_token >> 8) & 0xffu);
}

/* Extracts the frame id from a frame token. */
static inline uint8_t ngfi_frame_id(uintptr_t frame_token) {
  return (uint8_t)(frame_token & 0xffu);
}

#ifdef __cplusplus
}
#endif
namespace ngfi {
namespace detail {

/**
 * murmur3 hash function implementation.
 * This is a simplified version for keys 8 bytes in length.
 */

// Rotates x left by r bits.
inline uint64_t rotl64(uint64_t x, int8_t r) {
  return (x << r) | (x >> (64 - r));
}

// Final avalanche mix step from the murmur3 reference implementation.
inline uint64_t fmix64(uint64_t k) {
  k ^= k >> 33;
  k *= 0xff51afd7ed558ccdLLU;
  k ^= k >> 33;
  k *= 0xc4ceb9fe1a85ec53LLU;
  k ^= k >> 33;
  return k;
}

// Computes a 128-bit murmur3 hash of `key`, writing the two 64-bit halves
// into out[0] and out[1].
// NOTE(review): reads 8 bytes from &key — assumes a 64-bit uintptr_t; confirm
// that 32-bit targets are out of scope.
inline void mmh3_x64_128(uintptr_t key, uint32_t seed, uint64_t* out) {
  const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&key);

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = 0x87c37b91114253d5LLU;
  const uint64_t c2 = 0x4cf5ad432745937fLLU;

  // Fold the key's bytes into one 64-bit lane, most significant byte first
  // (identical to the original explicit shift-and-xor unrolling).
  uint64_t k1 = 0;
  for (int byte_idx = 7; byte_idx >= 0; --byte_idx) {
    k1 = (k1 << 8) | static_cast<uint64_t>(bytes[byte_idx]);
  }

  k1 *= c1;
  k1 = rotl64(k1, 31);
  k1 *= c2;
  h1 ^= k1;

  h1 ^= sizeof(key);
  h2 ^= sizeof(key);
  h1 += h2;
  h2 += h1;
  h1 = fmix64(h1);
  h2 = fmix64(h2);
  h1 += h2;
  h2 += h1;

  out[0] = h1;
  out[1] = h2;
}

}  // namespace detail
}  // namespace ngfi
*/ template class hashtable { static_assert( __is_trivially_copyable(V), "hashtable only supports trivially copyable value types"); public: using key_type = uint64_t; static constexpr key_type EMPTY_KEY = ~key_type {0}; struct keyhash { key_type key; uint64_t hash; }; struct entry { key_type key; V value; }; private: static constexpr uint32_t HASH_SEED = 0x9e3779b9u; static constexpr float MAX_LOAD_FACTOR = 0.7f; entry* slots_ = nullptr; size_t capacity_ = 0; size_t initial_capacity_ = 100u; size_t size_ = 0; public: hashtable() noexcept = default; explicit hashtable(size_t capacity) : initial_capacity_ {capacity} { } hashtable(hashtable&& other) noexcept { *this = ngfi::move(other); } ~hashtable() noexcept { destroy(); } hashtable& operator=(hashtable&& other) noexcept { destroy(); slots_ = other.slots_; capacity_ = other.capacity_; size_ = other.size_; other.slots_ = nullptr; other.capacity_ = 0; other.size_ = 0; return *this; } size_t size() const noexcept { return size_; } size_t capacity() const noexcept { return capacity_; } bool empty() const noexcept { return size_ == 0; } static keyhash compute_hash(key_type key) noexcept { uint64_t mmh3_out[2] = {0, 0}; detail::mmh3_x64_128(static_cast(key), HASH_SEED, mmh3_out); return keyhash {key, mmh3_out[0] ^ mmh3_out[1]}; } V* get(key_type key) noexcept { return get_prehashed(compute_hash(key)); } const V* get(key_type key) const noexcept { return get_prehashed(compute_hash(key)); } V* get_prehashed(const keyhash& kh) noexcept { if (!slots_) { return nullptr; } const size_t start_idx = kh.hash % capacity_; for (size_t offset = 0; offset < capacity_; ++offset) { const size_t idx = (start_idx + offset) % capacity_; if (slots_[idx].key == kh.key) { return &slots_[idx].value; } if (slots_[idx].key == EMPTY_KEY) { return nullptr; // Key not found } } return nullptr; // Table is full and key not found } const V* get_prehashed(const keyhash& kh) const noexcept { return const_cast(this)->get_prehashed(kh); } V* 
insert(key_type key, const V& value) noexcept { return insert_prehashed(compute_hash(key), value); } V* insert_prehashed(const keyhash& kh, const V& value) noexcept { // Check if we need to rehash if (capacity_ == 0 || static_cast(size_ + 1) / static_cast(capacity_) > MAX_LOAD_FACTOR) { if (!rehash(capacity_ ? capacity_ * 2 : initial_capacity_)) { return nullptr; } } return insert_internal(kh, value); } V* get_or_insert(key_type key, const V& default_value, bool& is_new) noexcept { return get_or_insert_prehashed(compute_hash(key), default_value, is_new); } V* get_or_insert_prehashed(const keyhash& kh, const V& default_value, bool& is_new) noexcept { if (!slots_) { is_new = true; return insert_prehashed(kh, default_value); } else { // First try to find existing entry const size_t start_idx = kh.hash % capacity_; for (size_t offset = 0; offset < capacity_; ++offset) { const size_t idx = (start_idx + offset) % capacity_; if (slots_[idx].key == kh.key) { is_new = false; return &slots_[idx].value; } if (slots_[idx].key == EMPTY_KEY) { // Key not found, insert new entry // Check if we need to rehash first is_new = true; return insert_prehashed(kh, default_value); } } return nullptr; // Table is full (should not happen with proper load factor) } } void clear() noexcept { if (slots_) { for (size_t i = 0; i < capacity_; ++i) { slots_[i].key = EMPTY_KEY; } size_ = 0; } } class iterator { public: using value_type = entry; using pointer = entry*; using reference = entry&; using difference_type = std::ptrdiff_t; iterator() noexcept = default; reference operator*() const noexcept { return *slot_; } pointer operator->() const noexcept { return slot_; } iterator& operator++() noexcept { ++slot_; advance_to_valid(); return *this; } iterator operator++(int) noexcept { iterator tmp = *this; ++(*this); return tmp; } bool operator==(const iterator& other) const noexcept { return slot_ == other.slot_; } bool operator!=(const iterator& other) const noexcept { return slot_ != other.slot_; 
} private: friend class hashtable; iterator(entry* slot, entry* end) noexcept : slot_(slot), end_(end) { advance_to_valid(); } void advance_to_valid() noexcept { while (slot_ != end_ && slot_->key == EMPTY_KEY) { ++slot_; } } entry* slot_ = nullptr; entry* end_ = nullptr; }; iterator begin() noexcept { return !slots_ ? end() : iterator(slots_, slots_ + capacity_); } iterator end() noexcept { return !slots_ ? iterator(nullptr, nullptr) : iterator {slots_ + capacity_, slots_ + capacity_}; } private: void destroy() noexcept { if (slots_ != nullptr) { ngfi::freen(slots_, capacity_); slots_ = nullptr; capacity_ = 0; size_ = 0; } } // Prevent copying hashtable(const hashtable&) = delete; hashtable& operator=(const hashtable&) = delete; /** * Internal insert without load factor check. */ V* insert_internal(const keyhash& kh, const V& value) noexcept { const size_t start_idx = kh.hash % capacity_; for (size_t offset = 0; offset < capacity_; ++offset) { const size_t idx = (start_idx + offset) % capacity_; if (slots_[idx].key == kh.key) { // Update existing memcpy(&slots_[idx].value, &value, sizeof(V)); return &slots_[idx].value; } if (slots_[idx].key == EMPTY_KEY) { // Insert new slots_[idx].key = kh.key; memcpy(&slots_[idx].value, &value, sizeof(V)); ++size_; return &slots_[idx].value; } } return nullptr; // Should not happen if load factor is maintained } /** * Rehash the table to a new capacity. 
*/ bool rehash(size_t new_capacity) noexcept { entry* old_slots = slots_; size_t old_capacity = capacity_; slots_ = AllocT::template allocn(new_capacity); if (slots_ == nullptr) { slots_ = old_slots; return false; } capacity_ = new_capacity; size_ = 0; // Initialize new slots as empty for (size_t i = 0; i < capacity_; ++i) { slots_[i].key = EMPTY_KEY; } // Reinsert all existing entries for (size_t i = 0; i < old_capacity; ++i) { if (old_slots[i].key != EMPTY_KEY) { insert_internal(compute_hash(old_slots[i].key), old_slots[i].value); } } AllocT::freen(old_slots, old_capacity); return true; } }; } // namespace ngfi ================================================ FILE: source/ngf-common/internal.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "nicegraf.h" #include ngf_diagnostic_info ngfi_diag_info = { .verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT, .userdata = NULL, .callback = NULL}; // Default allocation callbacks. void* ngf_default_alloc(size_t obj_size, size_t nobjs, void*) { return malloc(obj_size * nobjs); } void ngf_default_free(void* ptr, size_t, size_t, void*) { free(ptr); } const ngf_allocation_callbacks NGF_DEFAULT_ALLOC_CB = {ngf_default_alloc, ngf_default_free, NULL}; const ngf_allocation_callbacks* NGF_ALLOC_CB = &NGF_DEFAULT_ALLOC_CB; void ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks) { if (callbacks == NULL) { NGF_ALLOC_CB = &NGF_DEFAULT_ALLOC_CB; } else { NGF_ALLOC_CB = callbacks; } } ngf_sample_count ngfi_get_highest_sample_count(size_t counts_bitmap) { size_t res = (size_t)NGF_SAMPLE_COUNT_64; while ((res & counts_bitmap) == 0 && res > 1) { res >>= 1; } return (ngf_sample_count)res; } ================================================ FILE: source/ngf-common/macros.h ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once #include "nicegraf.h" #include #include #if defined(_WIN32) || defined(_WIN64) #define NGFI_THREADLOCAL __declspec(thread) #define WIN32_LEAN_AND_MEAN #include // emulate pthread mutexes typedef CRITICAL_SECTION pthread_mutex_t; #define pthread_mutex_lock(m) (EnterCriticalSection(m), 0) #define pthread_mutex_unlock(m) (LeaveCriticalSection(m), 0) #define pthread_mutex_init(m, a) (InitializeCriticalSection(m), 0) #define pthread_mutex_destroy(m) (DeleteCriticalSection(m), 0) // dynamic module loading typedef HMODULE ngfi_module_handle; #else #define NGFI_THREADLOCAL __thread #include // dynamic module loading (emulate win32 api) #define LoadLibraryA(name) dlopen(name, RTLD_NOW) #define GetProcAddress(h, n) dlsym(h, n) typedef void* ngfi_module_handle; #endif // Custom allocation callbacks. extern const ngf_allocation_callbacks* NGF_ALLOC_CB; // Convenience macros for invoking custom memory allocation callbacks. // C++ versions are defined after the template functions below. #ifndef __cplusplus #define NGFI_ALLOC(type) ((type*)NGF_ALLOC_CB->allocate(sizeof(type), 1, NGF_ALLOC_CB->userdata)) #define NGFI_ALLOCN(type, n) ((type*)NGF_ALLOC_CB->allocate(sizeof(type), n, NGF_ALLOC_CB->userdata)) #define NGFI_FREE(ptr) (NGF_ALLOC_CB->free((void*)(ptr), sizeof(*ptr), 1, NGF_ALLOC_CB->userdata)) #define NGFI_FREEN(ptr, n) (NGF_ALLOC_CB->free((void*)(ptr), sizeof(*ptr), n, NGF_ALLOC_CB->userdata)) #endif // Macro for determining size of arrays. #if defined(_MSC_VER) #include #define NGFI_ARRAYSIZE(arr) _countof(arr) #else #define NGFI_ARRAYSIZE(arr) (sizeof(arr) / sizeof(arr[0])) #endif // For when you don't feel like comparing structs field-by-field. 
#define NGFI_STRUCT_EQ(s1, s2) \ (sizeof(s1) == sizeof(s2) && memcmp((void*)&s1, (void*)&s2, sizeof(s1)) == 0) // It is $CURRENT_YEAR and C does not have a standard thing for this. #define NGFI_MAX(a, b) (a > b ? a : b) #define NGFI_MIN(a, b) (a < b ? a : b) // For fixing unreferenced parameter warnings. #define NGFI_IGNORE_VAR(name) \ { (void)name; } // MSVC warnings that are safe to ignore. #pragma warning(disable : 4201) #pragma warning(disable : 4200) #pragma warning(disable : 4204) #pragma warning(disable : 4221) extern ngf_diagnostic_info ngfi_diag_info; // Invoke diagnostic message callback directly. #define NGFI_DIAG_MSG(level, fmt, ...) \ if (ngfi_diag_info.callback) { \ ngfi_diag_info.callback(level, ngfi_diag_info.userdata, fmt, ##__VA_ARGS__); \ } #define NGFI_DIAG_INFO(fmt, ...) NGFI_DIAG_MSG(NGF_DIAGNOSTIC_INFO, fmt, ##__VA_ARGS__) #define NGFI_DIAG_WARNING(fmt, ...) NGFI_DIAG_MSG(NGF_DIAGNOSTIC_WARNING, fmt, ##__VA_ARGS__) #define NGFI_DIAG_ERROR(fmt, ...) NGFI_DIAG_MSG(NGF_DIAGNOSTIC_ERROR, fmt, ##__VA_ARGS__) // Convenience macro to invoke diagnostic callback and raise error on unmet precondition. #define NGFI_CHECK_CONDITION(cond, err_code, err_fmtstring, ...) \ if (!(cond)) { \ NGFI_DIAG_ERROR(err_fmtstring, ##__VA_ARGS__); \ return err_code; \ } // Convenience macro to immediately die on an unmet precondition. #define NGFI_CHECK_FATAL(cond, err_fmtstring, ...) \ if (!(cond)) { \ NGFI_DIAG_ERROR(err_fmtstring, ##__VA_ARGS__); \ exit(1); \ } typedef long double ngfi_max_align_t; #define NGFI_MAX_ALIGNMENT (sizeof(ngfi_max_align_t)) static inline size_t ngfi_align_size(size_t s) { static const size_t align_mask = NGFI_MAX_ALIGNMENT - 1u; const size_t q = s & (~align_mask); const size_t r = s & align_mask; return q + ((r == 0) ? 
0 : NGFI_MAX_ALIGNMENT); } typedef struct ngfi_range { size_t first_idx; size_t last_idx; } ngfi_range; void ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks); #ifdef __cplusplus #include #include "ngf-common/util.h" namespace ngfi { template T* alloc(Args&&... arg) noexcept { T* ptr = static_cast(NGF_ALLOC_CB->allocate(sizeof(T), 1, NGF_ALLOC_CB->userdata)); if (ptr != nullptr) { new (ptr) T(ngfi::forward(arg)...); } return ptr; } template T* allocn(size_t n) noexcept { if (n == 0) return nullptr; T* ptr = static_cast(NGF_ALLOC_CB->allocate(sizeof(T), n, NGF_ALLOC_CB->userdata)); if (ptr != nullptr) { for (size_t i = 0; i < n; ++i) { new (&ptr[i]) T(); } } return ptr; } template void free(T* ptr) noexcept { if (ptr != nullptr) { if constexpr (! __is_trivially_copyable(T)) { ptr->~T(); } NGF_ALLOC_CB->free(ptr, sizeof(T), 1, NGF_ALLOC_CB->userdata); } } template void freen(T* ptr, size_t n) noexcept { if (ptr != nullptr) { if constexpr (!__is_trivially_copyable(T)) { for (size_t i = 0; i < n; ++i) { ptr[i].~T(); } } NGF_ALLOC_CB->free((void*)ptr, sizeof(T), n, NGF_ALLOC_CB->userdata); } } struct configured_alloc_callbacks { template static T* alloc() noexcept { return ::ngfi::alloc(); } template static T* allocn(size_t n) noexcept { return ::ngfi::allocn(n); } template static void free(T* ptr) noexcept { ::ngfi::free(ptr); } template static void freen(T* ptr, size_t n) noexcept { ::ngfi::freen(ptr, n); } }; struct system_alloc_callbacks { template static T* alloc() noexcept { return new T{}; } template static T* allocn(size_t n) noexcept { return new T[n]; } template static void free(T* ptr) noexcept { delete ptr; } template static void freen(T* ptr, size_t) noexcept { delete[] ptr; } }; } // namespace ngfi // C++ versions of allocation macros that use the template functions. 
#define NGFI_ALLOC(type) (ngfi::alloc()) #define NGFI_ALLOCN(type, n) (ngfi::allocn(n)) #define NGFI_FREE(ptr) (ngfi::free(ptr)) #define NGFI_FREEN(ptr, n) (ngfi::freen(ptr, n)) #endif ================================================ FILE: source/ngf-common/silence.h ================================================ #pragma once #ifndef _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_WARNINGS #endif #ifdef __clang__ #pragma clang diagnostic ignored "-Wnullability-completeness" #if __has_warning("-Wcast-function-type-mismatch") #pragma clang diagnostic ignored "-Wcast-function-type-mismatch" #endif #endif ================================================ FILE: source/ngf-common/unique-ptr.h ================================================ #pragma once #include "macros.h" namespace ngfi { template class unique_ptr { private: T* obj_ = nullptr; public: unique_ptr() noexcept = default; unique_ptr(T* obj) : obj_ {obj} { } ~unique_ptr() noexcept { destroy(); } unique_ptr(unique_ptr&& other) { *this = ngfi::move(other); } unique_ptr(const unique_ptr&) = delete; unique_ptr& operator=(unique_ptr&& other) { destroy(); obj_ = other.obj_; other.obj_ = nullptr; return *this; } unique_ptr& operator=(const unique_ptr&) = delete; T* release() noexcept { auto r = obj_; obj_ = nullptr; return r; } T* get() noexcept { return obj_; } const T* get() const noexcept { return obj_; } T* operator->() noexcept { return get(); } operator bool() const noexcept { return obj_ != nullptr; } template static unique_ptr make(Args&&... 
args) { return unique_ptr {ngfi::alloc(ngfi::forward(args)...)}; } private: void destroy() noexcept { if (obj_) ngfi::free(obj_); } }; } // namespace ngfi ================================================ FILE: source/ngf-common/util.c ================================================ /** * Copyright (c) 2021 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "ngf-common/macros.h" #include "nicegraf-util.h" #include #include #if defined(_WIN32) || defined(_WIN64) #pragma comment(lib, "ws2_32.lib") #include #else #include #endif void ngf_util_create_default_graphics_pipeline_data(ngf_util_graphics_pipeline_data* result) { ngf_stencil_info default_stencil = { .fail_op = NGF_STENCIL_OP_KEEP, .pass_op = NGF_STENCIL_OP_KEEP, .depth_fail_op = NGF_STENCIL_OP_KEEP, .compare_op = NGF_COMPARE_OP_EQUAL, .compare_mask = 0, .write_mask = 0, .reference = 0}; ngf_depth_stencil_info dsi = { .stencil_test = false, .depth_test = false, .depth_write = false, .depth_compare = NGF_COMPARE_OP_LESS, .front_stencil = default_stencil, .back_stencil = default_stencil}; result->depth_stencil_info = dsi; ngf_vertex_input_info vii = {.nattribs = 0, .nvert_buf_bindings = 0}; result->vertex_input_info = vii; ngf_multisample_info msi = {.sample_count = NGF_SAMPLE_COUNT_1, .alpha_to_coverage = false}; result->multisample_info = msi; ngf_rasterization_info ri = { .cull_mode = NGF_CULL_MODE_BACK, .discard = false, .front_face = NGF_FRONT_FACE_COUNTER_CLOCKWISE, .polygon_mode = NGF_POLYGON_MODE_FILL}; result->rasterization_info = ri; ngf_specialization_info spi = { .specializations = NULL, .nspecializations = 0u, .value_buffer = NULL}; result->spec_info = spi; ngf_input_assembly_info iai = { .enable_primitive_restart = false, .primitive_topology = NGF_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST}; result->input_assembly_info = iai; ngf_graphics_pipeline_info gpi = { .color_attachment_blend_states = NULL, .depth_stencil = &result->depth_stencil_info, .input_info = &result->vertex_input_info, .multisample = &result->multisample_info, .input_assembly_info = &result->input_assembly_info, .shader_stages = {NULL}, .nshader_stages = 0u, .rasterization = &result->rasterization_info, .spec_info = &result->spec_info, .debug_name = NULL}; result->pipeline_info = gpi; } const char* ngf_util_get_error_name(const ngf_error err) { static const char* ngf_error_names[] = 
{ "NGF_ERROR_OK", "NGF_ERROR_OUT_OF_MEM", "NGF_ERROR_OBJECT_CREATION_FAILED", "NGF_ERROR_OUT_OF_BOUNDS", "NGF_ERROR_INVALID_FORMAT", "NGF_ERROR_INVALID_SIZE", "NGF_ERROR_INVALID_ENUM", "NGF_ERROR_INVALID_OPERATION"}; if ((size_t)err > NGFI_ARRAYSIZE(ngf_error_names)) { return "invalid error code"; } return ngf_error_names[err]; } ================================================ FILE: source/ngf-common/util.h ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #pragma once namespace ngfi { /** * Remove reference from a type. */ template struct remove_reference { using type = T; }; template struct remove_reference { using type = T; }; template struct remove_reference { using type = T; }; template using remove_reference_t = typename remove_reference::type; /** * Cast to rvalue reference to enable move semantics. * Equivalent to std::move. 
*/ template constexpr remove_reference_t&& move(T&& t) noexcept { return static_cast&&>(t); } template constexpr T&& forward(typename remove_reference::type& t) noexcept { return static_cast(t); } template constexpr T&& forward(typename remove_reference::type&& t) noexcept { return static_cast(t); } } // namespace ngfi ================================================ FILE: source/ngf-common/value-or-error.h ================================================ #pragma once #include "ngf-common/util.h" #include namespace ngfi { template ErrorT missing_value_error() noexcept; template ErrorT non_error() noexcept; template class value_or_error { private: alignas(ValueT) char value_[sizeof(ValueT)]; ErrorT error_; public: value_or_error(const ValueT& v) noexcept : error_{non_error()} { new (value_) ValueT { v }; } value_or_error(ValueT&& v) noexcept : error_{non_error()} { new (value_) ValueT {ngfi::move(v)}; } value_or_error(ErrorT err) noexcept : error_ {err} { if (error_ == non_error()) abort(); } value_or_error(value_or_error&& other) { *this = ngfi::move(other); } value_or_error(const value_or_error&) = delete; ~value_or_error() noexcept { maybe_destroy_value(); } bool has_error() const noexcept { return error_ != non_error(); } ErrorT error() const noexcept { return error_; } const ValueT& value() const noexcept { if (has_error()) { abort(); } return *((const ValueT*)value_); } ValueT& value() noexcept { if (has_error()) abort(); return *((ValueT*)value_); } value_or_error& operator=(value_or_error&& other) { maybe_destroy_value(); if (other.has_error()) { error_ = other.error(); } else { new (value_) ValueT {ngfi::move(other.value())}; error_ = non_error(); other.error_ = missing_value_error(); } return *this; } value_or_error& operator=(const value_or_error&) = delete; bool has_value() const noexcept { return !has_error(); } operator bool() const noexcept { return has_value(); } private: void maybe_destroy_value() noexcept { if (!has_error()) { 
((ValueT*)value_)->~ValueT(); error_ = missing_value_error(); } } }; template using value_or_ngferr = value_or_error; template using maybe_ngfptr = value_or_error, ngf_error>; template<> ngf_error missing_value_error() noexcept { return NGF_ERROR_INVALID_OPERATION; } template<> ngf_error non_error() noexcept { return NGF_ERROR_OK; } } // namespace ngfi ================================================ FILE: source/ngf-mtl/impl.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "ngf-common/array.h" #include "ngf-common/cmdbuf-state.h" #include "ngf-common/default-arenas.h" #include "ngf-common/macros.h" #include "ngf-common/unique-ptr.h" #include "ngf-common/value-or-error.h" #include "nicegraf-mtl-handles.h" #include "nicegraf-wrappers.h" #include "nicegraf.h" #define NS_PRIVATE_IMPLEMENTATION #define MTL_PRIVATE_IMPLEMENTATION #define CA_PRIVATE_IMPLEMENTATION #include // Indicates the maximum amount of buffers (attrib, index and uniform) that // can be bound at the same time. // This is required to work around a discrepancy between nicegraf's and Metal's // buffer binding models. // In Metal, bindings for vertex attribute buffers share the same space of IDs // with regular buffers. Therefore assigning binding 0 to a vertex // attrib buffer would cause a conflict if a vertex shader also requires a // uniform buffer bound at 0. // In order to solve this, attribute buffer bindings are remapped in the // following way: // nicegraf's attrib binding 0 becomes Metal vertex buffer binding 30 // attrib binding 1 becomes Metal vertex buffer binding 29 // ...and so on. // NOTE: the specific value 30 is based on the max total number of buffer bindings // specified in metal feature set tables. // TODO: consider using information from pipeline metadata to use an alternative // remapping scheme: attrib binding 0 -> N; attrib binding 1 -> N+1;... // etc. where N is the total number of uniform buffers consumed by the // vertex stage. static constexpr uint32_t MAX_BUFFER_BINDINGS = 30u; // Metal device handle. We choose one upon initialization and always use that // one. 
// The Metal device selected at initialization; all work goes through it.
MTL::Device*            MTL_DEVICE = nullptr;
ngf_device_capabilities DEVICE_CAPS;

#pragma mark ngf_enum_maps

// ngf_blend_factor -> MTL::BlendFactor (table indexed by the ngf enum value).
static MTL::BlendFactor get_mtl_blend_factor(ngf_blend_factor f) {
  static constexpr MTL::BlendFactor factors[NGF_BLEND_FACTOR_COUNT] = {
      MTL::BlendFactorZero,
      MTL::BlendFactorOne,
      MTL::BlendFactorSourceColor,
      MTL::BlendFactorOneMinusSourceColor,
      MTL::BlendFactorDestinationColor,
      MTL::BlendFactorOneMinusDestinationColor,
      MTL::BlendFactorSourceAlpha,
      MTL::BlendFactorOneMinusSourceAlpha,
      MTL::BlendFactorDestinationAlpha,
      MTL::BlendFactorOneMinusDestinationAlpha,
      MTL::BlendFactorBlendColor,
      MTL::BlendFactorOneMinusBlendColor,
      MTL::BlendFactorBlendAlpha,
      MTL::BlendFactorOneMinusBlendAlpha};
  return factors[f];
}

// ngf_blend_op -> MTL::BlendOperation.
static MTL::BlendOperation get_mtl_blend_operation(ngf_blend_op op) {
  static constexpr MTL::BlendOperation ops[NGF_BLEND_OP_COUNT] = {
      MTL::BlendOperationAdd,
      MTL::BlendOperationSubtract,
      MTL::BlendOperationReverseSubtract,
      MTL::BlendOperationMin,
      MTL::BlendOperationMax};
  return ops[op];
}

// Pixel-format descriptor: the Metal format plus bits per block, sRGB flag
// and compression block dimensions (1x1 for uncompressed formats).
struct mtl_format {
  const MTL::PixelFormat format         = MTL::PixelFormatInvalid;
  const uint8_t          bits_per_block = 0;
  const bool             srgb           = false;
  const uint8_t          block_width    = 1;
  const uint8_t          block_height   = 1;
};

// ngf_image_format -> mtl_format. Empty entries ({}) denote formats that
// Metal does not support on the current target platform.
static mtl_format get_mtl_pixel_format(ngf_image_format f) {
  static const mtl_format formats[NGF_IMAGE_FORMAT_COUNT] = {
      {MTL::PixelFormatR8Unorm, 8},
      {MTL::PixelFormatRG8Unorm, 16},
      {MTL::PixelFormatRG8Snorm, 16},
      {},  // RGB8, unsupported
      {MTL::PixelFormatRGBA8Unorm, 32},
      {},  // SRGB8, unsupported
      {MTL::PixelFormatRGBA8Unorm_sRGB, 32, true},
      {},  // BGR8, unsupported
      {MTL::PixelFormatBGRA8Unorm, 32},
      {},  // BGR8_SRGB, unsupported
      {MTL::PixelFormatBGRA8Unorm_sRGB, 32, true},
      {MTL::PixelFormatRGB10A2Unorm, 32},
      {MTL::PixelFormatR32Float, 32},
      {MTL::PixelFormatRG32Float, 64},
      {},  // RGB32F, unsupported
      {MTL::PixelFormatRGBA32Float, 128},
      {MTL::PixelFormatR16Float, 16},
      {MTL::PixelFormatRG16Float, 32},
      {},  // RGB16F, unsupported
      {MTL::PixelFormatRGBA16Float, 64},
      {MTL::PixelFormatRG11B10Float, 32},
      {MTL::PixelFormatRGB9E5Float, 32},
      {MTL::PixelFormatR16Unorm, 16},
      {MTL::PixelFormatR16Snorm, 16},
      {MTL::PixelFormatRG16Unorm, 32},
      {MTL::PixelFormatRG16Snorm, 32},
      {MTL::PixelFormatRGBA16Unorm, 64},
      {MTL::PixelFormatRGBA16Snorm, 64},
      {MTL::PixelFormatR8Uint, 8},
      {MTL::PixelFormatR8Sint, 8},
      {MTL::PixelFormatR16Uint, 16},
      {MTL::PixelFormatR16Sint, 16},
      {MTL::PixelFormatRG16Uint, 32},
      {},  // RGB16U, unsupported
      {MTL::PixelFormatRGBA16Uint, 64},
      {MTL::PixelFormatR32Uint, 32},
      {MTL::PixelFormatRG32Uint, 64},
      {},  // RGB32U, unsupported
      {MTL::PixelFormatRGBA32Uint, 128},
#if TARGET_OS_OSX
      {MTL::PixelFormatBC7_RGBAUnorm, 128, false, 4, 4},
      {MTL::PixelFormatBC7_RGBAUnorm_sRGB, 128, true, 4, 4},
      {MTL::PixelFormatBC6H_RGBFloat, 128, false, 4, 4},
      {MTL::PixelFormatBC6H_RGBUfloat, 128, false, 4, 4},
      {MTL::PixelFormatBC5_RGUnorm, 128, false, 4, 4},
      {MTL::PixelFormatBC5_RGSnorm, 128, false, 4, 4},
#else
      // BCn formats unsupported on iOS until 16.4
      {},
      {},
      {},
      {},
      {},
      {},
#endif
#if TARGET_OS_OSX && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000
      // ASTC is not supported till macOS 11.0
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
      {},
#else
      {MTL::PixelFormatASTC_4x4_LDR, 128, false, 4, 4},
      {MTL::PixelFormatASTC_4x4_sRGB, 128, true, 4, 4},
      {MTL::PixelFormatASTC_5x4_LDR, 128, false, 5, 4},
      {MTL::PixelFormatASTC_5x4_sRGB, 128, true, 5, 4},
      {MTL::PixelFormatASTC_5x5_LDR, 128, false, 5, 5},
      {MTL::PixelFormatASTC_5x5_sRGB, 128, true, 5, 5},
      {MTL::PixelFormatASTC_6x5_LDR, 128, false, 6, 5},
      {MTL::PixelFormatASTC_6x5_sRGB, 128, true, 6, 5},
      {MTL::PixelFormatASTC_6x6_LDR, 128, false, 6, 6},
      {MTL::PixelFormatASTC_6x6_sRGB, 128, true, 6, 6},
      {MTL::PixelFormatASTC_8x5_LDR, 128, false, 8, 5},
      {MTL::PixelFormatASTC_8x5_sRGB, 128, true, 8, 5},
      {MTL::PixelFormatASTC_8x6_LDR, 128, false, 8, 6},
      {MTL::PixelFormatASTC_8x6_sRGB, 128, true, 8, 6},
      {MTL::PixelFormatASTC_8x8_LDR, 128, false, 8, 8},
      {MTL::PixelFormatASTC_8x8_sRGB, 128, true, 8, 8},
      {MTL::PixelFormatASTC_10x5_LDR, 128, false, 10, 5},
      {MTL::PixelFormatASTC_10x5_sRGB, 128, true, 10, 5},
      {MTL::PixelFormatASTC_10x6_LDR, 128, false, 10, 6},
      {MTL::PixelFormatASTC_10x6_sRGB, 128, true, 10, 6},
      {MTL::PixelFormatASTC_10x8_LDR, 128, false, 10, 8},
      {MTL::PixelFormatASTC_10x8_sRGB, 128, true, 10, 8},
      {MTL::PixelFormatASTC_10x10_LDR, 128, false, 10, 10},
      {MTL::PixelFormatASTC_10x10_sRGB, 128, true, 10, 10},
      {MTL::PixelFormatASTC_12x10_LDR, 128, false, 12, 10},
      {MTL::PixelFormatASTC_12x10_sRGB, 128, true, 12, 10},
      {MTL::PixelFormatASTC_12x12_LDR, 128, false, 12, 12},
      {MTL::PixelFormatASTC_12x12_sRGB, 128, true, 12, 12},
#endif
      {MTL::PixelFormatDepth32Float, 32},
      {MTL::PixelFormatDepth16Unorm, 16},
      {MTL::PixelFormatDepth32Float_Stencil8, 32},  // Emulate DEPTH24_STENCIL8 on iOS
      {}};
  return formats[f];
}

// ngf_attachment_load_op -> MTL::LoadAction.
static MTL::LoadAction get_mtl_load_action(ngf_attachment_load_op op) {
  static const MTL::LoadAction action[NGF_LOAD_OP_COUNT] = {
      MTL::LoadActionDontCare,
      MTL::LoadActionLoad,
      MTL::LoadActionClear};
  return action[op];
}

// ngf_attachment_store_op -> MTL::StoreAction.
static MTL::StoreAction get_mtl_store_action(ngf_attachment_store_op op) {
  static const MTL::StoreAction action[NGF_STORE_OP_COUNT] = {
      MTL::StoreActionDontCare,
      MTL::StoreActionStore,
      MTL::StoreActionMultisampleResolve};
  return action[op];
}

// ngf_type -> MTL::DataType; DataTypeNone marks types with no Metal
// equivalent (8-bit integers, double).
static MTL::DataType get_mtl_type(ngf_type type) {
  static const MTL::DataType types[NGF_TYPE_COUNT] = {
      MTL::DataTypeNone, /* Int8, Metal does not support.*/
      MTL::DataTypeNone, /*UInt8, Metal does not support*/
      MTL::DataTypeShort,
      MTL::DataTypeUShort,
      MTL::DataTypeInt,
      MTL::DataTypeUInt,
      MTL::DataTypeFloat,
      MTL::DataTypeHalf,
      MTL::DataTypeNone /* Double,Metal does not support.*/
  };
  return types[type];
}

// (ngf_type, component count, normalized?) -> MTL::VertexFormat, indexed as
// formats[type][normalized][size - 1]; Invalid marks unsupported combinations.
static MTL::VertexFormat get_mtl_attrib_format(ngf_type type, uint32_t size, bool normalized) {
  static const MTL::VertexFormat formats[NGF_TYPE_COUNT][2][4] = {
      {{MTL::VertexFormatChar,
        MTL::VertexFormatChar2,
        MTL::VertexFormatChar3,
        MTL::VertexFormatChar4},
       {MTL::VertexFormatCharNormalized,
        MTL::VertexFormatChar2Normalized,
        MTL::VertexFormatChar3Normalized,
        MTL::VertexFormatChar4Normalized}},
      {{MTL::VertexFormatUChar,
        MTL::VertexFormatUChar2,
        MTL::VertexFormatUChar3,
        MTL::VertexFormatUChar4},
       {MTL::VertexFormatUCharNormalized,
        MTL::VertexFormatUChar2Normalized,
        MTL::VertexFormatUChar3Normalized,
        MTL::VertexFormatUChar4Normalized}},
      {{MTL::VertexFormatShort,
        MTL::VertexFormatShort2,
        MTL::VertexFormatShort3,
        MTL::VertexFormatShort4},
       {MTL::VertexFormatShortNormalized,
        MTL::VertexFormatShort2Normalized,
        MTL::VertexFormatShort3Normalized,
        MTL::VertexFormatShort4Normalized}},
      {{MTL::VertexFormatUShort,
        MTL::VertexFormatUShort2,
        MTL::VertexFormatUShort3,
        MTL::VertexFormatUShort4},
       {MTL::VertexFormatUShortNormalized,
        MTL::VertexFormatUShort2Normalized,
        MTL::VertexFormatUShort3Normalized,
        MTL::VertexFormatUShort4Normalized}},
      {{MTL::VertexFormatInt,
        MTL::VertexFormatInt2,
        MTL::VertexFormatInt3,
        MTL::VertexFormatInt4},
       {MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid}},
      {{MTL::VertexFormatUInt,
        MTL::VertexFormatUInt2,
        MTL::VertexFormatUInt3,
        MTL::VertexFormatUInt4},
       {MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid}},
      {{MTL::VertexFormatFloat,
        MTL::VertexFormatFloat2,
        MTL::VertexFormatFloat3,
        MTL::VertexFormatFloat4},
       {MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid}},
      {{MTL::VertexFormatHalf,
        MTL::VertexFormatHalf2,
        MTL::VertexFormatHalf3,
        MTL::VertexFormatHalf4},
       {MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid}},
      {{MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,  // Double, Metal does not support.
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid},
       {MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid,
        MTL::VertexFormatInvalid}}};
  assert(size <= 4u && size > 0u);
  return formats[type][normalized ? 1 : 0][size - 1u];
}

// ngf_vertex_input_rate -> MTL::VertexStepFunction.
static MTL::VertexStepFunction get_mtl_step_function(ngf_vertex_input_rate rate) {
  static const MTL::VertexStepFunction funcs[NGF_VERTEX_INPUT_RATE_COUNT] = {
      MTL::VertexStepFunctionPerVertex,
      MTL::VertexStepFunctionPerInstance};
  return funcs[rate];
}

// ngf_primitive_topology -> Metal topology *class* (triangle/line family).
static MTL::PrimitiveTopologyClass get_mtl_primitive_topology_class(ngf_primitive_topology t) {
  static const MTL::PrimitiveTopologyClass topo_class[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {
      MTL::PrimitiveTopologyClassTriangle,
      MTL::PrimitiveTopologyClassTriangle,
      MTL::PrimitiveTopologyClassLine,
      MTL::PrimitiveTopologyClassLine,
  };
  return topo_class[t];
}

// ngf_primitive_topology -> concrete MTL::PrimitiveType.
static MTL::PrimitiveType get_mtl_primitive_type(ngf_primitive_topology type) {
  static const MTL::PrimitiveType types[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {
      MTL::PrimitiveTypeTriangle,
      MTL::PrimitiveTypeTriangleStrip,
      MTL::PrimitiveTypeLine,
      MTL::PrimitiveTypeLineStrip};
  return types[type];
}

// Index buffers may only use 16- or 32-bit unsigned indices.
static MTL::IndexType get_mtl_index_type(ngf_type type) {
  assert(type == NGF_TYPE_UINT16 || type == NGF_TYPE_UINT32);
  return type == NGF_TYPE_UINT16 ? MTL::IndexTypeUInt16 : MTL::IndexTypeUInt32;
}

// ngf_compare_op -> MTL::CompareFunction.
static MTL::CompareFunction get_mtl_compare_function(ngf_compare_op op) {
  static const MTL::CompareFunction compare_fns[NGF_COMPARE_OP_COUNT] = {
      MTL::CompareFunctionNever,
      MTL::CompareFunctionLess,
      MTL::CompareFunctionLessEqual,
      MTL::CompareFunctionEqual,
      MTL::CompareFunctionGreaterEqual,
      MTL::CompareFunctionGreater,
      MTL::CompareFunctionNotEqual,
      MTL::CompareFunctionAlways};
  return compare_fns[op];
}

// ngf_stencil_op -> MTL::StencilOperation.
static MTL::StencilOperation get_mtl_stencil_op(ngf_stencil_op op) {
  static const MTL::StencilOperation stencil_ops[NGF_STENCIL_OP_COUNT] = {
      MTL::StencilOperationKeep,
      MTL::StencilOperationZero,
      MTL::StencilOperationReplace,
      MTL::StencilOperationIncrementClamp,
      MTL::StencilOperationIncrementWrap,
      MTL::StencilOperationDecrementClamp,
      MTL::StencilOperationDecrementWrap,
      MTL::StencilOperationInvert};
  return stencil_ops[op];
}

// ngf_cull_mode -> MTL::CullMode.
static MTL::CullMode get_mtl_culling(ngf_cull_mode c) {
  static const MTL::CullMode cull_modes[NGF_CULL_MODE_COUNT] = {
      MTL::CullModeBack,
      MTL::CullModeFront,
      MTL::CullModeNone, /* Metal has no front + back culling */
      MTL::CullModeNone};
  return cull_modes[c];
}

// ngf_front_face_mode -> MTL::Winding.
static MTL::Winding get_mtl_winding(ngf_front_face_mode w) {
  static const MTL::Winding windings[NGF_FRONT_FACE_COUNT] = {
      MTL::WindingCounterClockwise,
      MTL::WindingClockwise};
  return windings[w];
}

// Selects the Metal texture type for the given image type / layer count /
// sample count combination; unsupported combinations yield
// NGF_ERROR_INVALID_FORMAT.
// NOTE(review): the return-type template arguments on this function and
// get_mtl_address_mode below (value_or_ngferr<...>) were stripped during
// extraction -- restore from the repository.
static ngfi::value_or_ngferr
get_mtl_texture_type(ngf_image_type type, uint32_t nlayers, ngf_sample_count sample_count) {
  if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers == 1 && sample_count == NGF_SAMPLE_COUNT_1) {
    return MTL::TextureType2D;
  } else if (
      type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1 && sample_count == NGF_SAMPLE_COUNT_1) {
    return MTL::TextureType2DArray;
  }
  if (type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers == 1 && sample_count != NGF_SAMPLE_COUNT_1) {
    return MTL::TextureType2DMultisample;
  } else if (
      type == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1 && sample_count != NGF_SAMPLE_COUNT_1) {
    // Multisample arrays require iOS 14+; when unavailable we fall through to
    // the invalid-format error below.
    if (__builtin_available(iOS 14.0, *)) return MTL::TextureType2DMultisampleArray;
  } else if (type == NGF_IMAGE_TYPE_IMAGE_3D) {
    return MTL::TextureType3D;
  } else if (type == NGF_IMAGE_TYPE_CUBE && nlayers == 1) {
    return MTL::TextureTypeCube;
  } else if (type == NGF_IMAGE_TYPE_CUBE && nlayers > 1) {
    return MTL::TextureTypeCubeArray;
  }
  return NGF_ERROR_INVALID_FORMAT;
}

// ngf_sampler_wrap_mode -> MTL::SamplerAddressMode.
static ngfi::value_or_ngferr get_mtl_address_mode(ngf_sampler_wrap_mode mode) {
  static const MTL::SamplerAddressMode modes[NGF_WRAP_MODE_COUNT] = {
      MTL::SamplerAddressModeClampToEdge,
      MTL::SamplerAddressModeRepeat,
      MTL::SamplerAddressModeMirrorRepeat};
  return modes[mode];
}

// ngf_sampler_filter -> min/mag filter.
static MTL::SamplerMinMagFilter get_mtl_minmag_filter(ngf_sampler_filter f) {
  static MTL::SamplerMinMagFilter filters[NGF_FILTER_COUNT] = {
      MTL::SamplerMinMagFilterNearest,
      MTL::SamplerMinMagFilterLinear};
  return filters[f];
}

// ngf_sampler_filter -> mip filter.
static MTL::SamplerMipFilter get_mtl_mip_filter(ngf_sampler_filter f) {
  static MTL::SamplerMipFilter filters[NGF_FILTER_COUNT] = {
      MTL::SamplerMipFilterNearest,
      MTL::SamplerMipFilterLinear};
  return filters[f];
}

// Bytes per pixel for an *uncompressed* format (asserts on compressed ones).
static uint32_t ngfmtl_get_bytesperpel(const ngf_image_format format) {
  const mtl_format f = get_mtl_pixel_format(format);
  assert((f.block_width | f.block_height) == 1);  // invalid op for compressed formats
  return f.bits_per_block / 8;
}

// Row pitch in bytes for a row of `width` texels; rounds up to whole
// compression blocks for compressed formats.
static uint32_t ngfmtl_get_pitch(const uint32_t width, const ngf_image_format format) {
  const mtl_format f                    = get_mtl_pixel_format(format);
  const bool       is_compressed_format = (f.block_width | f.block_height) > 1;
  return is_compressed_format ? (width + f.block_width - 1) / f.block_width * f.bits_per_block / 8
                              : width * f.bits_per_block / 8;
}

// Number of rows of blocks for an image of the given height.
static uint32_t ngfmtl_get_num_rows(const uint32_t height, const ngf_image_format format) {
  const mtl_format f                    = get_mtl_pixel_format(format);
  const bool       is_compressed_format = (f.block_width | f.block_height) > 1;
  return is_compressed_format ?
(height + f.block_height - 1) / f.block_height
                              : height;
}

#pragma mark ngf_struct_definitions

enum ngf_id_init_types { id_default };

// Shared pointer for managed objects
// NOTE(review): template parameter lists (angle brackets) were stripped from
// this file during extraction; this is a class template over the managed
// NS/MTL object type T -- restore from the repository.
template class ngf_id {
  public:
  // Create an ngf_id with an additional retain count
  // Useful for keeping AutoReleasePool managed objects alive beyond
  // Their pool lifetime
  static ngf_id add_retain(T* ptr) {
    ngf_id res = ptr;
    if (res) { res->retain(); }
    return res;
  }

  ngf_id() : ptr_(nullptr) {
  }

  // Allocates and default-initializes a fresh instance (T::alloc()->init()).
  ngf_id(const ngf_id_init_types& type) : ptr_(T::alloc()->init()) {
  }

  // Note: Does NOT increment ref count. You can use this directly after calling
  // alloc()->init()
  ngf_id(T* starting_ptr) : ptr_(starting_ptr) {
  }

  ~ngf_id() {
    destroy_if_necessary();
  }

  ngf_id(const ngf_id&) = delete;
  ngf_id& operator=(const ngf_id&) = delete;

  ngf_id(ngf_id&& other) : ptr_(nullptr) {
    *this = ngfi::move(other);
  }

  // Releases the currently held object (if any) and steals other's pointer.
  ngf_id& operator=(ngf_id&& other) {
    destroy_if_necessary();
    ptr_       = other.ptr_;
    other.ptr_ = nullptr;
    return *this;
  }

  T* get() const {
    return ptr_;
  }

  T* operator->() const {
    return ptr_;
  }

  operator bool() const {
    return ptr_ != nullptr;
  }

  private:
  void destroy_if_necessary() {
    if (ptr_) { ptr_->release(); }
  }

  T* ptr_;
};

// A render target: attachment descriptions plus the image refs rendered into
// and (optionally) resolved to.
struct ngf_render_target_t {
  static ngfi::maybe_ngfptr make(const ngf_render_target_info& info) NGF_NOEXCEPT;
  static ngfi::maybe_ngfptr make(
      const ngf_attachment_descriptions& attachment_descs,
      const ngf_image_ref*               img_refs,
      uint32_t                           rt_width,
      uint32_t                           rt_height) NGF_NOEXCEPT;
  ~ngf_render_target_t() NGF_NOEXCEPT {
    if (attachment_descs.descs) { NGFI_FREEN(attachment_descs.descs, attachment_descs.ndescs); }
  }
  ngf_attachment_descriptions attachment_descs;
  ngfi::fixed_array           render_image_refs;
  ngfi::fixed_array           resolve_image_refs;
  uint32_t                    nrender_attachments  = 0u;
  uint32_t                    nresolve_attachments = 0u;
  // True for the context's default (swapchain) render target.
  bool                        is_default = false;
  NS::UInteger                width;
  NS::UInteger                height;
};

// Per-command-buffer recording state: the Metal command buffer, the currently
// active encoder (at most one of rce/bce/cce), bound pipelines/targets and
// pending push-constant data.
struct ngf_cmd_buffer_t {
  ngfi::cmd_buffer_state      state               = ngfi::CMD_BUFFER_STATE_NEW;
  bool                        renderpass_active   = false;
  bool                        compute_pass_active = false;
  bool                        xfer_pass_active    = false;
  MTL::CommandBuffer*         mtl_cmd_buffer      = nullptr;
  MTL::RenderCommandEncoder*  active_rce          = nullptr;
  MTL::BlitCommandEncoder*    active_bce          = nullptr;
  MTL::ComputeCommandEncoder* active_cce          = nullptr;
  ngf_graphics_pipeline       active_gfx_pipe     = nullptr;
  ngf_compute_pipeline        active_compute_pipe = nullptr;
  ngf_render_target           active_rt           = nullptr;
  ngf_id                      bound_index_buffer  = nullptr;
  MTL::IndexType              bound_index_buffer_type = MTL::IndexTypeUInt16;
  size_t                      bound_index_buffer_offset = 0u;
  ngf_id sample_buf_attachment_for_next_render_pass  = nullptr;
  ngf_id sample_buf_attachment_for_next_compute_pass = nullptr;
  // Re-applied via setBytes on every pipeline bind in this encoder; 0 size = none pending.
  uint32_t pending_pc_size = 0u;
  uint8_t  pending_pc_data[NGF_MAX_ENCODER_INLINE_BYTES] = {};
  static ngfi::maybe_ngfptr make(const ngf_cmd_buffer_info&) NGF_NOEXCEPT {
    return ngfi::unique_ptr::make();
  }
};

// Recovers the owning ngf_cmd_buffer from an encoder's private data slot.
#define NGFMTL_ENC2CMDBUF(enc) ((ngf_cmd_buffer)((void*)enc.pvt_data_donotuse.d0))

// Metadata emitted by niceshade into generated MSL (see
// ngfmtl_parse_niceshade_metadata below).
struct ngfmtl_niceshade_metadata {
  ngfi::array> native_binding_map;
  uint32_t     threadgroup_size[3];
  // Metal buffer slot for the push-constant block; ~0u if the shader has none.
  uint32_t push_const_native_binding = ~0u;
};

// A compiled shader stage: the Metal library it lives in, plus the entry
// point name and original source (kept for pipeline-creation-time reparse).
struct ngf_shader_stage_t {
  ngf_id            func_lib = nullptr;
  ngf_stage_type    type;
  ngfi::fixed_array entry_point_name;
  ngfi::fixed_array source_code;
  static ngfi::maybe_ngfptr make(const ngf_shader_stage_info&) NGF_NOEXCEPT;
};

// Graphics pipeline: the Metal PSO plus the dynamic state Metal sets on the
// encoder rather than baking into the PSO.
struct ngf_graphics_pipeline_t {
  ngf_id   pipeline           = nullptr;
  ngf_id   depth_stencil      = nullptr;
  ngf_id   depth_stencil_desc = nullptr;
  uint32_t front_stencil_reference = 0u;
  uint32_t back_stencil_reference  = 0u;

  MTL::PrimitiveType primitive_type = MTL::PrimitiveTypeTriangle;
  MTL::Winding       winding        = MTL::WindingCounterClockwise;
  MTL::CullMode      culling        = MTL::CullModeBack;
  float              blend_color[4] {0};

  ngfmtl_niceshade_metadata niceshade_metadata;
  static ngfi::maybe_ngfptr make(const ngf_graphics_pipeline_info&) NGF_NOEXCEPT;
};

struct ngf_compute_pipeline_t {
  ngf_id                    pipeline = nullptr;
  ngfmtl_niceshade_metadata niceshade_metadata;
  static ngfi::maybe_ngfptr make(const ngf_compute_pipeline_info&) NGF_NOEXCEPT;
};

struct ngf_buffer_t {
  ngf_id mtl_buffer    = nullptr;
  size_t mapped_offset = 0;  // offset passed to the most recent map call
  static ngfi::maybe_ngfptr make(const ngf_buffer_info&) NGF_NOEXCEPT;
};

struct ngf_texel_buffer_view_t {
  ngf_id mtl_buffer_view = nullptr;
  static ngfi::maybe_ngfptr make(const ngf_texel_buffer_view_info&) NGF_NOEXCEPT;
};

struct ngf_sampler_t {
  ngf_id sampler = nullptr;
  static ngfi::maybe_ngfptr make(const ngf_sampler_info&) NGF_NOEXCEPT;
};

struct ngf_image_t {
  ngf_id texture = nullptr;
  // Workaround for binding srgb images as writeable storage images.
  ngf_id           non_srgb_view = nullptr;
  ngf_image_format format;
  uint32_t         usage_flags = 0u;
  static ngfi::maybe_ngfptr make(const ngf_image_info&) NGF_NOEXCEPT;
};

struct ngf_image_view_t {
  ngf_id view = nullptr;
  static ngfi::maybe_ngfptr make(const ngf_image_view_info& info) NGF_NOEXCEPT;
};

// Platform-layer hooks (presumably implemented in the Objective-C++ layer
// source -- confirm): attach/resize a CAMetalLayer on a native view and pull
// drawables from it.
CA::MetalLayer* ngf_layer_add_to_view(
    MTL::Device*     device,
    uint32_t         width,
    uint32_t         height,
    MTL::PixelFormat pixel_format,
    ngf_colorspace   colorspace,
    uint32_t         capacity_hint,
    bool             display_sync_enabled,
    bool             compute_access_enabled,
    uintptr_t        native_handle);
CA::MetalDrawable* ngf_layer_next_drawable(CA::MetalLayer* layer);
void               ngf_resize_swapchain(
                  CA::MetalLayer* layer,
                  uint32_t        width,
                  uint32_t        height,
                  uintptr_t       native_handle);

// Manages the final presentation surfaces.
class ngfmtl_swapchain {
  public:
  // One frame's worth of attachments. When multisampling is on, rendering
  // targets the multisample texture and resolves into the drawable.
  struct frame {
    MTL::Texture* color_attachment_texture() {
      return multisample_texture ? multisample_texture : color_drawable->texture();
    }

    MTL::Texture* resolve_attachment_texture() {
      return multisample_texture ? color_drawable->texture() : nullptr;
    }

    MTL::Texture* depth_attachment_texture() {
      return depth_texture;
    }

    CA::MetalDrawable* color_drawable      = nullptr;
    MTL::Texture*      depth_texture       = nullptr;
    MTL::Texture*      multisample_texture = nullptr;
    ngf_image_t        img_wrapper;
  };

  ngfmtl_swapchain() = default;
  ngfmtl_swapchain(ngfmtl_swapchain&& other) {
    *this = ngfi::move(other);
  }
  // NOTE(review): this move-assignment does not transfer multisample_images_,
  // pixel_format_ or compute_access_enabled_ -- looks like an oversight;
  // confirm whether swapchains are ever moved after those are populated.
  ngfmtl_swapchain& operator=(ngfmtl_swapchain&& other) {
    layer_        = other.layer_;
    other.layer_  = nullptr;
    depth_images_ = ngfi::move(other.depth_images_);
    capacity_     = other.capacity_;
    img_idx_      = other.img_idx_;
    return *this;
  }

  // Delete copy ctor and copy assignment to make this type move-only.
  ngfmtl_swapchain& operator=(const ngfmtl_swapchain&) = delete;
  ngfmtl_swapchain(const ngfmtl_swapchain&)            = delete;

  ngf_error initialize(const ngf_swapchain_info& swapchain_info, MTL::Device* device) noexcept {
    // Initialize the Metal layer.
    pixel_format_ = get_mtl_pixel_format(swapchain_info.color_format).format;
    if (pixel_format_ == MTL::PixelFormatInvalid) {
      NGFI_DIAG_ERROR("Image format not supported by Metal backend");
      return NGF_ERROR_INVALID_FORMAT;
    }
    layer_ = ngf_layer_add_to_view(
        device,
        swapchain_info.width,
        swapchain_info.height,
        pixel_format_,
        swapchain_info.colorspace,
        swapchain_info.capacity_hint,
        (swapchain_info.present_mode == NGF_PRESENTATION_MODE_FIFO),
        swapchain_info.enable_compute_access,
        swapchain_info.native_handle);
    // Remember the number of images in the swapchain.
    capacity_ = swapchain_info.capacity_hint;
    // Initialize depth attachments if necessary.
    initialize_depth_attachments(swapchain_info);
    initialize_multisample_images(swapchain_info);
    compute_access_enabled_ = swapchain_info.enable_compute_access;
    return NGF_ERROR_OK;
  }

  bool compute_access_enabled() const noexcept {
    return compute_access_enabled_;
  }

  ngf_error resize(const ngf_swapchain_info& swapchain_info) {
    ngf_resize_swapchain(
        layer_,
        swapchain_info.width,
        swapchain_info.height,
        swapchain_info.native_handle);
    // Reinitialize depth attachments & multisample images if necessary.
    initialize_depth_attachments(swapchain_info);
    initialize_multisample_images(swapchain_info);
    return NGF_ERROR_OK;
  }

  // Advances to the next image slot and returns its attachments.
  frame next_frame() {
    img_idx_ = (img_idx_ + 1u) % capacity_;
    return {
        ngf_layer_next_drawable(layer_),
        depth_images_.size() > 0 ? depth_images_[img_idx_].get() : nullptr,
        is_multisampled() ? multisample_images_[img_idx_]->texture.get() : nullptr};
  }

  MTL::PixelFormat get_pixel_format() const {
    return pixel_format_;
  }

  operator bool() {
    return layer_;
  }

  bool is_multisampled() const {
    return !multisample_images_.empty();
  }

  private:
  // (Re)creates one private depth texture per swapchain slot when a depth
  // format is requested.
  void initialize_depth_attachments(const ngf_swapchain_info& swapchain_info) {
    destroy_depth_attachments();
    if (swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED) {
      depth_images_.resize(swapchain_info.capacity_hint);
      MTL::PixelFormat depth_format = get_mtl_pixel_format(swapchain_info.depth_format).format;
      // assert(depth_format != MTL::PixelFormatInvalid);
      for (uint32_t i = 0u; i < swapchain_info.capacity_hint; ++i) {
        ngf_id depth_texture_desc = id_default;
        depth_texture_desc->setTextureType(
            swapchain_info.sample_count > 1u ? MTL::TextureType2DMultisample
                                             : MTL::TextureType2D);
        depth_texture_desc->setWidth(swapchain_info.width);
        depth_texture_desc->setHeight(swapchain_info.height);
        depth_texture_desc->setPixelFormat(depth_format);
        depth_texture_desc->setDepth(1u);
        depth_texture_desc->setSampleCount((NS::UInteger)swapchain_info.sample_count);
        depth_texture_desc->setMipmapLevelCount(1u);
        depth_texture_desc->setArrayLength(1u);
        depth_texture_desc->setUsage(MTL::TextureUsageRenderTarget);
        depth_texture_desc->setStorageMode(MTL::StorageModePrivate);
        depth_texture_desc->setResourceOptions(MTL::ResourceStorageModePrivate);
        if (__builtin_available(macOS 10.14, *)) {
          depth_texture_desc->setAllowGPUOptimizedContents(true);
        }
        depth_images_[i] = MTL_DEVICE->newTexture(depth_texture_desc.get());
      }
    }
  }

  void destroy_depth_attachments() {
    depth_images_.resize(0);
  }

  // (Re)creates one multisample color image per slot when sample_count > 1.
  void initialize_multisample_images(const ngf_swapchain_info& swapchain_info) {
    destroy_multisample_images();
    if (swapchain_info.sample_count > NGF_SAMPLE_COUNT_1) {
      multisample_images_.resize(capacity_);
      for (size_t i = 0; i < capacity_; ++i) {
        const ngf_image_info info = {
            .type   = NGF_IMAGE_TYPE_IMAGE_2D,
            .extent = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},
            .nmips        = 1u,
            .nlayers      = 1u,
            .format       = swapchain_info.color_format,
            .sample_count = (ngf_sample_count)swapchain_info.sample_count,
            .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT};
        ngf_create_image(&info, &multisample_images_[i]);
      }
    }
  }

  void destroy_multisample_images() {
    assert(multisample_images_.empty() || capacity_ == multisample_images_.size());
    for (size_t i = 0; i < multisample_images_.size(); ++i) {
      ngf_destroy_image(multisample_images_[i]);
    }
    multisample_images_.resize(0);
  }

  CA::MetalLayer*  layer_    = nullptr;
  uint32_t         img_idx_  = 0u;
  uint32_t         capacity_ = 0u;
  ngfi::array>     depth_images_;
  ngfi::array      multisample_images_;
  MTL::PixelFormat pixel_format_;
  bool             compute_access_enabled_;
};

// Per-context state: device, queue, swapchain and the in-flight frame.
struct ngf_context_t {
  ngf_id                  device = nullptr;
  ngfmtl_swapchain        swapchain;
  ngfmtl_swapchain::frame frame;
  ngf_id                  queue      = nullptr;
  bool                    is_current = false;
  ngf_swapchain_info      swapchain_info;
  MTL::CommandBuffer*     pending_cmd_buffer = nullptr;
  ngf_id                  last_cmd_buffer    = nullptr;
  dispatch_semaphore_t    frame_sync_sem     = nullptr;
  ngf_render_target       default_rt;
  static ngfi::maybe_ngfptr make(const ngf_context_info&) NGF_NOEXCEPT;
  ~ngf_context_t() NGF_NOEXCEPT {
    // Drain outstanding GPU work before tearing the context down.
    if (last_cmd_buffer) { last_cmd_buffer->waitUntilCompleted(); }
  }
};

// GPU families ordered from least to most capable; an index into this array
// serves as a capability level.
constexpr MTL::GPUFamily NGFMTL_GPU_FAMILIES[] = {
    MTL::GPUFamilyCommon1,
    MTL::GPUFamilyCommon2,
    MTL::GPUFamilyApple1,
    MTL::GPUFamilyApple2,
    MTL::GPUFamilyApple3,
    MTL::GPUFamilyApple4,
    MTL::GPUFamilyApple5,
    MTL::GPUFamilyApple6,
    MTL::GPUFamilyApple7,
    MTL::GPUFamilyMac2,
    MTL::GPUFamilyMac2,
    MTL::GPUFamilyMac2,
    MTL::GPUFamilyMac2,
};
constexpr size_t NGFMTL_NUM_GPU_FAMILIES = sizeof(NGFMTL_GPU_FAMILIES) / sizeof(MTL::GPUFamily);

// Maps a family constant to its index in NGFMTL_GPU_FAMILIES (0 if absent).
static constexpr size_t ngfmtl_gpufam_idx(MTL::GPUFamily fam) {
  for (size_t i = 0; i < NGFMTL_NUM_GPU_FAMILIES; ++i)
    if (NGFMTL_GPU_FAMILIES[i] == fam) return i;
  return 0;
}

// Returns the index of the most capable family the device supports.
// FIXME(review): fam_idx is size_t (unsigned), so `fam_idx >= 0` is always
// true; if no family were supported, decrementing past 0 wraps around and the
// array is read out of bounds. In practice Common1 support makes this
// unreachable, but the loop should use a signed index or break at zero.
static size_t ngfmtl_max_supported_gpu_family(MTL::Device* mtldev) {
  for (size_t fam_idx = NGFMTL_NUM_GPU_FAMILIES - 1; fam_idx >= 0; --fam_idx)
    if (mtldev->supportsFamily(NGFMTL_GPU_FAMILIES[fam_idx])) return fam_idx;
  return 0;
}

// Provided by the common (backend-agnostic) layer.
void             ngfi_set_allocation_callbacks(const ngf_allocation_callbacks* callbacks);
ngf_sample_count ngfi_get_highest_sample_count(size_t counts_bitmap);

// Fills out the public ngf_device struct (name, performance tier, capability
// limits) for the given Metal device. Limits are keyed off the highest
// supported GPU family.
static void ngfmtl_populate_ngf_device(uint32_t handle, ngf_device& ngfdev, MTL::Device* mtldev) {
  ngfdev.handle = handle;
#if TARGET_OS_OSX
  // On macOS, integrated (low-power) GPUs are reported as the low tier.
  ngfdev.performance_tier =
      mtldev->lowPower() ? NGF_DEVICE_PERFORMANCE_TIER_LOW : NGF_DEVICE_PERFORMANCE_TIER_HIGH;
#else
  ngfdev.performance_tier = NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN;
#endif
  const size_t device_name_length = mtldev->name()->length();
  // NOTE(review): strncpy does not null-terminate when the device name is
  // NGF_DEVICE_NAME_MAX_LENGTH characters or longer -- confirm that
  // ngfdev.name is pre-zeroed or that callers tolerate this.
  strncpy(
      ngfdev.name,
      mtldev->name()->utf8String(),
      NGFI_MIN(NGF_DEVICE_NAME_MAX_LENGTH, device_name_length));
  ngf_device_capabilities& caps           = ngfdev.capabilities;
  const size_t             gpu_family_idx = ngfmtl_max_supported_gpu_family(mtldev);
  caps.clipspace_z_zero_to_one                  = true;
  caps.max_vertex_input_attributes_per_pipeline = 31;
  caps.max_uniform_buffers_per_stage            = 31;
  caps.max_sampler_anisotropy                   = 16.0f;
  caps.max_samplers_per_stage                   = 16;
  caps.max_3d_image_dimension                   = 2048;
  caps.max_image_layers                         = 2048;
  caps.max_uniform_buffer_range                 = NGF_DEVICE_LIMIT_UNKNOWN;
  caps.device_local_memory_is_host_visible      = mtldev->hasUnifiedMemory();
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple6)) {
    caps.max_sampled_images_per_stage = 128;
  } else if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {
    caps.max_sampled_images_per_stage = 96;
  } else {
    caps.max_sampled_images_per_stage = 31;
  }
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {
    caps.max_fragment_input_components = 124;
  } else {
    caps.max_fragment_input_components = 60;
  }
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple4)) {
    caps.max_fragment_inputs = 124;
  } else {
    caps.max_fragment_inputs = 60;
  }
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyMac2)) {
    caps.uniform_buffer_offset_alignment = 32;
  } else {
    caps.uniform_buffer_offset_alignment = 4;
  }
  caps.storage_buffer_offset_alignment = 64;
  caps.texel_buffer_offset_alignment   = 64;
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple3)) {
    caps.max_1d_image_dimension   = 16384;
    caps.max_2d_image_dimension   = 16384;
    caps.max_cube_image_dimension = 16384;
  } else {
    caps.max_1d_image_dimension   = 8192;
    caps.max_2d_image_dimension   = 8192;
    caps.max_cube_image_dimension = 8192;
  }
  if (gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple2)) {
    caps.max_color_attachments_per_pass = 8;
  } else {
    caps.max_color_attachments_per_pass = 4;
  }
  // NOTE(review): GPUFamilyCommon3 is not present in NGFMTL_GPU_FAMILIES, so
  // ngfmtl_gpufam_idx returns 0 for it, making the middle comparison
  // equivalent to `gpu_family_idx == 0` -- likely unintended; confirm.
  caps.cubemap_arrays_supported = gpu_family_idx == ngfmtl_gpufam_idx(MTL::GPUFamilyCommon2) ||
                                  gpu_family_idx == ngfmtl_gpufam_idx(MTL::GPUFamilyCommon3) ||
                                  gpu_family_idx >= ngfmtl_gpufam_idx(MTL::GPUFamilyApple3);
  // Bit i set <=> (i+1)-sample textures are supported (1/2/4/8).
  size_t supports_samples_bitmap = (mtldev->supportsTextureSampleCount(1) ? 1 : 0) |
                                   (mtldev->supportsTextureSampleCount(2) ? 2 : 0) |
                                   (mtldev->supportsTextureSampleCount(4) ? 4 : 0) |
                                   (mtldev->supportsTextureSampleCount(8) ? 8 : 0);
  ngf_sample_count max_supported_sample_count =
      ngfi_get_highest_sample_count(supports_samples_bitmap);
  caps.texture_color_sample_counts                  = supports_samples_bitmap;
  caps.max_supported_texture_color_sample_count     = max_supported_sample_count;
  caps.texture_depth_sample_counts                  = supports_samples_bitmap;
  caps.max_supported_texture_depth_sample_count     = max_supported_sample_count;
  caps.framebuffer_color_sample_counts              = supports_samples_bitmap;
  caps.max_supported_framebuffer_color_sample_count = max_supported_sample_count;
  caps.framebuffer_depth_sample_counts              = supports_samples_bitmap;
  caps.max_supported_framebuffer_depth_sample_count = max_supported_sample_count;
}

// The context that is current on the calling thread.
NGFI_THREADLOCAL ngf_context CURRENT_CONTEXT = nullptr;

// Extracts the niceshade-generated metadata (native binding map and, for
// compute stages, the threadgroup size) embedded in block comments of
// generated MSL source.
static ngf_error ngfmtl_parse_niceshade_metadata(
    const char*                input,
    bool                       need_threadgroup_size,
    ngfmtl_niceshade_metadata* output) {
  static const char binding_map_tag[]      = "NGF_NATIVE_BINDING_MAP";
  static const char threadgroup_size_tag[] = "NGF_THREADGROUP_SIZE";
  const char*
serialized_binding_map = NULL; const char* serialized_threadgroup_size = NULL; bool in_comment = false; for (; *input != '\0' && (serialized_binding_map == NULL || (!need_threadgroup_size || serialized_threadgroup_size == NULL)); ++input) { if (!in_comment && *input == '/' && *(input + 1) == '*') { in_comment = true; input++; continue; } if (in_comment && *input == '*' && *(input + 1) == '/') { in_comment = false; input++; continue; } if (!in_comment) continue; if (serialized_binding_map == NULL && strncmp(input, binding_map_tag, sizeof(binding_map_tag) - 1) == 0) { serialized_binding_map = input + sizeof(binding_map_tag) - 1; } if (need_threadgroup_size && serialized_threadgroup_size == NULL && strncmp(input, threadgroup_size_tag, sizeof(threadgroup_size_tag) - 1) == 0) { serialized_threadgroup_size = input + sizeof(threadgroup_size_tag) - 1; } } if (!serialized_binding_map) { NGFI_DIAG_ERROR("Failed to find a serialized binding map"); return NGF_ERROR_INVALID_OPERATION; } if (need_threadgroup_size && !serialized_threadgroup_size) { NGFI_DIAG_ERROR("Failed to find a serialized threadgroup size"); return NGF_ERROR_INVALID_OPERATION; } // Parse the native binding map. 
struct ngfmtl_binding_map_entry { uint32_t set; uint32_t binding; uint32_t native_binding; }; ngfi::array tmp_binding_map_entries; uint32_t consumed_input_bytes; uint32_t max_set = 0u; uint32_t max_binding = 0u; ngfmtl_binding_map_entry current_binding_map_entry; while (sscanf( serialized_binding_map, " ( %d %d ) : %d%n", ¤t_binding_map_entry.set, ¤t_binding_map_entry.binding, ¤t_binding_map_entry.native_binding, &consumed_input_bytes) == 3 && current_binding_map_entry.set != -1 && current_binding_map_entry.binding != -1 && current_binding_map_entry.native_binding != -1) { serialized_binding_map += consumed_input_bytes; max_set = NGFI_MAX(max_set, current_binding_map_entry.set); max_binding = NGFI_MAX(max_binding, current_binding_map_entry.binding); tmp_binding_map_entries.emplace_back(current_binding_map_entry); } ngfi::array> native_binding_map {max_set + 1}; for (uint32_t e = 0u; e < tmp_binding_map_entries.size(); ++e) { auto& set_map = native_binding_map[tmp_binding_map_entries[e].set]; if (set_map.size() == 0) { set_map.resize(max_binding + 1); memset(set_map.data(), ~0, sizeof(set_map[0]) * set_map.size()); } set_map[tmp_binding_map_entries[e].binding] = tmp_binding_map_entries[e].native_binding; } output->native_binding_map = ngfi::move(native_binding_map); // Skip the binding-map sentinel and read the trailing push-constant slot if any. 
if (current_binding_map_entry.set == -1) { serialized_binding_map += consumed_input_bytes; int pc_slot = -1; if (sscanf(serialized_binding_map, " %d", &pc_slot) == 1 && pc_slot >= 0) { output->push_const_native_binding = static_cast(pc_slot); } } if (need_threadgroup_size && serialized_threadgroup_size) { if (sscanf( serialized_threadgroup_size, "%d %d %d", &output->threadgroup_size[0], &output->threadgroup_size[1], &output->threadgroup_size[2]) != 3) { NGFI_DIAG_ERROR("Failed to parse threadgroup size"); return NGF_ERROR_INVALID_OPERATION; } } return NGF_ERROR_OK; } static ngf_id ngfmtl_get_shader_main( MTL::Library* func_lib, const char* entry_point_name, MTL::FunctionConstantValues* spec_consts) { NS::Error* err = nullptr; NS::String* ns_entry_point_name = NS::String::string(entry_point_name, NS::UTF8StringEncoding); ngf_id result = func_lib->newFunction(ns_entry_point_name, spec_consts, &err); if (err) { NGFI_DIAG_ERROR(err->localizedDescription()->utf8String()); return nullptr; } else { return result; } } static ngf_id ngfmtl_function_consts(const ngf_specialization_info* spec_info) { // Populate specialization constant values. 
ngf_id spec_consts = id_default; if (spec_info != nullptr) { for (uint32_t s = 0u; s < spec_info->nspecializations; ++s) { const ngf_constant_specialization* spec = &spec_info->specializations[s]; MTL::DataType type = get_mtl_type(spec->type); if (type == MTL::DataTypeNone) { return nullptr; } void* write_ptr = ((uint8_t*)spec_info->value_buffer + spec->offset); spec_consts->setConstantValue(write_ptr, type, spec->constant_id); } } return spec_consts; } static ngf_id ngfmtl_create_stencil_descriptor(const ngf_stencil_info& info) { ngf_id result = id_default; result->setStencilCompareFunction(get_mtl_compare_function(info.compare_op)); result->setStencilFailureOperation(get_mtl_stencil_op(info.fail_op)); result->setDepthStencilPassOperation(get_mtl_stencil_op(info.pass_op)); result->setDepthFailureOperation(get_mtl_stencil_op(info.depth_fail_op)); result->setWriteMask(info.write_mask); result->setReadMask(info.compare_mask); return result; } ngfi::maybe_ngfptr ngf_compute_pipeline_t::make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT { ngfmtl_niceshade_metadata metadata; const ngf_error metadata_parse_error = ngfmtl_parse_niceshade_metadata(info.shader_stage->source_code.data(), true, &metadata); if (metadata_parse_error != NGF_ERROR_OK) return metadata_parse_error; ngf_id func_const_values = ngfmtl_function_consts(info.spec_info); ngf_id function = ngfmtl_get_shader_main( info.shader_stage->func_lib.get(), info.shader_stage->entry_point_name.data(), func_const_values.get()); if (!function) { return NGF_ERROR_OBJECT_CREATION_FAILED; } ngf_id mtl_compute_desc = id_default; mtl_compute_desc->setComputeFunction(function.get()); if (info.debug_name != nullptr) { mtl_compute_desc->setLabel(NS::String::string(info.debug_name, NS::UTF8StringEncoding)); } NS::Error* err = nullptr; ngf_id computePSO = CURRENT_CONTEXT->device->newComputePipelineState( mtl_compute_desc.get(), MTL::PipelineOptionNone, nullptr, &err); if (err) { 
NGFI_DIAG_ERROR(err->localizedDescription()->utf8String()); return NGF_ERROR_OBJECT_CREATION_FAILED; } auto compute_pipeline = ngfi::unique_ptr::make(); compute_pipeline->pipeline = ngfi::move(computePSO); compute_pipeline->niceshade_metadata = ngfi::move(metadata); return ngfi::move(compute_pipeline); } ngfi::maybe_ngfptr ngf_graphics_pipeline_t::make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT { ngf_id mtl_pipe_desc = id_default; const ngf_attachment_descriptions& attachment_descs = *info.compatible_rt_attachment_descs; uint32_t ncolor_attachments = 0u; for (uint32_t i = 0u; i < attachment_descs.ndescs; ++i) { const ngf_attachment_description& attachment_desc = attachment_descs.descs[i]; if (attachment_desc.is_resolve) continue; if (attachment_desc.type == NGF_ATTACHMENT_COLOR) { const ngf_blend_info blend = info.color_attachment_blend_states ? info.color_attachment_blend_states[ncolor_attachments] : ngf_blend_info {}; MTL::RenderPipelineColorAttachmentDescriptor* mtl_attachment_desc = mtl_pipe_desc->colorAttachments()->object(ncolor_attachments++); mtl_attachment_desc->setPixelFormat(get_mtl_pixel_format(attachment_desc.format).format); mtl_attachment_desc->setBlendingEnabled(blend.enable); if (blend.enable) { mtl_attachment_desc->setSourceRGBBlendFactor( get_mtl_blend_factor(blend.src_color_blend_factor)); mtl_attachment_desc->setDestinationRGBBlendFactor( get_mtl_blend_factor(blend.dst_color_blend_factor)); mtl_attachment_desc->setSourceAlphaBlendFactor( get_mtl_blend_factor(blend.src_alpha_blend_factor)); mtl_attachment_desc->setDestinationAlphaBlendFactor( get_mtl_blend_factor(blend.dst_alpha_blend_factor)); mtl_attachment_desc->setRgbBlendOperation(get_mtl_blend_operation(blend.blend_op_color)); mtl_attachment_desc->setAlphaBlendOperation(get_mtl_blend_operation(blend.blend_op_alpha)); } if (info.color_attachment_blend_states) { mtl_attachment_desc->setWriteMask( (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_R ? 
MTL::ColorWriteMaskRed : 0) | (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_G ? MTL::ColorWriteMaskGreen : 0) | (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_B ? MTL::ColorWriteMaskBlue : 0) | (blend.color_write_mask & NGF_COLOR_MASK_WRITE_BIT_A ? MTL::ColorWriteMaskAlpha : 0)); } } else if ( attachment_desc.type == NGF_ATTACHMENT_DEPTH || attachment_desc.type == NGF_ATTACHMENT_DEPTH_STENCIL) { mtl_pipe_desc->setDepthAttachmentPixelFormat( get_mtl_pixel_format(attachment_desc.format).format); } } mtl_pipe_desc->setRasterSampleCount(info.multisample->sample_count); mtl_pipe_desc->setAlphaToCoverageEnabled(info.multisample->alpha_to_coverage); mtl_pipe_desc->setStencilAttachmentPixelFormat(MTL::PixelFormatInvalid); if (mtl_pipe_desc->depthAttachmentPixelFormat() == MTL::PixelFormatDepth32Float_Stencil8) { mtl_pipe_desc->setStencilAttachmentPixelFormat(MTL::PixelFormatDepth32Float_Stencil8); } // Populate specialization constant values. ngf_id spec_consts = ngfmtl_function_consts(info.spec_info); // Set stage functions. 
bool have_niceshade_metadata = false; ngfmtl_niceshade_metadata metadata; for (uint32_t s = 0u; s < info.nshader_stages; ++s) { const ngf_shader_stage stage = info.shader_stages[s]; if (!have_niceshade_metadata) { const ngf_error metadata_parse_result = ngfmtl_parse_niceshade_metadata( stage->source_code.data(), stage->type == NGF_STAGE_COMPUTE, &metadata); have_niceshade_metadata = (metadata_parse_result == NGF_ERROR_OK); } if (stage->type == NGF_STAGE_VERTEX) { assert(!mtl_pipe_desc->vertexFunction()); mtl_pipe_desc->setVertexFunction(ngfmtl_get_shader_main( stage->func_lib.get(), stage->entry_point_name.data(), spec_consts.get()) .get()); } else if (stage->type == NGF_STAGE_FRAGMENT) { assert(!mtl_pipe_desc->fragmentFunction()); mtl_pipe_desc->setFragmentFunction(ngfmtl_get_shader_main( stage->func_lib.get(), stage->entry_point_name.data(), spec_consts.get()) .get()); } } if (!have_niceshade_metadata) { NGFI_DIAG_ERROR("Native binding map not found."); return NGF_ERROR_OBJECT_CREATION_FAILED; } // Configure vertex input. 
const ngf_vertex_input_info& vertex_input_info = *info.input_info; MTL::VertexDescriptor* vert_desc = mtl_pipe_desc->vertexDescriptor(); for (uint32_t a = 0u; a < vertex_input_info.nattribs; ++a) { MTL::VertexAttributeDescriptor* attr_desc = vert_desc->attributes()->object(a); const ngf_vertex_attrib_desc& attr_info = vertex_input_info.attribs[a]; attr_desc->setOffset(vertex_input_info.attribs[a].offset); attr_desc->setBufferIndex(MAX_BUFFER_BINDINGS - vertex_input_info.attribs[a].binding); attr_desc->setFormat( get_mtl_attrib_format(attr_info.type, attr_info.size, attr_info.normalized)); if (attr_desc->format() == MTL::VertexFormatInvalid) { NGFI_DIAG_ERROR("Vertex attrib format not supported by Metal backend."); return NGF_ERROR_INVALID_FORMAT; } } for (uint32_t b = 0u; b < vertex_input_info.nvert_buf_bindings; ++b) { MTL::VertexBufferLayoutDescriptor* binding_desc = vert_desc->layouts()->object(MAX_BUFFER_BINDINGS - b); const ngf_vertex_buf_binding_desc& binding_info = vertex_input_info.vert_buf_bindings[b]; binding_desc->setStride(binding_info.stride); binding_desc->setStepFunction(get_mtl_step_function(binding_info.input_rate)); } // Set primitive topology. mtl_pipe_desc->setInputPrimitiveTopology( get_mtl_primitive_topology_class(info.input_assembly_info->primitive_topology)); if (mtl_pipe_desc->inputPrimitiveTopology() == MTL::PrimitiveTopologyClassUnspecified) { return NGF_ERROR_OBJECT_CREATION_FAILED; } auto pipeline = ngfi::unique_ptr::make(); pipeline->niceshade_metadata = ngfi::move(metadata); memcpy(pipeline->blend_color, info.blend_consts, sizeof(pipeline->blend_color)); if (info.debug_name != nullptr) { mtl_pipe_desc->setLabel(NS::String::string(info.debug_name, NS::UTF8StringEncoding)); } NS::Error* err = nullptr; pipeline->pipeline = CURRENT_CONTEXT->device->newRenderPipelineState(mtl_pipe_desc.get(), &err); pipeline->primitive_type = get_mtl_primitive_type(info.input_assembly_info->primitive_topology); // Set winding order and culling mode. 
pipeline->winding = get_mtl_winding(info.rasterization->front_face); pipeline->culling = get_mtl_culling(info.rasterization->cull_mode); // Set up depth and stencil state. pipeline->depth_stencil_desc = id_default; const ngf_depth_stencil_info& depth_stencil_info = *info.depth_stencil; pipeline->depth_stencil_desc->setDepthCompareFunction( depth_stencil_info.depth_test ? get_mtl_compare_function(depth_stencil_info.depth_compare) : MTL::CompareFunctionAlways); pipeline->depth_stencil_desc->setDepthWriteEnabled(info.depth_stencil->depth_write); ngf_id backface_descriptor = ngfmtl_create_stencil_descriptor(depth_stencil_info.back_stencil); ngf_id frontface_descriptor = ngfmtl_create_stencil_descriptor(depth_stencil_info.front_stencil); pipeline->depth_stencil_desc->setBackFaceStencil(backface_descriptor.get()); pipeline->depth_stencil_desc->setFrontFaceStencil(frontface_descriptor.get()); pipeline->front_stencil_reference = depth_stencil_info.front_stencil.reference; pipeline->back_stencil_reference = depth_stencil_info.back_stencil.reference; pipeline->depth_stencil = CURRENT_CONTEXT->device->newDepthStencilState(pipeline->depth_stencil_desc.get()); if (err) { NGFI_DIAG_ERROR(err->localizedDescription()->utf8String()); return NGF_ERROR_OBJECT_CREATION_FAILED; } else { return ngfi::move(pipeline); } } ngfi::maybe_ngfptr ngf_buffer_t::make(const ngf_buffer_info& info) NGF_NOEXCEPT { MTL::ResourceOptions options = 0u; switch (info.storage_type) { case NGF_BUFFER_STORAGE_HOST_READABLE: case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE: case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE: options = MTL::ResourceCPUCacheModeDefaultCache | MTL::ResourceStorageModeShared; break; case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE: case NGF_BUFFER_STORAGE_HOST_WRITEABLE: options = MTL::ResourceCPUCacheModeWriteCombined | MTL::ResourceStorageModeShared; break; case NGF_BUFFER_STORAGE_DEVICE_LOCAL: options = MTL::ResourceStorageModePrivate; break; default: assert(false); 
} auto result = ngfi::unique_ptr::make(); result->mtl_buffer = ngf_id {CURRENT_CONTEXT->device->newBuffer(info.size, options)}; if (!result->mtl_buffer) { return NGF_ERROR_OBJECT_CREATION_FAILED; } return ngfi::move(result); } ngfi::maybe_ngfptr ngf_texel_buffer_view_t::make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT { auto view = ngfi::unique_ptr::make(); ngf_id texel_buf_descriptor = id_default; texel_buf_descriptor->setDepth(1); texel_buf_descriptor->setMipmapLevelCount(1); texel_buf_descriptor->setPixelFormat(get_mtl_pixel_format(info.texel_format).format); texel_buf_descriptor->setTextureType(MTL::TextureTypeTextureBuffer); texel_buf_descriptor->setArrayLength(1); texel_buf_descriptor->setSampleCount(1); texel_buf_descriptor->setUsage(MTL::TextureUsageShaderRead); texel_buf_descriptor->setStorageMode(info.buffer->mtl_buffer->storageMode()); texel_buf_descriptor->setWidth(info.size / ngfmtl_get_bytesperpel(info.texel_format)); texel_buf_descriptor->setHeight(1); view->mtl_buffer_view = info.buffer->mtl_buffer->newTexture(texel_buf_descriptor.get(), info.offset, info.size); return ngfi::move(view); } static ngf_sample_count ngfmtl_get_ngf_sample_count(NS::UInteger sc) { switch (sc) { case 0: case 1: return NGF_SAMPLE_COUNT_1; case 2: return NGF_SAMPLE_COUNT_2; case 4: return NGF_SAMPLE_COUNT_4; case 8: return NGF_SAMPLE_COUNT_8; case 16: return NGF_SAMPLE_COUNT_16; case 32: return NGF_SAMPLE_COUNT_32; case 64: return NGF_SAMPLE_COUNT_64; } return NGF_SAMPLE_COUNT_1; } ngfi::maybe_ngfptr ngf_image_view_t::make(const ngf_image_view_info& info) NGF_NOEXCEPT { const auto maybe_texture_type = get_mtl_texture_type( info.view_type, info.nlayers, ngfmtl_get_ngf_sample_count(info.src_image->texture->sampleCount())); if (!maybe_texture_type) { return NGF_ERROR_OBJECT_CREATION_FAILED; } MTL::Texture* view = info.src_image->texture->newTextureView( get_mtl_pixel_format(info.view_format).format, maybe_texture_type.value(), NS::Range(info.base_mip_level, 
info.nmips), NS::Range(info.base_layer, info.nlayers)); if (!view) { return NGF_ERROR_OBJECT_CREATION_FAILED; } auto image_view = ngfi::unique_ptr::make(); image_view->view = view; return ngfi::move(image_view); } ngfi::maybe_ngfptr ngf_image_t::make(const ngf_image_info& info) NGF_NOEXCEPT { ngf_id mtl_img_desc = id_default; const MTL::PixelFormat fmt = get_mtl_pixel_format(info.format).format; if (fmt == MTL::PixelFormatInvalid) { NGFI_DIAG_ERROR("Image format %d not supported by Metal backend.", info.format); return NGF_ERROR_INVALID_FORMAT; } ngfi::value_or_ngferr maybe_texture_type = get_mtl_texture_type(info.type, info.nlayers, info.sample_count); if (!maybe_texture_type.has_value()) { NGFI_DIAG_ERROR("Image type %d not supported by Metal backend.", info.type); return NGF_ERROR_INVALID_ENUM; } mtl_img_desc->setTextureType(maybe_texture_type.value()); mtl_img_desc->setPixelFormat(fmt); mtl_img_desc->setWidth(info.extent.width); mtl_img_desc->setHeight(info.extent.height); mtl_img_desc->setDepth(info.extent.depth); mtl_img_desc->setArrayLength(info.nlayers); mtl_img_desc->setMipmapLevelCount(info.nmips); mtl_img_desc->setStorageMode(MTL::StorageModePrivate); mtl_img_desc->setSampleCount(info.sample_count); if (info.usage_hint & NGF_IMAGE_USAGE_ATTACHMENT) { mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageRenderTarget); } if (info.usage_hint & NGF_IMAGE_USAGE_SAMPLE_FROM) { mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageShaderRead); } if (info.usage_hint & NGF_IMAGE_USAGE_STORAGE) { mtl_img_desc->setUsage(mtl_img_desc->usage() | MTL::TextureUsageShaderWrite); } auto image = ngfi::unique_ptr::make(); image->texture = MTL_DEVICE->newTexture(mtl_img_desc.get()); image->usage_flags = info.usage_hint; image->format = info.format; return ngfi::move(image); } ngfi::maybe_ngfptr ngf_sampler_t::make(const ngf_sampler_info& info) NGF_NOEXCEPT { ngf_id sampler_desc = id_default; auto s = get_mtl_address_mode(info.wrap_u), t = 
get_mtl_address_mode(info.wrap_v), r = get_mtl_address_mode(info.wrap_w); if (!(s && t && r)) { return NGF_ERROR_INVALID_ENUM; } sampler_desc->setSAddressMode(s.value()); sampler_desc->setTAddressMode(t.value()); sampler_desc->setRAddressMode(r.value()); sampler_desc->setMinFilter(get_mtl_minmag_filter(info.min_filter)); sampler_desc->setMagFilter(get_mtl_minmag_filter(info.mag_filter)); sampler_desc->setMipFilter(get_mtl_mip_filter(info.mip_filter)); sampler_desc->setMaxAnisotropy(info.enable_anisotropy ? (NS::UInteger)info.max_anisotropy : 1); sampler_desc->setLodMinClamp(info.lod_min); sampler_desc->setLodMaxClamp(info.lod_max); if (info.compare_op != NGF_COMPARE_OP_NEVER) { sampler_desc->setCompareFunction(get_mtl_compare_function(info.compare_op)); } auto sampler = ngfi::unique_ptr::make(); sampler->sampler = CURRENT_CONTEXT->device->newSamplerState(sampler_desc.get()); return ngfi::move(sampler); } ngfi::maybe_ngfptr ngf_render_target_t::make(const ngf_render_target_info& info) NGF_NOEXCEPT { return ngf_render_target_t::make( *info.attachment_descriptions, info.attachment_image_refs, (uint32_t)info.attachment_image_refs[0].image->texture->width(), (uint32_t)info.attachment_image_refs[0].image->texture->height()); } ngfi::maybe_ngfptr ngf_render_target_t::make( const ngf_attachment_descriptions& attachment_descs, const ngf_image_ref* img_refs, uint32_t rt_width, uint32_t rt_height) NGF_NOEXCEPT { auto rt = ngfi::unique_ptr::make(); rt->width = rt_width; rt->height = rt_height; ngf_attachment_description* attachment_descs_copy = NGFI_ALLOCN(ngf_attachment_description, attachment_descs.ndescs); rt->attachment_descs.descs = attachment_descs_copy; if (!rt->attachment_descs.descs) { return NGF_ERROR_OUT_OF_MEM; } rt->attachment_descs.ndescs = attachment_descs.ndescs; for (uint32_t i = 0; i < rt->attachment_descs.ndescs; ++i) { if (attachment_descs.descs[i].is_resolve) { ++rt->nresolve_attachments; } else { ++rt->nrender_attachments; } attachment_descs_copy[i] = 
attachment_descs.descs[i]; } if (img_refs) { rt->render_image_refs = ngfi::fixed_array {rt->nrender_attachments}; if (rt->nresolve_attachments > 0u) { rt->resolve_image_refs = ngfi::fixed_array {rt->nresolve_attachments}; } uint32_t image_ref_idx = 0u; uint32_t resolve_image_ref_idx = 0u; for (uint32_t i = 0; i < rt->attachment_descs.ndescs; ++i) { if (!rt->attachment_descs.descs[i].is_resolve) { rt->render_image_refs[image_ref_idx++] = img_refs[i]; } else if (rt->nresolve_attachments > 0u) { rt->resolve_image_refs[resolve_image_ref_idx++] = img_refs[i]; } else { assert(0); } } } return ngfi::move(rt); } ngfi::maybe_ngfptr ngf_context_t::make(const ngf_context_info& info) NGF_NOEXCEPT { auto ctx = ngfi::unique_ptr::make(); if (!ctx) { return NGF_ERROR_OUT_OF_MEM; } ctx->device = MTL_DEVICE; ctx->queue = ctx->device->newCommandQueue(); if (info.swapchain_info) { ctx->swapchain_info = *(info.swapchain_info); ngf_error err = ctx->swapchain.initialize(ctx->swapchain_info, ctx->device.get()); if (err != NGF_ERROR_OK) return err; ngf_attachment_descriptions attachment_descs; ngf_attachment_description desc_array[3]; attachment_descs.descs = desc_array; attachment_descs.ndescs = 1; desc_array[0].format = ctx->swapchain_info.color_format; desc_array[0].type = NGF_ATTACHMENT_COLOR; desc_array[0].sample_count = ctx->swapchain_info.sample_count; desc_array[0].is_resolve = false; if (ctx->swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED) { attachment_descs.ndescs++; desc_array[1].format = ctx->swapchain_info.depth_format; desc_array[1].type = ctx->swapchain_info.depth_format == NGF_IMAGE_FORMAT_DEPTH24_STENCIL8 ? 
NGF_ATTACHMENT_DEPTH_STENCIL : NGF_ATTACHMENT_DEPTH; desc_array[1].sample_count = ctx->swapchain_info.sample_count; desc_array[1].is_resolve = false; } auto maybe_default_rt = ngf_render_target_t::make( attachment_descs, nullptr, info.swapchain_info->width, info.swapchain_info->height); if (maybe_default_rt.has_error()) { return maybe_default_rt.error(); } ctx->default_rt = maybe_default_rt.value().release(); ctx->default_rt->is_default = true; } ctx->frame_sync_sem = dispatch_semaphore_create(ctx->swapchain_info.capacity_hint); return ngfi::move(ctx); } ngfi::maybe_ngfptr ngf_shader_stage_t::make(const ngf_shader_stage_info& info) NGF_NOEXCEPT { auto stage = ngfi::unique_ptr::make(); if (!stage) { return NGF_ERROR_OUT_OF_MEM; } stage->type = info.type; stage->source_code = ngfi::fixed_array {(const char*)info.content, info.content_length}; // Create a MTLLibrary for this stage. ngf_id source = NS::String::alloc()->init( (void*)info.content, info.content_length, NS::UTF8StringEncoding, false); ngf_id opts = id_default; NS::Error* err = nullptr; stage->func_lib = CURRENT_CONTEXT->device->newLibrary(source.get(), opts.get(), &err); if (!stage->func_lib) { NGFI_DIAG_ERROR(err->localizedDescription()->utf8String()); return NGF_ERROR_OBJECT_CREATION_FAILED; } // Set debug name. 
if (info.debug_name != nullptr) { stage->func_lib->setLabel( ngf_id(NS::String::alloc()->init(info.debug_name, NS::UTF8StringEncoding)) .get()); } stage->entry_point_name = ngfi::fixed_array {info.entry_point_name, strlen(info.entry_point_name) + 1}; return ngfi::move(stage); } ngfi::array NGFMTL_DEVICES_LIST; const NS::Array* NGFMTL_MTL_DEVICES; #pragma mark ngf_function_implementations ngf_error ngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT { if (NGFMTL_DEVICES_LIST.empty()) { #if TARGET_OS_OSX NGFMTL_MTL_DEVICES = MTL::CopyAllDevices(); NGFMTL_DEVICES_LIST.resize(NGFMTL_MTL_DEVICES->count()); for (uint32_t d = 0u; d < NGFMTL_MTL_DEVICES->count(); ++d) { ngfmtl_populate_ngf_device( d, NGFMTL_DEVICES_LIST[d], static_cast(NGFMTL_MTL_DEVICES->object(d))); } #else NGFMTL_MTL_DEVICES = NS::Array::array(MTLCreateSystemDefaultDevice()); NGFMTL_DEVICES_LIST.resize(1); ngfmtl_populate_ngf_device( 0, NGFMTL_DEVICES_LIST[0], (MTL::Device*)NGFMTL_MTL_DEVICES->object(0)); #endif } if (devices) { *devices = NGFMTL_DEVICES_LIST.data(); } if (ndevices) { *ndevices = (uint32_t)NGFMTL_DEVICES_LIST.size(); } return NGF_ERROR_OK; } ngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT { if (MTL_DEVICE != nullptr || init_info->device >= NGFMTL_DEVICES_LIST.size()) { return NGF_ERROR_INVALID_OPERATION; } if (init_info->diag_info != NULL) { ngfi_diag_info = *init_info->diag_info; } else { ngfi_diag_info.callback = NULL; ngfi_diag_info.userdata = NULL; ngfi_diag_info.verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT; } ngfi_set_allocation_callbacks(init_info->allocation_callbacks); MTL_DEVICE = static_cast(NGFMTL_MTL_DEVICES->object(init_info->device)); // Initialize device capabilities. DEVICE_CAPS = NGFMTL_DEVICES_LIST[init_info->device].capabilities; return (MTL_DEVICE != nullptr) ? 
NGF_ERROR_OK : NGF_ERROR_INVALID_OPERATION; } void ngf_shutdown() NGF_NOEXCEPT { NGFI_DIAG_INFO("Shutting down nicegraf."); } const ngf_device_capabilities* ngf_get_device_capabilities() NGF_NOEXCEPT { return &DEVICE_CAPS; } extern "C" { void* objc_autoreleasePoolPush(void); void objc_autoreleasePoolPop(void* pool); } ngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT { *token = (uintptr_t)objc_autoreleasePoolPush(); dispatch_semaphore_wait(CURRENT_CONTEXT->frame_sync_sem, DISPATCH_TIME_FOREVER); CURRENT_CONTEXT->frame = CURRENT_CONTEXT->swapchain.next_frame(); if (CURRENT_CONTEXT->frame.color_drawable && CURRENT_CONTEXT->swapchain.compute_access_enabled()) { CURRENT_CONTEXT->frame.img_wrapper.texture = CURRENT_CONTEXT->frame.color_drawable->texture()->newTextureView( CURRENT_CONTEXT->swapchain.get_pixel_format()); } return (!CURRENT_CONTEXT->frame.color_drawable) ? NGF_ERROR_INVALID_OPERATION : NGF_ERROR_OK; } ngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT { ngf_context ctx = CURRENT_CONTEXT; if (CURRENT_CONTEXT->frame.color_drawable && CURRENT_CONTEXT->pending_cmd_buffer) { CURRENT_CONTEXT->pending_cmd_buffer->addCompletedHandler( [ctx](MTL::CommandBuffer*) { dispatch_semaphore_signal(ctx->frame_sync_sem); }); CURRENT_CONTEXT->pending_cmd_buffer->presentDrawable(CURRENT_CONTEXT->frame.color_drawable); CURRENT_CONTEXT->last_cmd_buffer = ngf_id::add_retain(CURRENT_CONTEXT->pending_cmd_buffer); CURRENT_CONTEXT->pending_cmd_buffer->commit(); CURRENT_CONTEXT->pending_cmd_buffer = nullptr; CURRENT_CONTEXT->frame = ngfmtl_swapchain::frame {}; } else { dispatch_semaphore_signal(ctx->frame_sync_sem); } objc_autoreleasePoolPop((void*)token); return NGF_ERROR_OK; } ngf_error ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT { assert(CURRENT_CONTEXT); *result = &CURRENT_CONTEXT->frame.img_wrapper; return NGF_ERROR_OK; } ngf_render_target ngf_default_render_target() NGF_NOEXCEPT { return 
CURRENT_CONTEXT->default_rt; } const ngf_attachment_descriptions* ngf_default_render_target_attachment_descs() NGF_NOEXCEPT { return &CURRENT_CONTEXT->default_rt->attachment_descs; } ngf_error ngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT { assert(ctx); ctx->swapchain_info.width = new_width; ctx->swapchain_info.height = new_height; ctx->default_rt->width = new_width; ctx->default_rt->height = new_height; return ctx->swapchain.resize(ctx->swapchain_info); } ngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT { CURRENT_CONTEXT = ctx; ctx->is_current = true; return NGF_ERROR_OK; } ngf_context ngf_get_context() NGF_NOEXCEPT { return CURRENT_CONTEXT; } void ngfmtl_attachment_set_common( MTL::RenderPassAttachmentDescriptor* attachment, uint32_t render_image_idx, ngf_attachment_type type, const ngf_render_target rt, ngf_attachment_load_op load_op, ngf_attachment_store_op store_op) NGF_NOEXCEPT { if (!rt->is_default) { attachment->setTexture(rt->render_image_refs[render_image_idx].image->texture.get()); attachment->setLevel(rt->render_image_refs[render_image_idx].mip_level); attachment->setSlice(rt->render_image_refs[render_image_idx].layer); } else { attachment->setTexture( type == NGF_ATTACHMENT_COLOR ? 
CURRENT_CONTEXT->frame.color_attachment_texture() : CURRENT_CONTEXT->frame.depth_attachment_texture()); attachment->setLevel(0); attachment->setSlice(0); } attachment->setLoadAction(get_mtl_load_action(load_op)); attachment->setStoreAction(get_mtl_store_action(store_op)); } uint8_t* ngf_map_buffer(MTL::Buffer* buffer, size_t offset, [[maybe_unused]] size_t size) { return (uint8_t*)buffer->contents() + offset; } void* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT { buf->mapped_offset = offset; return (void*)ngf_map_buffer(buf->mtl_buffer.get(), offset, size); } void ngf_buffer_flush_range( [[maybe_unused]] ngf_buffer buf, [[maybe_unused]] size_t offset, [[maybe_unused]] size_t size) NGF_NOEXCEPT { } void ngf_buffer_unmap(ngf_buffer) NGF_NOEXCEPT { } ngf_error ngf_start_cmd_buffer(ngf_cmd_buffer cmd_buffer, ngf_frame_token) NGF_NOEXCEPT { assert(cmd_buffer); cmd_buffer->mtl_cmd_buffer = CURRENT_CONTEXT->queue->commandBuffer(); assert(!cmd_buffer->active_rce); assert(!cmd_buffer->active_bce); NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_READY); return NGF_ERROR_OK; } ngf_error ngf_submit_cmd_buffers(uint32_t n, ngf_cmd_buffer* cmd_buffers) NGF_NOEXCEPT { if (CURRENT_CONTEXT->pending_cmd_buffer) { CURRENT_CONTEXT->pending_cmd_buffer->commit(); CURRENT_CONTEXT->pending_cmd_buffer = nullptr; } for (uint32_t b = 0u; b < n; ++b) { NGFI_TRANSITION_CMD_BUF(cmd_buffers[b], ngfi::CMD_BUFFER_STATE_PENDING); if (b < n - 1u) { cmd_buffers[b]->mtl_cmd_buffer->commit(); } else { CURRENT_CONTEXT->pending_cmd_buffer = cmd_buffers[b]->mtl_cmd_buffer; } cmd_buffers[b]->mtl_cmd_buffer = nullptr; NGFI_TRANSITION_CMD_BUF(cmd_buffers[b], ngfi::CMD_BUFFER_STATE_SUBMITTED); } return NGF_ERROR_OK; } void ngfmtl_finish_pending_encoders(ngf_cmd_buffer cmd_buffer) { /* End any current Metal encoders.*/ if (cmd_buffer->active_rce) { cmd_buffer->active_rce->endEncoding(); cmd_buffer->active_rce = nullptr; } else if (cmd_buffer->active_bce) { 
cmd_buffer->active_bce->endEncoding(); cmd_buffer->active_bce = nullptr; } else if (cmd_buffer->active_cce) { cmd_buffer->active_cce->endEncoding(); cmd_buffer->active_cce = nullptr; } } ngf_error ngf_cmd_begin_render_pass_simple( ngf_cmd_buffer cmd_buf, ngf_render_target rt, float clear_color_r, float clear_color_g, float clear_color_b, float clear_color_a, float clear_depth, uint32_t clear_stencil, ngf_render_encoder* enc) NGF_NOEXCEPT { ngfi::tmp_arena().reset(); const uint32_t nattachments = rt->attachment_descs.ndescs; auto load_ops = ngfi::tmp_alloc(nattachments); auto store_ops = ngfi::tmp_alloc(nattachments); auto clears = ngfi::tmp_alloc(nattachments); for (size_t i = 0u; i < nattachments; ++i) { load_ops[i] = NGF_LOAD_OP_CLEAR; if (rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_COLOR) { clears[i].clear_color[0] = clear_color_r; clears[i].clear_color[1] = clear_color_g; clears[i].clear_color[2] = clear_color_b; clears[i].clear_color[3] = clear_color_a; } else if (rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_DEPTH || rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_DEPTH_STENCIL) { clears[i].clear_depth_stencil.clear_depth = clear_depth; clears[i].clear_depth_stencil.clear_stencil = clear_stencil; } else { assert(false); } const bool needs_resolve = rt->attachment_descs.descs[i].type == NGF_ATTACHMENT_COLOR && rt->attachment_descs.descs[i].sample_count > NGF_SAMPLE_COUNT_1 && (rt->resolve_image_refs.data() || rt->is_default); store_ops[i] = (needs_resolve) ? 
NGF_STORE_OP_RESOLVE : NGF_STORE_OP_STORE; } const ngf_render_pass_info pass_info = {.render_target = rt, .load_ops = load_ops, .store_ops = store_ops, .clears = clears}; return ngf_cmd_begin_render_pass(cmd_buf, &pass_info, enc); } ngf_error ngf_cmd_begin_render_pass( ngf_cmd_buffer cmd_buffer, const ngf_render_pass_info* pass_info, ngf_render_encoder* enc) NGF_NOEXCEPT { NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_RECORDING); assert(pass_info); const ngf_render_target rt = pass_info->render_target; assert(rt); assert(cmd_buffer); ngfmtl_finish_pending_encoders(cmd_buffer); cmd_buffer->renderpass_active = true; uint32_t color_attachment_idx = 0u; uint32_t resolve_attachment_idx = 0u; uint32_t render_image_idx = 0u; ngf_id pass_descriptor = id_default; pass_descriptor->setRenderTargetWidth(rt->width); pass_descriptor->setRenderTargetHeight(rt->height); pass_descriptor->setDepthAttachment(nullptr); pass_descriptor->setStencilAttachment(nullptr); if (cmd_buffer->sample_buf_attachment_for_next_render_pass) { const auto& attachment_descriptor = cmd_buffer->sample_buf_attachment_for_next_render_pass; const auto attachment = pass_descriptor->sampleBufferAttachments()->object(0); attachment->setSampleBuffer(attachment_descriptor->sampleBuffer()); if (attachment_descriptor->startOfVertexSampleIndex() < attachment_descriptor->endOfVertexSampleIndex()) { attachment->setStartOfVertexSampleIndex(attachment_descriptor->startOfVertexSampleIndex()); attachment->setEndOfVertexSampleIndex(attachment_descriptor->endOfVertexSampleIndex()); } if (attachment_descriptor->startOfFragmentSampleIndex() < attachment_descriptor->endOfFragmentSampleIndex()) { attachment->setStartOfFragmentSampleIndex( attachment_descriptor->startOfFragmentSampleIndex()); attachment->setEndOfFragmentSampleIndex(attachment_descriptor->endOfFragmentSampleIndex()); } cmd_buffer->sample_buf_attachment_for_next_render_pass = nullptr; } for (uint32_t i = 0u; i < rt->attachment_descs.ndescs; ++i) { 
    const ngf_attachment_description& attachment_desc = rt->attachment_descs.descs[i];
    // Resolve attachments are configured from their multisampled source
    // attachment (see the NGF_ATTACHMENT_COLOR case below), so skip them here.
    if (attachment_desc.is_resolve) { continue; }
    const ngf_attachment_load_op  load_op  = pass_info->load_ops[i];
    const ngf_attachment_store_op store_op = pass_info->store_ops[i];
    // Clear values are only consulted when the load op actually clears.
    const ngf_clear_info* clear_info =
        load_op == NGF_LOAD_OP_CLEAR && pass_info->clears ? &pass_info->clears[i] : nullptr;
    switch (attachment_desc.type) {
    case NGF_ATTACHMENT_COLOR: {
      // NOTE(review): ngf_id's template argument appears stripped by
      // extraction throughout this function -- confirm against the original.
      ngf_id mtl_desc = id_default;
      ngfmtl_attachment_set_common(
          mtl_desc.get(), render_image_idx++, attachment_desc.type, rt, load_op, store_op);
      if (clear_info) {
        mtl_desc->setClearColor(MTL::ClearColor::Make(
            clear_info->clear_color[0],
            clear_info->clear_color[1],
            clear_info->clear_color[2],
            clear_info->clear_color[3]));
      }
      if (attachment_desc.sample_count > NGF_SAMPLE_COUNT_1) {
        // Multisampled color: hook up the resolve texture -- either the
        // default render target's own resolve texture, or the user-provided
        // resolve image ref.
        if (rt->is_default) {
          mtl_desc->setResolveTexture(CURRENT_CONTEXT->frame.resolve_attachment_texture());
        } else if (rt->resolve_image_refs.data()) {
          mtl_desc->setResolveTexture(
              rt->resolve_image_refs[resolve_attachment_idx++].image->texture.get());
        }
      }
      pass_descriptor->colorAttachments()->setObject(mtl_desc.get(), color_attachment_idx++);
      break;
    }
    case NGF_ATTACHMENT_DEPTH: {
      ngf_id mtl_desc = id_default;
      ngfmtl_attachment_set_common(
          mtl_desc.get(), render_image_idx++, attachment_desc.type, rt, load_op, store_op);
      if (clear_info) {
        mtl_desc->setClearDepth(clear_info->clear_depth_stencil.clear_depth);
      }
      pass_descriptor->setDepthAttachment(mtl_desc.get());
      break;
    }
    case NGF_ATTACHMENT_DEPTH_STENCIL: {
      // Combined depth/stencil: the same image index backs both the depth
      // attachment descriptor and the stencil attachment descriptor.
      const uint32_t ds_image_idx = render_image_idx++;
      ngf_id mtl_depth_desc = id_default;
      ngfmtl_attachment_set_common(
          mtl_depth_desc.get(), ds_image_idx, attachment_desc.type, rt, load_op, store_op);
      if (clear_info) {
        mtl_depth_desc->setClearDepth(clear_info->clear_depth_stencil.clear_depth);
      }
      pass_descriptor->setDepthAttachment(mtl_depth_desc.get());
      ngf_id mtl_stencil_desc = id_default;
      ngfmtl_attachment_set_common(
          mtl_stencil_desc.get(),
          ds_image_idx,
          attachment_desc.type,
          rt,
          load_op,
          store_op);
      if (clear_info) {
        mtl_stencil_desc->setClearStencil(clear_info->clear_depth_stencil.clear_stencil);
      }
      pass_descriptor->setStencilAttachment(mtl_stencil_desc.get());
      break;
    }
    }
  }
  assert(!cmd_buffer->active_rce);
  cmd_buffer->active_rce = cmd_buffer->mtl_cmd_buffer->renderCommandEncoder(pass_descriptor.get());
  cmd_buffer->active_rt  = rt;
  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buffer;
  return NGF_ERROR_OK;
}

// Ends the current render pass: closes the render command encoder and resets
// render-pass-related command buffer state.
ngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT {
  auto cmd_buffer = NGFMTL_ENC2CMDBUF(enc);
  if (cmd_buffer->active_rce) {
    cmd_buffer->active_rce->endEncoding();
    cmd_buffer->active_rce      = nullptr;
    cmd_buffer->active_gfx_pipe = nullptr;
  }
  cmd_buffer->renderpass_active = false;
  cmd_buffer->active_rt         = nullptr;
  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);
  return NGF_ERROR_OK;
}

// Begins a transfer pass by opening a blit command encoder. The pass_info
// parameter is currently unused on the Metal backend.
ngf_error ngf_cmd_begin_xfer_pass(ngf_cmd_buffer cmd_buf, const ngf_xfer_pass_info*, ngf_xfer_encoder* enc) NGF_NOEXCEPT {
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_RECORDING);
  ngfmtl_finish_pending_encoders(cmd_buf);
  cmd_buf->xfer_pass_active = true;
  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buf;
  cmd_buf->active_bce = cmd_buf->mtl_cmd_buffer->blitCommandEncoder();
  return NGF_ERROR_OK;
}

// Ends the current transfer pass and closes the blit encoder, if any.
ngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->xfer_pass_active = false;
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);
  if (cmd_buf->active_bce) {
    cmd_buf->active_bce->endEncoding();
    cmd_buf->active_bce = nullptr;
  }
  return NGF_ERROR_OK;
}

// Begins a compute pass; optionally attaches a GPU counter sample buffer that
// was requested via ngf_mtl_set_sample_attachment_for_next_compute_pass.
ngf_error ngf_cmd_begin_compute_pass(
    ngf_cmd_buffer cmd_buffer,
    const ngf_compute_pass_info* pass_info,
    ngf_compute_encoder* enc) NGF_NOEXCEPT {
  NGFI_TRANSITION_CMD_BUF(cmd_buffer, ngfi::CMD_BUFFER_STATE_RECORDING);
  cmd_buffer->compute_pass_active = true;
  // NOTE(review): ngf_id's template argument appears stripped by extraction.
  ngf_id pass_descriptor = id_default;
  if (cmd_buffer->sample_buf_attachment_for_next_compute_pass) {
    const auto&
attachment_descriptor = cmd_buffer->sample_buf_attachment_for_next_compute_pass;
    const auto attachment = pass_descriptor->sampleBufferAttachments()->object(0);
    attachment->setSampleBuffer(attachment_descriptor->sampleBuffer());
    assert(
        attachment_descriptor->startOfEncoderSampleIndex() <
        attachment_descriptor->endOfEncoderSampleIndex());
    attachment->setStartOfEncoderSampleIndex(attachment_descriptor->startOfEncoderSampleIndex());
    attachment->setEndOfEncoderSampleIndex(attachment_descriptor->endOfEncoderSampleIndex());
    // One-shot: the sample buffer attachment only applies to this pass.
    cmd_buffer->sample_buf_attachment_for_next_compute_pass = nullptr;
  }
  enc->pvt_data_donotuse.d0 = (uintptr_t)cmd_buffer;
  cmd_buffer->active_cce = cmd_buffer->mtl_cmd_buffer->computeCommandEncoder(pass_descriptor.get());
  return NGF_ERROR_OK;
}

// Ends the current compute pass and closes the compute encoder, if any.
ngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  assert(cmd_buf);
  cmd_buf->compute_pass_active = false;
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);
  if (cmd_buf->active_cce) {
    cmd_buf->active_cce->endEncoding();
    cmd_buf->active_cce          = nullptr;
    cmd_buf->active_compute_pipe = nullptr;
  }
  return NGF_ERROR_OK;
}

// Applies any pending push-constant ("set bytes") data to the active render
// encoder. No-op when there is no pending data, no active encoder, or the
// bound pipeline's metadata has no native binding slot for push constants
// (slot == ~0u).
static void ngfmtl_apply_set_bytes_gfx(ngf_cmd_buffer cmd_buf) {
  if (cmd_buf->pending_pc_size == 0u || !cmd_buf->active_rce || !cmd_buf->active_gfx_pipe) return;
  const uint32_t slot = cmd_buf->active_gfx_pipe->niceshade_metadata.push_const_native_binding;
  if (slot == ~0u) return;
  // Push constants are made visible to both the vertex and fragment stages.
  cmd_buf->active_rce->setVertexBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);
  cmd_buf->active_rce->setFragmentBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);
}

// Compute-encoder counterpart of ngfmtl_apply_set_bytes_gfx.
static void ngfmtl_apply_set_bytes_compute(ngf_cmd_buffer cmd_buf) {
  if (cmd_buf->pending_pc_size == 0u || !cmd_buf->active_cce || !cmd_buf->active_compute_pipe)
    return;
  const uint32_t slot = cmd_buf->active_compute_pipe->niceshade_metadata.push_const_native_binding;
  if (slot == ~0u) return;
  cmd_buf->active_cce->setBytes(cmd_buf->pending_pc_data, cmd_buf->pending_pc_size, slot);
}

// Binds a compute pipeline to the active compute encoder and re-applies any
// push-constant data that was captured before the pipeline was bound.
void ngf_cmd_bind_compute_pipeline(ngf_compute_encoder enc, ngf_compute_pipeline pipeline) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  assert(cmd_buf);
  assert(cmd_buf->active_cce);
  if (!cmd_buf->active_cce) {
    NGFI_DIAG_ERROR("Attempt to bind compute pipeline without an active compute encoder");
    return;
  }
  cmd_buf->active_cce->setComputePipelineState(pipeline->pipeline.get());
  cmd_buf->active_compute_pipe = pipeline;
  ngfmtl_apply_set_bytes_compute(cmd_buf);
}

// Dispatches the given number of threadgroups; the per-threadgroup size comes
// from the bound pipeline's niceshade metadata.
void ngf_cmd_dispatch(
    ngf_compute_encoder enc,
    uint32_t x_threadgroups,
    uint32_t y_threadgroups,
    uint32_t z_threadgroups) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  assert(cmd_buf->active_cce);
  if (!cmd_buf->active_cce) {
    NGFI_DIAG_ERROR("Attempt to perform a compute dispatch without an active "
                    "compute encoder.");
    return;
  }
  assert(cmd_buf->active_compute_pipe);
  if (!cmd_buf->active_compute_pipe) {
    NGFI_DIAG_ERROR("Attempt to perform a compute dispatch without a bound "
                    "compute pipeline.");
    return;
  }
  const uint32_t* threadgroup_size =
      cmd_buf->active_compute_pipe->niceshade_metadata.threadgroup_size;
  cmd_buf->active_cce->dispatchThreadgroups(
      MTL::Size::Make(x_threadgroups, y_threadgroups, z_threadgroups),
      MTL::Size::Make(threadgroup_size[0], threadgroup_size[1], threadgroup_size[2]));
}

// Binds a graphics pipeline: sets the Metal pipeline state plus all the
// dynamic state that nicegraf bakes into its pipeline object (cull mode,
// winding, blend color, depth/stencil state, stencil reference values).
void ngf_cmd_bind_gfx_pipeline(ngf_render_encoder enc, const ngf_graphics_pipeline pipeline) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  buf->active_rce->setRenderPipelineState(pipeline->pipeline.get());
  buf->active_rce->setCullMode(pipeline->culling);
  buf->active_rce->setFrontFacingWinding(pipeline->winding);
  buf->active_rce->setBlendColor(
      pipeline->blend_color[0],
      pipeline->blend_color[1],
      pipeline->blend_color[2],
      pipeline->blend_color[3]);
  if (pipeline->depth_stencil) {
    buf->active_rce->setDepthStencilState(pipeline->depth_stencil.get());
  }
  buf->active_rce->setStencilReferenceValues(
      pipeline->front_stencil_reference, pipeline->back_stencil_reference);
  buf->active_gfx_pipe = pipeline;
  // Re-apply any push constants captured before this pipeline was bound.
  ngfmtl_apply_set_bytes_gfx(buf);
}

// Sets the viewport. The origin is shifted and the height negated to flip the
// vertical axis relative to Metal's default viewport convention.
void ngf_cmd_viewport(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  MTL::Viewport viewport;
  viewport.originX = r->x;
  viewport.originY = r->y + (int32_t)r->height;
  viewport.width   = r->width;
  viewport.height  = -1.0 * r->height;  // TODO: fix
  viewport.znear   = 0.0f;
  viewport.zfar    = 1.0f;
  buf->active_rce->setViewport(viewport);
}

// Sets the scissor rectangle on the active render encoder.
void ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  MTL::ScissorRect scissor;
  scissor.x      = (uint32_t)r->x;
  scissor.y      = (uint32_t)r->y;
  scissor.width  = r->width;
  scissor.height = r->height;
  buf->active_rce->setScissorRect(scissor);
}

// Records a draw. For indexed draws, first_element is converted to a byte
// offset into the bound index buffer using the index element size
// (2 bytes for uint16 indices, 4 bytes otherwise).
void ngf_cmd_draw(
    ngf_render_encoder enc,
    bool indexed,
    uint32_t first_element,
    uint32_t nelements,
    uint32_t ninstances) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  MTL::PrimitiveType prim_type = buf->active_gfx_pipe->primitive_type;
  if (!indexed) {
    buf->active_rce->drawPrimitives(prim_type, first_element, nelements, ninstances, 0);
  } else {
    buf->active_rce->drawIndexedPrimitives(
        prim_type,
        nelements,
        buf->bound_index_buffer_type,
        buf->bound_index_buffer.get(),
        buf->bound_index_buffer_offset +
            first_element * (buf->bound_index_buffer_type == MTL::IndexTypeUInt16 ? 2 : 4),
        ninstances,
        0,
        0);
  }
}

// Binds a vertex attribute buffer. Attribute buffers are placed at the top of
// the Metal buffer argument table (slot MAX_BUFFER_BINDINGS - binding), which
// keeps them from colliding with resource bindings in the low slots.
void ngf_cmd_bind_attrib_buffer(
    ngf_render_encoder enc,
    const ngf_buffer buf,
    uint32_t binding,
    size_t offset) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->active_rce->setVertexBuffer(
      buf->mtl_buffer.get(), offset, MAX_BUFFER_BINDINGS - binding);
}

// Remembers the index buffer for subsequent indexed draws (Metal supplies the
// index buffer at draw time rather than as encoder state).
void ngf_cmd_bind_index_buffer(
    ngf_render_encoder enc,
    const ngf_buffer buf,
    size_t offset,
    ngf_type type) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  // NOTE(review): ngf_id's template argument appears stripped by extraction.
  cmd_buf->bound_index_buffer        = ngf_id::add_retain(buf->mtl_buffer.get());
  cmd_buf->bound_index_buffer_type   = get_mtl_index_type(type);
  cmd_buf->bound_index_buffer_offset = offset;
}

// Binds resources (buffers/textures/samplers) to the active render encoder,
// translating nicegraf (set, binding) pairs into native Metal argument-table
// slots via the pipeline's niceshade metadata.
void ngf_cmd_bind_resources(
    ngf_render_encoder enc,
    const ngf_resource_bind_op* bind_ops,
    uint32_t nbind_ops) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  assert(cmd_buf);
  for (uint32_t o = 0u; o < nbind_ops; ++o) {
    const ngf_resource_bind_op& bind_op = bind_ops[o];
    assert(cmd_buf->active_gfx_pipe);
    if (!cmd_buf->active_gfx_pipe) {
      NGFI_DIAG_ERROR("Attempt to bind resources without a bound graphics pipeline.");
      return;
    }
    assert(cmd_buf->active_rce);
    if (!cmd_buf->active_rce) {
      NGFI_DIAG_ERROR("Attempt to bind resources without an active render "
                      "command encoder.");
      return;
    }
    const uint32_t native_binding =
        cmd_buf->active_gfx_pipe->niceshade_metadata
            .native_binding_map[bind_op.target_set][bind_op.target_binding] +
        bind_op.array_index;
    if (native_binding == ~0) {
      NGFI_DIAG_ERROR(
          "Failed to find native binding for set %d binding %d",
          bind_op.target_set,
          bind_op.target_binding);
      continue;
    }
    switch (bind_op.type) {
    case NGF_DESCRIPTOR_TEXEL_BUFFER: {
      // Texel buffer views are exposed to shaders as textures.
      cmd_buf->active_rce->setVertexTexture(
          bind_op.info.texel_buffer_view->mtl_buffer_view.get(), native_binding);
      cmd_buf->active_rce->setFragmentTexture(
          bind_op.info.texel_buffer_view->mtl_buffer_view.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_STORAGE_BUFFER:
    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {
      const ngf_buffer_bind_info& buf_bind_op = bind_op.info.buffer;
      const
      ngf_buffer buf = buf_bind_op.buffer;
      size_t offset  = buf_bind_op.offset;
      cmd_buf->active_rce->setVertexBuffer(buf->mtl_buffer.get(), offset, native_binding);
      cmd_buf->active_rce->setFragmentBuffer(buf->mtl_buffer.get(), offset, native_binding);
      break;
    }
    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      // The bound resource may be either an image view or a raw image.
      MTL::Texture* t = img_bind_op.is_image_view ? img_bind_op.resource.view->view.get()
                                                  : img_bind_op.resource.image->texture.get();
      cmd_buf->active_rce->setVertexTexture(t, native_binding);
      cmd_buf->active_rce->setVertexSamplerState(
          img_bind_op.sampler->sampler.get(), native_binding);
      cmd_buf->active_rce->setFragmentTexture(t, native_binding);
      cmd_buf->active_rce->setFragmentSamplerState(
          img_bind_op.sampler->sampler.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_IMAGE: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      MTL::Texture* t = img_bind_op.is_image_view ? img_bind_op.resource.view->view.get()
                                                  : img_bind_op.resource.image->texture.get();
      cmd_buf->active_rce->setVertexTexture(t, native_binding);
      cmd_buf->active_rce->setFragmentTexture(t, native_binding);
      break;
    }
    case NGF_DESCRIPTOR_SAMPLER: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      cmd_buf->active_rce->setVertexSamplerState(
          img_bind_op.sampler->sampler.get(), native_binding);
      cmd_buf->active_rce->setFragmentSamplerState(
          img_bind_op.sampler->sampler.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_STORAGE_IMAGE:
      NGFI_DIAG_ERROR("Binding storage images to non-compute shader is "
                      "currently unsupported.");
      break;
    case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:
      cmd_buf->active_rce->setVertexAccelerationStructure(
          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure, native_binding);
      cmd_buf->active_rce->setFragmentAccelerationStructure(
          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure, native_binding);
      break;
    case NGF_DESCRIPTOR_TYPE_COUNT:
      assert(false);
    }
  }
}

// Maps an sRGB image format to its non-sRGB equivalent; yields
// NGF_ERROR_INVALID_ENUM for formats that are not sRGB.
// NOTE(review): value_or_ngferr's template argument appears stripped by
// extraction -- confirm against the original source.
static ngfi::value_or_ngferr get_regular_format_from_srgb(const ngf_image_format f) {
  switch (f) {
  case NGF_IMAGE_FORMAT_SRGB8:
    return NGF_IMAGE_FORMAT_RGB8;
  case NGF_IMAGE_FORMAT_SRGBA8:
    return NGF_IMAGE_FORMAT_RGBA8;
  case NGF_IMAGE_FORMAT_BGR8_SRGB:
    return NGF_IMAGE_FORMAT_BGR8;
  case NGF_IMAGE_FORMAT_BGRA8_SRGB:
    return NGF_IMAGE_FORMAT_BGRA8;
  default:
    return NGF_ERROR_INVALID_ENUM;
  }
}

// Compute-encoder counterpart of ngf_cmd_bind_resources.
void ngf_cmd_bind_compute_resources(
    ngf_compute_encoder enc,
    const ngf_resource_bind_op* bind_ops,
    uint32_t nbind_ops) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  assert(cmd_buf);
  for (uint32_t o = 0u; o < nbind_ops; ++o) {
    const ngf_resource_bind_op& bind_op = bind_ops[o];
    assert(cmd_buf->active_compute_pipe);
    if (!cmd_buf->active_compute_pipe) {
      NGFI_DIAG_ERROR("Attempt to bind resources without a bound compute pipeline.");
      return;
    }
    assert(cmd_buf->active_cce);
    if (!cmd_buf->active_cce) {
      NGFI_DIAG_ERROR("Attempt to bind resources without an active compute "
                      "command encoder.");
      return;
    }
    const uint32_t native_binding =
        cmd_buf->active_compute_pipe->niceshade_metadata
            .native_binding_map[bind_op.target_set][bind_op.target_binding] +
        bind_op.array_index;
    if (native_binding == ~0) {
      NGFI_DIAG_ERROR(
          "Failed to find native binding for set %d binding %d",
          bind_op.target_set,
          bind_op.target_binding);
      continue;
    }
    switch (bind_op.type) {
    case NGF_DESCRIPTOR_TEXEL_BUFFER: {
      cmd_buf->active_cce->setTexture(
          bind_op.info.texel_buffer_view->mtl_buffer_view.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_STORAGE_BUFFER:
    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {
      const ngf_buffer_bind_info& buf_bind_op = bind_op.info.buffer;
      const ngf_buffer buf = buf_bind_op.buffer;
      size_t offset        = buf_bind_op.offset;
      cmd_buf->active_cce->setBuffer(buf->mtl_buffer.get(), offset, native_binding);
      break;
    }
    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      MTL::Texture* t = img_bind_op.is_image_view ?
          img_bind_op.resource.view->view.get() : img_bind_op.resource.image->texture.get();
      cmd_buf->active_cce->setTexture(t, native_binding);
      cmd_buf->active_cce->setSamplerState(img_bind_op.sampler->sampler.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_STORAGE_IMAGE:
    case NGF_DESCRIPTOR_IMAGE: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      if (img_bind_op.is_image_view) {
        cmd_buf->active_cce->setTexture(img_bind_op.resource.view->view.get(), native_binding);
      } else {
        // For sRGB images, lazily create and cache a non-sRGB view and bind
        // that instead -- presumably because sRGB formats can't be used
        // directly from compute shaders here; confirm against the original.
        if (const auto maybe_format =
                get_regular_format_from_srgb(img_bind_op.resource.image->format)) {
          if (!img_bind_op.resource.image->non_srgb_view)
            img_bind_op.resource.image->non_srgb_view =
                img_bind_op.resource.image->texture.get()->newTextureView(
                    get_mtl_pixel_format(maybe_format.value()).format);
          cmd_buf->active_cce->setTexture(
              img_bind_op.resource.image->non_srgb_view.get(), native_binding);
        } else {
          // Not an sRGB format: bind the image's own texture.
          cmd_buf->active_cce->setTexture(
              img_bind_op.resource.image->texture.get(), native_binding);
        }
      }
      break;
    }
    case NGF_DESCRIPTOR_SAMPLER: {
      const ngf_image_sampler_bind_info& img_bind_op = bind_op.info.image_sampler;
      cmd_buf->active_cce->setSamplerState(img_bind_op.sampler->sampler.get(), native_binding);
      break;
    }
    case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:
      cmd_buf->active_cce->setAccelerationStructure(
          (MTL::AccelerationStructure*)bind_op.info.acceleration_structure, native_binding);
      break;
    case NGF_DESCRIPTOR_TYPE_COUNT:
      assert(false);
    }
  }
}

// Internal helper: records a buffer-to-buffer copy on the active blit encoder.
void ngfmtl_cmd_copy_buffer(
    ngf_xfer_encoder enc,
    MTL::Buffer* src,
    MTL::Buffer* dst,
    size_t size,
    size_t src_offset,
    size_t dst_offset) {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  // Transfer commands must not be recorded during a render pass.
  assert(buf->active_rce == nullptr);
  buf->active_bce->copyFromBuffer(src, src_offset, dst, dst_offset, size);
}

// Public buffer-to-buffer copy; unwraps the nicegraf buffers and delegates.
void ngf_cmd_copy_buffer(
    ngf_xfer_encoder enc,
    const ngf_buffer src,
    ngf_buffer dst,
    size_t size,
    size_t src_offset,
    size_t dst_offset) NGF_NOEXCEPT {
  ngfmtl_cmd_copy_buffer(
      enc, src->mtl_buffer.get(), dst->mtl_buffer.get(), size, src_offset, dst_offset);
}

// Uploads buffer contents into an image, one layer at a time. Per-layer source
// data is laid out contiguously (pitch * num_rows bytes per layer) starting at
// each write's src_offset.
void ngf_cmd_write_image(
    ngf_xfer_encoder enc,
    ngf_buffer src,
    ngf_image dst,
    const ngf_image_write* writes,
    uint32_t nwrites) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  assert(buf->active_rce == nil);
  for (size_t i = 0u; i < nwrites; ++i) {
    const ngf_image_write* w = &writes[i];
    for (uint32_t l = 0u; l < w->nlayers; ++l) {
      const uint32_t pitch    = ngfmtl_get_pitch(w->extent.width, dst->format);
      const uint32_t num_rows = ngfmtl_get_num_rows(w->extent.height, dst->format);
      buf->active_bce->copyFromBuffer(
          src->mtl_buffer.get(),
          w->src_offset + (l * pitch * num_rows),
          pitch,
          pitch * num_rows,
          MTL::Size::Make(w->extent.width, w->extent.height, w->extent.depth),
          dst->texture.get(),
          w->dst_base_layer + l,
          w->dst_level,
          MTL::Origin::Make(
              (NS::UInteger)w->dst_offset.x,
              (NS::UInteger)w->dst_offset.y,
              (NS::UInteger)w->dst_offset.z));
    }
  }
}

// Reads back image contents into a buffer, one layer at a time.
void ngf_cmd_copy_image_to_buffer(
    ngf_xfer_encoder enc,
    const ngf_image_ref src,
    ngf_offset3d src_offset,
    ngf_extent3d src_extent,
    uint32_t nlayers,
    ngf_buffer dst,
    size_t dst_offset) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(enc);
  assert(buf->active_rce == nullptr);
  const MTL::TextureType texture_type = src.image->texture->textureType();
  const bool is_cubemap =
      texture_type == MTL::TextureTypeCube || texture_type == MTL::TextureTypeCubeArray;
  // For cubemaps, each array layer consists of 6 slices (one per face).
  const uint32_t src_slice = (is_cubemap ? 6u : 1u) * src.layer + (is_cubemap ?
      src.cubemap_face : 0);
  const uint32_t pitch    = ngfmtl_get_pitch(src_extent.width, src.image->format);
  const uint32_t num_rows = ngfmtl_get_num_rows(src_extent.height, src.image->format);
  for (uint32_t l = 0; l < nlayers; ++l) {
    // Each copied layer occupies pitch * num_rows bytes in the destination.
    buf->active_bce->copyFromTexture(
        src.image->texture.get(),
        src_slice + l,
        src.mip_level,
        MTL::Origin::Make(
            (NS::UInteger)src_offset.x, (NS::UInteger)src_offset.y, (NS::UInteger)src_offset.z),
        MTL::Size::Make(src_extent.width, src_extent.height, src_extent.depth),
        dst->mtl_buffer.get(),
        dst_offset + (l * pitch * num_rows),
        pitch,
        pitch * num_rows);
  }
}

// Generates a full mip chain via the blit encoder; requires the image to have
// been created with NGF_IMAGE_USAGE_MIPMAP_GENERATION.
ngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT {
  if (!(img->usage_flags & NGF_IMAGE_USAGE_MIPMAP_GENERATION)) {
    NGFI_DIAG_ERROR("mipmap generation was requested for an image that was created "
                    "without the NGF_IMAGE_USAGE_MIPMAP_GENERATION flag");
    return NGF_ERROR_INVALID_OPERATION;
  }
  auto buf = NGFMTL_ENC2CMDBUF(xfenc);
  assert(buf->active_rce == nullptr);
  buf->active_bce->generateMipmaps(img->texture.get());
  return NGF_ERROR_OK;
}

// Sets the front/back stencil reference values on the active render encoder.
void ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->active_rce->setStencilReferenceValues(front, back);
}

// Changes the stencil read (compare) masks. Metal bakes the masks into the
// immutable depth-stencil state object, so the pipeline's cached descriptor is
// mutated and a fresh state object is created and bound.
void ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->active_gfx_pipe->depth_stencil_desc->frontFaceStencil()->setReadMask(front);
  cmd_buf->active_gfx_pipe->depth_stencil_desc->backFaceStencil()->setReadMask(back);
  // NOTE(review): ngf_id's template argument appears stripped by extraction.
  ngf_id depth_stencil_state = CURRENT_CONTEXT->device->newDepthStencilState(
      cmd_buf->active_gfx_pipe->depth_stencil_desc.get());
  cmd_buf->active_rce->setDepthStencilState(depth_stencil_state.get());
}

// Changes the stencil write masks; see ngf_cmd_stencil_compare_mask for why a
// new depth-stencil state object is required.
void ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->active_gfx_pipe->depth_stencil_desc->frontFaceStencil()->setWriteMask(front);
  cmd_buf->active_gfx_pipe->depth_stencil_desc->backFaceStencil()->setWriteMask(back);
  ngf_id depth_stencil_state = CURRENT_CONTEXT->device->newDepthStencilState(
      cmd_buf->active_gfx_pipe->depth_stencil_desc.get());
  cmd_buf->active_rce->setDepthStencilState(depth_stencil_state.get());
}

// Sets the polygon depth bias parameters on the active render encoder.
void ngf_cmd_set_depth_bias(
    ngf_render_encoder enc,
    float const_scale,
    float slope_scale,
    float clamp) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  cmd_buf->active_rce->setDepthBias(const_scale, slope_scale, clamp);
}

// Opens a named debug group on the underlying Metal command buffer.
void ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buf, const char* name) NGF_NOEXCEPT {
  auto name_nsstr = NS::String::string(name, NS::ASCIIStringEncoding);
  cmd_buf->mtl_cmd_buffer->pushDebugGroup(name_nsstr);
}

// Closes the innermost debug group on the underlying Metal command buffer.
void ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buf) NGF_NOEXCEPT {
  cmd_buf->mtl_cmd_buffer->popDebugGroup();
}

// Commits any pending command buffer and blocks until the last committed
// command buffer has completed on the GPU.
void ngf_finish() NGF_NOEXCEPT {
  if (CURRENT_CONTEXT->pending_cmd_buffer) {
    // NOTE(review): ngf_id's template argument appears stripped by extraction.
    CURRENT_CONTEXT->last_cmd_buffer = ngf_id::add_retain(CURRENT_CONTEXT->pending_cmd_buffer);
    CURRENT_CONTEXT->pending_cmd_buffer->commit();
    CURRENT_CONTEXT->pending_cmd_buffer = nullptr;
  }
  if (CURRENT_CONTEXT->last_cmd_buffer) {
    CURRENT_CONTEXT->last_cmd_buffer->waitUntilCompleted();
  }
}

// Validates and stages push-constant data on the command buffer; the staged
// bytes are pushed to an encoder by the ngfmtl_apply_set_bytes_* helpers.
static ngf_error ngfmtl_capture_set_bytes(
    ngf_cmd_buffer cmd_buf,
    const void* data,
    size_t size_bytes) {
  if (!data || size_bytes == 0u) {
    // Null/empty input clears any pending push-constant data.
    cmd_buf->pending_pc_size = 0u;
    return NGF_ERROR_OK;
  }
  // Size must fit the inline staging area and be 4-byte aligned.
  if (size_bytes > NGF_MAX_ENCODER_INLINE_BYTES || (size_bytes & 0x3u) != 0u) {
    NGFI_DIAG_ERROR(
        "push-constant size %zu must be <= %u and a multiple of 4",
        size_bytes,
        NGF_MAX_ENCODER_INLINE_BYTES);
    return NGF_ERROR_INVALID_SIZE;
  }
  // NOTE(review): static_cast's template argument appears stripped by
  // extraction -- confirm against the original source.
  cmd_buf->pending_pc_size = static_cast(size_bytes);
  memcpy(cmd_buf->pending_pc_data, data, size_bytes);
  return NGF_ERROR_OK;
}

// Stages push-constant data for the render encoder and applies it immediately
// if a graphics pipeline is already bound.
ngf_error ngf_set_bytes(
    ngf_render_encoder enc,
    const void* data,
    size_t size_bytes) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  const
ngf_error err = ngfmtl_capture_set_bytes(cmd_buf, data, size_bytes);
  if (err != NGF_ERROR_OK) return err;
  ngfmtl_apply_set_bytes_gfx(cmd_buf);
  return NGF_ERROR_OK;
}

// Stages push-constant data for the compute encoder and applies it immediately
// if a compute pipeline is already bound.
ngf_error ngf_set_compute_bytes(
    ngf_compute_encoder enc,
    const void* data,
    size_t size_bytes) NGF_NOEXCEPT {
  auto cmd_buf = NGFMTL_ENC2CMDBUF(enc);
  const ngf_error err = ngfmtl_capture_set_bytes(cmd_buf, data, size_bytes);
  if (err != NGF_ERROR_OK) return err;
  ngfmtl_apply_set_bytes_compute(cmd_buf);
  return NGF_ERROR_OK;
}

// RenderDoc integration is a no-op on the Metal backend; each entry point just
// emits a diagnostic warning.
void ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT {
  NGFI_DIAG_WARNING("RenderDoc functionality is not implemented for Metal backend");
}

void ngf_renderdoc_capture_begin() NGF_NOEXCEPT {
  NGFI_DIAG_WARNING("RenderDoc functionality is not implemented for Metal backend");
}

void ngf_renderdoc_capture_end() NGF_NOEXCEPT {
  NGFI_DIAG_WARNING("RenderDoc functionality is not implemented for Metal backend");
}

// The ngf_get_mtl_* functions below expose raw Metal object pointers (as
// uintptr_t) for interop with code that talks to Metal directly.
uintptr_t ngf_get_mtl_image_handle(ngf_image image) NGF_NOEXCEPT {
  return (uintptr_t)(image->texture.get());
}

uintptr_t ngf_get_mtl_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT {
  return (uintptr_t)(buffer->mtl_buffer.get());
}

uintptr_t ngf_get_mtl_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {
  return (uintptr_t)(cmd_buffer->mtl_cmd_buffer);
}

uintptr_t ngf_get_mtl_render_encoder_handle(ngf_render_encoder render_encoder) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(render_encoder);
  return (uintptr_t)(buf->active_rce);
}

uintptr_t ngf_get_mtl_xfer_encoder_handle(ngf_xfer_encoder xfer_encoder) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(xfer_encoder);
  return (uintptr_t)(buf->active_bce);
}

uintptr_t ngf_get_mtl_compute_encoder_handle(ngf_compute_encoder compute_encoder) NGF_NOEXCEPT {
  auto buf = NGFMTL_ENC2CMDBUF(compute_encoder);
  return (uintptr_t)(buf->active_cce);
}

uintptr_t ngf_get_mtl_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT {
  return (uintptr_t)(sampler->sampler.get());
}

// Returns the raw MTL::PixelFormat value corresponding to a nicegraf format.
uint32_t ngf_get_mtl_pixel_format_index(ngf_image_format format) NGF_NOEXCEPT {
  return
(uint32_t)get_mtl_pixel_format(format).format; } uintptr_t ngf_get_mtl_device() NGF_NOEXCEPT { return (uintptr_t)(void*)MTL_DEVICE; } void ngf_mtl_set_sample_attachment_for_next_render_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor) NGF_NOEXCEPT { cmd_buffer->sample_buf_attachment_for_next_render_pass = ngf_id::add_retain( static_cast( (void*)sample_buf_attachment_descriptor)); } void ngf_mtl_set_sample_attachment_for_next_compute_pass( ngf_cmd_buffer cmd_buffer, uintptr_t sample_buf_attachment_descriptor) NGF_NOEXCEPT { cmd_buffer->sample_buf_attachment_for_next_compute_pass = ngf_id::add_retain( static_cast( (void*)sample_buf_attachment_descriptor)); } #include "ngf-common/create-destroy.cpp" ================================================ FILE: source/ngf-mtl/layer.mm ================================================ #include "nicegraf.h" #import #import #if TARGET_OS_OSX #import using NGFMTL_VIEW_TYPE = NSView; #else #import using NGFMTL_VIEW_TYPE = UIView; #endif // Implementation is defined in impl.cpp, header only here #include "MetalSingleHeader.hpp" static const CFStringRef get_mtl_colorspace(ngf_colorspace colorspace) { const CFStringRef color_spaces[NGF_COLORSPACE_COUNT] = { kCGColorSpaceSRGB, kCGColorSpaceExtendedSRGB, kCGColorSpaceExtendedLinearSRGB, kCGColorSpaceDisplayP3, kCGColorSpaceExtendedLinearDisplayP3, kCGColorSpaceDCIP3, kCGColorSpaceExtendedLinearITUR_2020, kCGColorSpaceITUR_2100_PQ }; return color_spaces[colorspace]; } // Return type of CA::MetalLayer* CA::MetalLayer* ngf_layer_add_to_view(MTL::Device* device, uint32_t width, uint32_t height, MTL::PixelFormat pixel_format, ngf_colorspace colorspace, uint32_t capacity_hint, bool display_sync_enabled, bool compute_access_enabled, uintptr_t native_handle) { CAMetalLayer* layer_ = [CAMetalLayer layer]; layer_.device = (__bridge id)device; layer_.drawableSize = CGSizeMake(width, height); layer_.pixelFormat = (MTLPixelFormat)pixel_format; // TODO: Is this cast 
correct? layer_.colorspace = CGColorSpaceCreateWithName(get_mtl_colorspace(colorspace)); layer_.framebufferOnly = compute_access_enabled ? NO : YES; #if TARGET_OS_OSX if (@available(macOS 10.13.2, *)) { layer_.maximumDrawableCount = capacity_hint; } if (@available(macOS 10.13, *)) { layer_.displaySyncEnabled = display_sync_enabled; } #endif const bool supports_edr = colorspace == NGF_COLORSPACE_EXTENDED_SRGB_LINEAR || colorspace == NGF_COLORSPACE_DISPLAY_P3_LINEAR || colorspace == NGF_COLORSPACE_ITUR_BT2020 || colorspace == NGF_COLORSPACE_ITUR_BT2100_PQ; if (supports_edr) { #if TARGET_OS_OSX if (@available(macOS 10.11, *)) { layer_.wantsExtendedDynamicRangeContent = YES; } #else if (@available(iOS 16.0, *)) { layer_.wantsExtendedDynamicRangeContent = YES; } #endif } // Associate the newly created Metal layer with the user-provided View. NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)native_handle); #if TARGET_OS_OSX [view setLayer:layer_]; #else [view.layer addSublayer:layer_]; [layer_ setContentsScale:view.layer.contentsScale]; [layer_ setContentsGravity:kCAGravityResizeAspect]; [layer_ setFrame:view.frame]; #endif CFBridgingRetain(view); return (__bridge_retained CA::MetalLayer*)layer_; } CA::MetalDrawable* ngf_layer_next_drawable(CA::MetalLayer* layer) { return (__bridge CA::MetalDrawable*)[(__bridge CAMetalLayer*)layer nextDrawable]; } void ngf_resize_swapchain(CA::MetalLayer* layer, uint32_t width, uint32_t height, uintptr_t native_handle) { CAMetalLayer* bridged_layer = (__bridge CAMetalLayer*)layer; bridged_layer.drawableSize = CGSizeMake(width, height); NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)native_handle); [bridged_layer setContentsScale:view.layer.contentsScale]; [bridged_layer setFrame:view.frame]; CFBridgingRetain(view); } ================================================ FILE: source/ngf-vk/ca-metal-layer.mm ================================================ #if defined(__APPLE__) #include "nicegraf.h" #import #import #if TARGET_OS_OSX 
#import using NGFMTL_VIEW_TYPE = NSView; #else #import using NGFMTL_VIEW_TYPE = UIView; #endif extern "C" { void* ngfvk_create_ca_metal_layer(const ngf_swapchain_info* swapchain_info) { //const MTLPixelFormat pixel_format = get_mtl_pixel_format(swapchain_info->color_format).format; auto layer = [CAMetalLayer layer]; layer.drawableSize = CGSizeMake(swapchain_info->width, swapchain_info->height); //layer.pixelFormat = pixel_format; layer.framebufferOnly = YES; #if TARGET_OS_OSX if (@available(macOS 10.13.2, *)) { layer.maximumDrawableCount = swapchain_info->capacity_hint; } if (@available(macOS 10.13, *)) { layer.displaySyncEnabled = (swapchain_info->present_mode == NGF_PRESENTATION_MODE_FIFO); } #endif // Associate the newly created Metal layer with the user-provided View. NGFMTL_VIEW_TYPE* view = CFBridgingRelease((void*)swapchain_info->native_handle); #if TARGET_OS_OSX [view setLayer:layer]; #else [view.layer addSublayer:layer]; [layer setContentsScale:view.layer.contentsScale]; [layer setContentsGravity:kCAGravityResizeAspect]; [layer setFrame:view.frame]; #endif CFBridgingRetain(view); return layer; } } #endif ================================================ FILE: source/ngf-vk/impl.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "ngf-common/silence.h" #include "nicegraf.h" #include "ngf-common/arena.h" #include "ngf-common/array.h" #include "ngf-common/chunked-list.h" #include "ngf-common/cmdbuf-state.h" #include "ngf-common/default-arenas.h" #include "ngf-common/frame-token.h" #include "ngf-common/hashtable.h" #include "ngf-common/macros.h" #include "ngf-common/unique-ptr.h" #include "ngf-common/util.h" #include "ngf-common/value-or-error.h" #include "vk_10.h" #include #include #include #include #include #pragma region constants namespace ngfvk { namespace global { constexpr uint32_t invalid_idx = ~((uint32_t)0u); constexpr uint32_t max_phys_dev = 64u; // 64 GPUs oughta be enough for everybody. constexpr uint32_t img_usage_transient_attachment = (1u << 31u); // Used by every pipeline layout and by ngf_context_t::vk_default_push_layout. 
constexpr VkPushConstantRange default_push_constant_range = { .stageFlags = VK_SHADER_STAGE_ALL, .offset = 0u, .size = NGF_MAX_ENCODER_INLINE_BYTES}; } // namespace global } // namespace ngfvk #pragma endregion #pragma region internal_struct_definitions struct ngfvk_dummy_resources { ngf_image img; ngf_image cube; ngf_buffer buf; ngf_texel_buffer_view tbuf; ngf_sampler samp; VkAccelerationStructureKHR dummy_accel_struct; VkDescriptorImageInfo img_info; VkDescriptorImageInfo cube_info; VkDescriptorImageInfo img_arr_info; VkDescriptorImageInfo cube_arr_info; VkDescriptorImageInfo samp_info; VkDescriptorImageInfo imgsamp_info; VkDescriptorImageInfo imgsamp_arr_info; VkDescriptorBufferInfo buf_info; pthread_mutex_t img_mu; bool image_transitioned; }; // Singleton for holding vulkan instance, device and queue handles. // This is shared by all contexts. struct { VkInstance instance; VkPhysicalDevice phys_dev; VkDevice device; VmaAllocator allocator; VkQueue gfx_queue; VkQueue present_queue; uint32_t gfx_family_idx; uint32_t present_family_idx; VkDebugUtilsMessengerEXT debug_messenger; #if defined(__linux__) xcb_connection_t* xcb_connection; xcb_visualid_t xcb_visualid; #endif ngfvk_dummy_resources dummy_res; } _vk; // Singleton for holding on to RenderDoc API struct { RENDERDOC_API_1_6_0* api; bool capture_next; bool is_capturing; } _renderdoc; // Swapchain state. struct ngfvk_swapchain { VkSwapchainKHR vk_swapchain; ngfi::fixed_array imgs; ngfi::fixed_array> wrapper_imgs; ngfi::fixed_array> multisample_imgs; ngfi::fixed_array multisample_img_views; ngfi::fixed_array acquire_sems; ngfi::fixed_array submit_sems; ngfi::fixed_array framebufs; ngf_image depth_img; uint32_t nimgs; // < Total number of images in the swapchain. uint32_t image_idx; // < The index of currently acquired image. 
uint32_t width; uint32_t height; VkPresentModeKHR present_mode; static ngfi::maybe_ngfptr make( const ngf_swapchain_info& swapchain_info, ngf_render_target rt, VkSurfaceKHR surface) noexcept; ngfvk_swapchain() noexcept = default; ngfvk_swapchain(ngfvk_swapchain&& other) noexcept = default; ~ngfvk_swapchain() noexcept; }; struct ngfvk_alloc { uintptr_t obj_handle = 0u; VmaAllocation vma_alloc = VK_NULL_HANDLE; void* mapped_data = nullptr; static ngfi::value_or_ngferr make(const ngf_image_info& info) NGF_NOEXCEPT; static ngfi::value_or_ngferr make(const ngf_buffer_info& info) NGF_NOEXCEPT; static ngfi::value_or_ngferr wrap(VkImage img) NGF_NOEXCEPT { ngfvk_alloc result {}; result.obj_handle = (uintptr_t)img; return ngfi::move(result); } ngfvk_alloc() NGF_NOEXCEPT = default; ngfvk_alloc(ngfvk_alloc&& other) NGF_NOEXCEPT { *this = ngfi::move(other); } ngfvk_alloc(const ngfvk_alloc&) = delete; ~ngfvk_alloc() NGF_NOEXCEPT { destroy(); } ngfvk_alloc& operator=(ngfvk_alloc&& other) NGF_NOEXCEPT; ngfvk_alloc& operator=(const ngfvk_alloc& other) NGF_NOEXCEPT = delete; private: void destroy() NGF_NOEXCEPT; }; struct ngfvk_buffer_view_info { VkBufferViewCreateInfo vk_info; VkBufferView vk_handle; }; typedef uint32_t ngfvk_desc_count[NGF_DESCRIPTOR_TYPE_COUNT]; struct ngfvk_desc_pool_capacity { uint32_t sets; ngfvk_desc_count descriptors; }; struct ngfvk_desc_binding { VkDescriptorType type; VkPipelineStageFlags stage_accessors; bool readonly; bool is_multilayered_image; bool is_cubemap; uint32_t ndescs_in_binding; }; struct ngfvk_desc_set_layout { VkDescriptorSetLayout vk_handle; ngfvk_desc_count counts; uint32_t nall_descs; // < Total number of descriptors across all bindings. 
ngfi::fixed_array binding_properties; }; struct ngfvk_desc_pool { ngfvk_desc_pool* next; VkDescriptorPool vk_pool; ngfvk_desc_pool_capacity capacity; ngfvk_desc_pool_capacity utilization; }; struct ngfvk_desc_pools_list { ngfvk_desc_pool* active_pool; ngfvk_desc_pool* list; }; struct ngfvk_desc_superpool { uint16_t ctx_id; ngfi::fixed_array pools_lists; }; // Command buffer with its associated pool. struct ngfvk_cmd_buf_with_pool { VkCommandBuffer cmd_buf; VkCommandPool cmd_pool; }; // Typed chunk lists for retiring Vulkan objects. template struct ngfvk_retire_list { ngfi::chunked_list list; }; ngfi::arena& current_frame_res_arena(); template struct ngfvk_retire_lists_t : private ngfvk_retire_list... { template void append(T&& v) { using X = ngfi::remove_reference_t; ngfvk_retire_list::list.append(v, current_frame_res_arena()); } template ngfi::chunked_list& list() { return ngfvk_retire_list::list; } template void clear() { return ngfvk_retire_list::list.clear(); } }; using ngfvk_retire_lists = ngfvk_retire_lists_t< VkPipeline, VkPipelineLayout, VkDescriptorSetLayout, ngfvk_cmd_buf_with_pool, VkFramebuffer, VkRenderPass, VkImageView, ngf_image_view, ngf_sampler, ngf_texel_buffer_view, ngf_image, ngf_buffer, ngfvk_desc_pools_list*>; // Vulkan resources associated with a given frame. struct ngfvk_frame_resources { ngfi::arena res_frame_arena; ngfi::array submitted_cmd_bufs; // < Submitted ngf command buffers. // Resources that should be disposed of at some point after this // frame's completion. ngfvk_retire_lists retire; // Fences that will be signaled at the end of the frame. VkFence fences[2]; // Number of fences to wait on to complete all submissions related to this // frame. 
uint32_t nwait_fences; }; struct ngfvk_command_superpool { ngfi::fixed_array cmd_pools; uint16_t ctx_id; ngfvk_command_superpool() = default; ngfvk_command_superpool(uint32_t queue_family_idx, uint32_t capacity, uint16_t ctx_id); ~ngfvk_command_superpool(); ngfvk_command_superpool(const ngfvk_command_superpool&) = delete; ngfvk_command_superpool(ngfvk_command_superpool&&) = default; }; struct ngfvk_attachment_pass_desc { VkImageLayout layout; VkAttachmentLoadOp load_op; VkAttachmentStoreOp store_op; bool is_resolve; }; struct ngfvk_renderpass_cache_entry { ngf_render_target rt; uint64_t ops_key; VkRenderPass renderpass; }; #define NGFVK_ENC2CMDBUF(enc) ((ngf_cmd_buffer)((void*)enc.pvt_data_donotuse.d0)) struct ngfvk_device_info { uint32_t vendor_id; uint32_t device_id; ngfi::array enabled_ext_names; VkPhysicalDeviceFeatures required_features; VkPhysicalDeviceShaderFloat16Int8Features sf16i8_features; VkPhysicalDeviceSynchronization2Features sync2_features; VkPhysicalDeviceBufferDeviceAddressFeatures bda_features; VkPhysicalDeviceAccelerationStructureFeaturesKHR accls_features; VkPhysicalDeviceRayQueryFeaturesKHR ray_query_features; VkPhysicalDeviceFeatures2 phys_dev_features2; }; struct ngfvk_generic_pipeline { VkPipeline vk_pipeline; ngfi::array descriptor_set_layouts; VkPipelineLayout vk_pipeline_layout; VkSpecializationInfo vk_spec_info; VkRenderPass compat_render_pass; static ngfi::maybe_ngfptr make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT; static ngfi::maybe_ngfptr make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT; ~ngfvk_generic_pipeline() NGF_NOEXCEPT; private: ngf_error common_init( const ngf_specialization_info* spec_info, VkPipelineShaderStageCreateInfo* vk_shader_stages, const ngf_shader_stage* shader_stages, uint32_t nshader_stages) NGF_NOEXCEPT; }; // Describes how a resource is accessed within a synchronization scope. struct ngfvk_sync_barrier_masks { VkAccessFlags access_mask; // < Ways in which the resource is accessed. 
VkPipelineStageFlags stage_mask; // < Pipeline stages that have access to the resource. }; // Synchronization request, that describes the intent to access a resource. struct ngfvk_sync_req { ngfvk_sync_barrier_masks barrier_masks; // < Access/stage masks. VkImageLayout layout; // < For image resources only, current layout. }; // Synchronization state of a resource within the context of a single command buffer. struct ngfvk_sync_state { ngfvk_sync_barrier_masks last_writer_masks; ngfvk_sync_barrier_masks active_readers_masks; uint32_t per_stage_readers_mask; VkImageLayout layout; bool skip_hazard_tracking; }; // Type of synchronized resource. enum ngfvk_sync_res_type { NGFVK_SYNC_RES_BUFFER, NGFVK_SYNC_RES_IMAGE, NGFVK_SYNC_RES_COUNT }; // Tagged union for passing around handles to synchronized GPU resources in a generic way. struct ngfvk_sync_res { union { ngf_image img; ngf_buffer buf; } data; ngfvk_sync_res_type type; uint64_t hash; }; // Data associated with a particular synchronized resource within the context of a single cmd // buffer. struct ngfvk_sync_res_data { ngfvk_sync_req expected_sync_req; // < Expected sync state. ngfvk_sync_state sync_state; // < Latest synchronization state. 
uint32_t pending_sync_req_idx; ngfvk_sync_res_type res_type; uintptr_t res_handle; bool had_barrier; }; // Typedef for the sync resource data hash table using ngfvk_sync_res_hashtable = ngfi::hashtable; struct ngfvk_sync_req_batch { ngfvk_sync_res_hashtable::keyhash* sync_res_data_keys; ngfvk_sync_req* pending_sync_reqs; bool* freshness; uint32_t npending_sync_reqs; uint32_t nbuffer_sync_reqs; uint32_t nimage_sync_reqs; }; enum ngfvk_render_cmd_type { NGFVK_RENDER_CMD_BIND_PIPELINE, NGFVK_RENDER_CMD_SET_VIEWPORT, NGFVK_RENDER_CMD_SET_SCISSOR, NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE, NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK, NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK, NGFVK_RENDER_CMD_BIND_RESOURCE, NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER, NGFVK_RENDER_CMD_BIND_INDEX_BUFFER, NGFVK_RENDER_CMD_SET_DEPTH_BIAS, NGFVK_RENDER_CMD_DRAW, }; struct ngfvk_barrier_data { VkAccessFlags src_access_mask; VkAccessFlags dst_access_mask; VkPipelineStageFlags src_stage_mask; VkPipelineStageFlags dst_stage_mask; VkImageLayout src_layout; VkImageLayout dst_layout; ngfvk_sync_res res; }; struct ngfvk_render_cmd { union { ngf_graphics_pipeline pipeline; ngf_irect2d rect; struct { uint32_t front; uint32_t back; } stencil_values; ngf_resource_bind_op bind_resource; struct { ngf_buffer buffer; uint32_t binding; size_t offset; } bind_attrib_buffer; struct { ngf_buffer buffer; size_t offset; ngf_type type; } bind_index_buffer; struct { uint32_t first_element; uint32_t nelements; uint32_t ninstances; bool indexed; } draw; struct { float const_factor; float slope_factor; float clamp; } depth_bias; } data; ngfvk_render_cmd_type type : 8; }; struct ngfvk_pending_barrier_list { ngfi::chunked_list barriers; uint32_t npending_img_bars; uint32_t npending_buf_bars; }; // Range of render commands for virtual bind operations. // Stores a pointer to the first command and the count. 
struct ngfvk_virt_bind_range { const ngfvk_render_cmd* start; uint32_t count; }; struct ngfvk_reflect_binding_and_stage_mask { SpvReflectDescriptorBinding binding_data; VkPipelineStageFlags mask; }; #pragma endregion #pragma region external_struct_definitions struct ngf_cmd_buffer_t { ngf_frame_token parent_frame; // < The frame this cmd buffer is associated with. VkCommandBuffer vk_cmd_buffer; // < Active vulkan command buffer. VkCommandPool vk_cmd_pool; // < Active vulkan command pool. ngf_graphics_pipeline active_gfx_pipe; // < The bound graphics pipeline. ngf_compute_pipeline active_compute_pipe; // < The bound compute pipeline. ngf_render_target active_rt; // < Active render target. ngf_buffer active_attr_buf; ngf_buffer active_idx_buf; ngfvk_desc_pools_list* desc_pools_list; // < List of descriptor pools used in the buffer's frame. ngfi::chunked_list pending_bind_ops; // < Bind ops to be performed before the next draw. ngfi::chunked_list in_pass_cmd_chnks; ngfi::chunked_list virt_bind_ops_ranges; ngfvk_pending_barrier_list pending_barriers; ngfvk_sync_res_hashtable local_res_states; ngf_render_pass_info pending_render_pass_info; // < describes the active render pass uint32_t npending_bind_ops; uint32_t pending_clear_value_count; ngfi::cmd_buffer_state state; // < State of the cmd buffer (i.e. new/recording/etc.) bool renderpass_active : 1; // < Has an active renderpass. bool compute_pass_active : 1; // < Has an active compute pass. bool xfer_pass_active : 1; // < Has an active transfer pass. bool destroy_on_submit : 1; // < Destroy after submitting. 
static ngfi::maybe_ngfptr make() noexcept; ~ngf_cmd_buffer_t() noexcept; }; struct ngf_sampler_t { VkSampler vksampler; static ngfi::maybe_ngfptr make(const ngf_sampler_info& info) NGF_NOEXCEPT; ~ngf_sampler_t() NGF_NOEXCEPT; }; struct ngf_buffer_t { ngfvk_alloc alloc; size_t size; size_t mapped_offset; ngfvk_sync_state sync_state; uint64_t hash; uint32_t usage_flags; ngf_buffer_storage_type storage_type; static ngfi::maybe_ngfptr make(const ngf_buffer_info& info) NGF_NOEXCEPT; }; struct ngf_texel_buffer_view_t { VkBufferView vk_buf_view; ngf_buffer buffer; static ngfi::maybe_ngfptr make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT; ~ngf_texel_buffer_view_t() NGF_NOEXCEPT; }; struct ngf_image_t { ngfvk_alloc alloc; VkImageView vkview; VkImageView vkview_arrayed; VkFormat vk_fmt; ngf_extent3d extent; ngf_image_type type; ngfvk_sync_state sync_state; uint64_t hash; uint32_t usage_flags; uint32_t nlevels; uint32_t nlayers; static ngfi::maybe_ngfptr make(const ngf_image_info& wrapper_info, ngfvk_alloc&& alloc) NGF_NOEXCEPT; static ngfi::maybe_ngfptr make(const ngf_image_info& wrapper_info) NGF_NOEXCEPT; ~ngf_image_t() NGF_NOEXCEPT; }; struct ngf_image_view_t { VkImageView vk_view; ngf_image src; static ngfi::maybe_ngfptr make(const ngf_image_view_info& info) NGF_NOEXCEPT; ~ngf_image_view_t() NGF_NOEXCEPT; }; struct ngf_context_t { ngfi::unique_ptr swapchain; ngf_swapchain_info swapchain_info; VkSurfaceKHR surface; uint32_t frame_id; uint32_t max_inflight_frames; ngf_frame_token current_frame_token; ngf_attachment_descriptions default_attachment_descriptions_list; ngfi::unique_ptr default_render_target; ngfi::fixed_array frame_res; ngfi::array command_superpools; ngfi::array desc_superpools; ngfi::array renderpass_cache; // Push-constant-compatible with every pipeline layout (all share default_push_constant_range). 
VkPipelineLayout vk_default_push_layout = VK_NULL_HANDLE; static ngfi::maybe_ngfptr make(const ngf_context_info& info); ~ngf_context_t() noexcept; }; struct ngf_shader_stage_t { VkShaderModule vk_module; VkShaderStageFlagBits vk_stage_bits; SpvReflectShaderModule spv_reflect_module; ngfi::fixed_array entry_point_name; static ngfi::maybe_ngfptr make(const ngf_shader_stage_info& info) NGF_NOEXCEPT; ~ngf_shader_stage_t() NGF_NOEXCEPT; }; struct ngf_render_target_t { VkFramebuffer frame_buffer; VkRenderPass compat_render_pass; uint32_t nattachments; ngfi::fixed_array attachment_descs; ngfi::fixed_array attachment_image_views; /* unused in default RT. */ ngfi::fixed_array attachment_images; /* unused in default RT. */ ngfi::fixed_array attachment_compat_pass_descs; bool is_default; bool have_resolve_attachments; uint32_t width; uint32_t height; static ngfi::maybe_ngfptr make(const ngf_render_target_info& info) NGF_NOEXCEPT; static ngfi::maybe_ngfptr make(uint32_t width, uint32_t height, uint32_t nattachment_descs) NGF_NOEXCEPT; ~ngf_render_target_t() NGF_NOEXCEPT; }; #pragma endregion #pragma region global_vars NGFI_THREADLOCAL ngf_context CURRENT_CONTEXT = NULL; namespace ngfvk { namespace global { ngf_device phys_devices[ngfvk::global::max_phys_dev]; ngfvk_device_info phys_device_infos[ngfvk::global::max_phys_dev]; ngf_device_capabilities phys_device_caps; uint32_t num_phys_devices = 0; } // namespace global } // namespace ngfvk #pragma endregion #pragma region vk_enum_maps static VkFilter get_vk_filter(ngf_sampler_filter filter) { static const VkFilter vkfilters[NGF_FILTER_COUNT] = {VK_FILTER_NEAREST, VK_FILTER_LINEAR}; return vkfilters[filter]; } static VkSamplerAddressMode get_vk_address_mode(ngf_sampler_wrap_mode mode) { static const VkSamplerAddressMode vkmodes[NGF_WRAP_MODE_COUNT] = { VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_REPEAT, VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT}; return vkmodes[mode]; } static VkSamplerMipmapMode 
get_vk_mipmode(ngf_sampler_filter filter) {
  // Maps a nicegraf mip filter to the Vulkan mipmap mode; the table is
  // indexed directly by the ngf_sampler_filter enum value.
  static const VkSamplerMipmapMode vkmipmodes[NGF_FILTER_COUNT] = {
      VK_SAMPLER_MIPMAP_MODE_NEAREST,
      VK_SAMPLER_MIPMAP_MODE_LINEAR};
  return vkmipmodes[filter];
}

// Maps a nicegraf sample count to the corresponding Vulkan sample-count bit.
static VkSampleCountFlagBits get_vk_sample_count(ngf_sample_count sample_count) {
  switch (sample_count) {
  case NGF_SAMPLE_COUNT_1:
    return VK_SAMPLE_COUNT_1_BIT;
  case NGF_SAMPLE_COUNT_2:
    return VK_SAMPLE_COUNT_2_BIT;
  case NGF_SAMPLE_COUNT_4:
    return VK_SAMPLE_COUNT_4_BIT;
  case NGF_SAMPLE_COUNT_8:
    return VK_SAMPLE_COUNT_8_BIT;
  case NGF_SAMPLE_COUNT_16:
    return VK_SAMPLE_COUNT_16_BIT;
  case NGF_SAMPLE_COUNT_32:
    return VK_SAMPLE_COUNT_32_BIT;
  case NGF_SAMPLE_COUNT_64:
    return VK_SAMPLE_COUNT_64_BIT;
  default:
    assert(false);  // TODO: return error?
  }
  // Fallback for when assertions are compiled out; unreachable for any
  // valid ngf_sample_count value.
  return VK_SAMPLE_COUNT_1_BIT;
}

// Maps a nicegraf descriptor type to the Vulkan descriptor type; the table
// is indexed directly by the ngf_descriptor_type enum value, so its entries
// must stay in declaration order of that enum.
static VkDescriptorType get_vk_descriptor_type(ngf_descriptor_type type) {
  static const VkDescriptorType types[NGF_DESCRIPTOR_TYPE_COUNT] = {
      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
      VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
      VK_DESCRIPTOR_TYPE_SAMPLER,
      VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
      VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
      VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
      VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
      VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR};
  return types[type];
}

// Maps a nicegraf image type to the Vulkan image type.
static VkImageType get_vk_image_type(ngf_image_type t) {
  static const VkImageType types[NGF_IMAGE_TYPE_COUNT] = {
      VK_IMAGE_TYPE_2D,
      VK_IMAGE_TYPE_3D,
      VK_IMAGE_TYPE_2D  // In Vulkan cubemaps are treated as array of 2D images.
};
  return types[t];
}

// Derives the Vulkan image view type from the nicegraf image type and the
// number of array layers: layered 2D/cube images map to the *_ARRAY view
// types, single-layer ones to the plain view types.
static VkImageViewType get_vk_image_view_type(ngf_image_type t, size_t nlayers) {
  if (t == NGF_IMAGE_TYPE_IMAGE_2D && nlayers == 1u) {
    return VK_IMAGE_VIEW_TYPE_2D;
  } else if (t == NGF_IMAGE_TYPE_IMAGE_2D && nlayers > 1u) {
    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
  } else if (t == NGF_IMAGE_TYPE_IMAGE_3D) {
    return VK_IMAGE_VIEW_TYPE_3D;
  } else if (t == NGF_IMAGE_TYPE_CUBE && nlayers == 1u) {
    return VK_IMAGE_VIEW_TYPE_CUBE;
  } else if (t == NGF_IMAGE_TYPE_CUBE && nlayers > 1u) {
    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
  } else {
    NGFI_DIAG_ERROR("Invalid image type");
    assert(false);
    return VK_IMAGE_VIEW_TYPE_2D;  // Fallback when assertions are compiled out.
  }
}

// Maps a nicegraf comparison op to the Vulkan compare op; the table is
// indexed directly by the ngf_compare_op enum value.
static VkCompareOp get_vk_compare_op(ngf_compare_op op) {
  static const VkCompareOp ops[NGF_COMPARE_OP_COUNT] = {
      VK_COMPARE_OP_NEVER,
      VK_COMPARE_OP_LESS,
      VK_COMPARE_OP_LESS_OR_EQUAL,
      VK_COMPARE_OP_EQUAL,
      VK_COMPARE_OP_GREATER_OR_EQUAL,
      VK_COMPARE_OP_GREATER,
      VK_COMPARE_OP_NOT_EQUAL,
      VK_COMPARE_OP_ALWAYS};
  return ops[op];
}

// Maps a nicegraf stencil op to the Vulkan stencil op; the table is indexed
// directly by the ngf_stencil_op enum value.
static VkStencilOp get_vk_stencil_op(ngf_stencil_op op) {
  static const VkStencilOp ops[NGF_STENCIL_OP_COUNT] = {
      VK_STENCIL_OP_KEEP,
      VK_STENCIL_OP_ZERO,
      VK_STENCIL_OP_REPLACE,
      VK_STENCIL_OP_INCREMENT_AND_CLAMP,
      VK_STENCIL_OP_INCREMENT_AND_WRAP,
      VK_STENCIL_OP_DECREMENT_AND_CLAMP,
      VK_STENCIL_OP_DECREMENT_AND_WRAP,
      VK_STENCIL_OP_INVERT};
  return ops[op];
}

// Maps a nicegraf attachment load op to the Vulkan load op.
static VkAttachmentLoadOp get_vk_load_op(ngf_attachment_load_op op) {
  static const VkAttachmentLoadOp ops[NGF_LOAD_OP_COUNT] = {
      VK_ATTACHMENT_LOAD_OP_DONT_CARE,
      VK_ATTACHMENT_LOAD_OP_LOAD,
      VK_ATTACHMENT_LOAD_OP_CLEAR};
  return ops[op];
}

// Maps a nicegraf attachment store op to the Vulkan store op.
// NOTE(review): the third entry deliberately maps to DONT_CARE - presumably
// it is the "resolve" store op, with the resolve performed via a separate
// resolve attachment; confirm against the ngf_attachment_store_op enum.
static VkAttachmentStoreOp get_vk_store_op(ngf_attachment_store_op op) {
  static const VkAttachmentStoreOp ops[NGF_STORE_OP_COUNT] = {
      VK_ATTACHMENT_STORE_OP_DONT_CARE,
      VK_ATTACHMENT_STORE_OP_STORE,
      VK_ATTACHMENT_STORE_OP_DONT_CARE,
  };
  return ops[op];
}

// Maps a nicegraf blend factor to the Vulkan blend factor; the table is
// indexed directly by the ngf_blend_factor enum value.
static VkBlendFactor get_vk_blend_factor(ngf_blend_factor f) {
  static const VkBlendFactor factors[NGF_BLEND_FACTOR_COUNT] = {
      VK_BLEND_FACTOR_ZERO,
      VK_BLEND_FACTOR_ONE,
      VK_BLEND_FACTOR_SRC_COLOR,
VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
      VK_BLEND_FACTOR_DST_COLOR,
      VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
      VK_BLEND_FACTOR_SRC_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
      VK_BLEND_FACTOR_DST_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
      VK_BLEND_FACTOR_CONSTANT_COLOR,
      VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
      VK_BLEND_FACTOR_CONSTANT_ALPHA,
      VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA};
  return factors[f];
}

// Maps a nicegraf blend op to the Vulkan blend op.
static VkBlendOp get_vk_blend_op(ngf_blend_op op) {
  static const VkBlendOp ops[NGF_BLEND_OP_COUNT] = {
      VK_BLEND_OP_ADD,
      VK_BLEND_OP_SUBTRACT,
      VK_BLEND_OP_REVERSE_SUBTRACT,
      VK_BLEND_OP_MIN,
      VK_BLEND_OP_MAX};
  return ops[op];
}

// Maps a nicegraf image format to the Vulkan format. The table is indexed
// directly by the ngf_image_format enum value, so entries must stay in the
// exact declaration order of that enum in nicegraf.h.
static VkFormat get_vk_image_format(ngf_image_format f) {
  static const VkFormat formats[NGF_IMAGE_FORMAT_COUNT] = {
      VK_FORMAT_R8_UNORM,
      VK_FORMAT_R8G8_UNORM,
      VK_FORMAT_R8G8_SNORM,
      VK_FORMAT_R8G8B8_UNORM,
      VK_FORMAT_R8G8B8A8_UNORM,
      VK_FORMAT_R8G8B8_SRGB,
      VK_FORMAT_R8G8B8A8_SRGB,
      VK_FORMAT_B8G8R8_UNORM,
      VK_FORMAT_B8G8R8A8_UNORM,
      VK_FORMAT_B8G8R8_SRGB,
      VK_FORMAT_B8G8R8A8_SRGB,
      VK_FORMAT_A2B10G10R10_UNORM_PACK32,
      VK_FORMAT_R32_SFLOAT,
      VK_FORMAT_R32G32_SFLOAT,
      VK_FORMAT_R32G32B32_SFLOAT,
      VK_FORMAT_R32G32B32A32_SFLOAT,
      VK_FORMAT_R16_SFLOAT,
      VK_FORMAT_R16G16_SFLOAT,
      VK_FORMAT_R16G16B16_SFLOAT,
      VK_FORMAT_R16G16B16A16_SFLOAT,
      VK_FORMAT_B10G11R11_UFLOAT_PACK32,
      VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
      VK_FORMAT_R16_UNORM,
      VK_FORMAT_R16_SNORM,
      VK_FORMAT_R16G16_UNORM,
      VK_FORMAT_R16G16_SNORM,
      VK_FORMAT_R16G16B16A16_UNORM,
      VK_FORMAT_R16G16B16A16_SNORM,
      VK_FORMAT_R8_UINT,
      VK_FORMAT_R8_SINT,
      VK_FORMAT_R16_UINT,
      VK_FORMAT_R16_SINT,
      VK_FORMAT_R16G16_UINT,
      VK_FORMAT_R16G16B16_UINT,
      VK_FORMAT_R16G16B16A16_UINT,
      VK_FORMAT_R32_UINT,
      VK_FORMAT_R32G32_UINT,
      VK_FORMAT_R32G32B32_UINT,
      VK_FORMAT_R32G32B32A32_UINT,
      VK_FORMAT_BC7_UNORM_BLOCK,
      VK_FORMAT_BC7_SRGB_BLOCK,
      VK_FORMAT_BC6H_SFLOAT_BLOCK,
      VK_FORMAT_BC6H_UFLOAT_BLOCK,
      VK_FORMAT_BC5_UNORM_BLOCK,
      VK_FORMAT_BC5_SNORM_BLOCK,
      VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
      VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
      VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
      VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
      VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
      VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
      VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
      VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
      VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
      VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
      VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
      VK_FORMAT_D32_SFLOAT,
      VK_FORMAT_D16_UNORM,
      VK_FORMAT_D24_UNORM_S8_UINT,
      VK_FORMAT_UNDEFINED};
  return formats[f];
}

// Maps a nicegraf polygon rasterization mode to the Vulkan polygon mode.
static VkPolygonMode get_vk_polygon_mode(ngf_polygon_mode m) {
  static const VkPolygonMode modes[NGF_POLYGON_MODE_COUNT] = {
      VK_POLYGON_MODE_FILL,
      VK_POLYGON_MODE_LINE,
      VK_POLYGON_MODE_POINT};
  return modes[m];
}

// Maps a nicegraf cull mode to the Vulkan cull-mode flags.
static VkCullModeFlags get_vk_cull_mode(ngf_cull_mode m) {
  static const VkCullModeFlagBits modes[NGF_CULL_MODE_COUNT] = {
      VK_CULL_MODE_BACK_BIT,
      VK_CULL_MODE_FRONT_BIT,
      VK_CULL_MODE_FRONT_AND_BACK};
  return (VkCullModeFlags)modes[m];
}

// Maps a nicegraf front-face winding mode to the Vulkan front face.
static VkFrontFace get_vk_front_face(ngf_front_face_mode f) {
  static const VkFrontFace modes[NGF_FRONT_FACE_COUNT] = {
      VK_FRONT_FACE_COUNTER_CLOCKWISE,
      VK_FRONT_FACE_CLOCKWISE};
  return modes[f];
}

// Maps a nicegraf primitive topology to the Vulkan primitive topology.
static VkPrimitiveTopology get_vk_primitive_type(ngf_primitive_topology p) {
  static const VkPrimitiveTopology topos[NGF_PRIMITIVE_TOPOLOGY_COUNT] = {
      VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
      VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
      VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
      VK_PRIMITIVE_TOPOLOGY_LINE_STRIP};
  return topos[p];
}

// Selects the Vulkan vertex attribute format for a given component type,
// component count (1-4) and normalization flag. Returns VK_FORMAT_UNDEFINED
// for unsupported combinations.
static VkFormat get_vk_vertex_format(ngf_type type, uint32_t size, bool norm) {
  static const VkFormat
normalized_formats[4][4] = {
      // Normalized formats: rows indexed by ngf_type, columns by
      // (component count - 1).
      // NOTE(review): assumes the 8/16-bit signed/unsigned integer types
      // occupy ngf_type enum values 0-3 - confirm against nicegraf.h.
      {VK_FORMAT_R8_SNORM, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8B8_SNORM, VK_FORMAT_R8G8B8A8_SNORM},
      {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8A8_UNORM},
      {VK_FORMAT_R16_SNORM,
       VK_FORMAT_R16G16_SNORM,
       VK_FORMAT_R16G16B16_SNORM,
       VK_FORMAT_R16G16B16A16_SNORM},
      {VK_FORMAT_R16_UNORM,
       VK_FORMAT_R16G16_UNORM,
       VK_FORMAT_R16G16B16_UNORM,
       VK_FORMAT_R16G16B16A16_UNORM}};
  // Non-normalized formats: rows indexed by ngf_type (all nine types),
  // columns by (component count - 1).
  static const VkFormat formats[9][4] = {
      {VK_FORMAT_R8_SINT, VK_FORMAT_R8G8_SINT, VK_FORMAT_R8G8B8_SINT, VK_FORMAT_R8G8B8A8_SINT},
      {VK_FORMAT_R8_UINT, VK_FORMAT_R8G8_UINT, VK_FORMAT_R8G8B8_UINT, VK_FORMAT_R8G8B8A8_UINT},
      {VK_FORMAT_R16_SINT,
       VK_FORMAT_R16G16_SINT,
       VK_FORMAT_R16G16B16_SINT,
       VK_FORMAT_R16G16B16A16_SINT},
      {VK_FORMAT_R16_UINT,
       VK_FORMAT_R16G16_UINT,
       VK_FORMAT_R16G16B16_UINT,
       VK_FORMAT_R16G16B16A16_UINT},
      {VK_FORMAT_R32_SINT,
       VK_FORMAT_R32G32_SINT,
       VK_FORMAT_R32G32B32_SINT,
       VK_FORMAT_R32G32B32A32_SINT},
      {VK_FORMAT_R32_UINT,
       VK_FORMAT_R32G32_UINT,
       VK_FORMAT_R32G32B32_UINT,
       VK_FORMAT_R32G32B32A32_UINT},
      {VK_FORMAT_R32_SFLOAT,
       VK_FORMAT_R32G32_SFLOAT,
       VK_FORMAT_R32G32B32_SFLOAT,
       VK_FORMAT_R32G32B32A32_SFLOAT},
      {VK_FORMAT_R16_SFLOAT,
       VK_FORMAT_R16G16_SFLOAT,
       VK_FORMAT_R16G16B16_SFLOAT,
       VK_FORMAT_R16G16B16A16_SFLOAT},
      {VK_FORMAT_R64_SFLOAT,
       VK_FORMAT_R64G64_SFLOAT,
       VK_FORMAT_R64G64B64_SFLOAT,
       VK_FORMAT_R64G64B64A64_SFLOAT}};
  // Reject invalid component counts; normalization is only supported for
  // the 8- and 16-bit integer types (the normalized table's four rows).
  if ((size < 1 || size > 4) || (norm && type > NGF_TYPE_UINT16)) {
    return VK_FORMAT_UNDEFINED;
  } else if (norm) {
    return normalized_formats[type][size - 1];
  } else {
    return formats[type][size - 1];
  }
}

// Maps a nicegraf vertex input rate to the Vulkan vertex input rate.
static VkVertexInputRate get_vk_input_rate(ngf_vertex_input_rate r) {
  static const VkVertexInputRate rates[NGF_VERTEX_INPUT_RATE_COUNT] = {
      VK_VERTEX_INPUT_RATE_VERTEX,
      VK_VERTEX_INPUT_RATE_INSTANCE};
  return rates[r];
}

// Maps a nicegraf shader stage type to the Vulkan shader stage bit.
static VkShaderStageFlagBits get_vk_shader_stage(ngf_stage_type s) {
  static const VkShaderStageFlagBits stages[NGF_STAGE_COUNT] = {
      VK_SHADER_STAGE_VERTEX_BIT,
      VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
  return stages[s];
}

// Translates a nicegraf buffer-usage bitmask into Vulkan buffer usage flags.
// Unrecognized bits in `usage` are ignored.
static VkBufferUsageFlags get_vk_buffer_usage(uint32_t usage) {
  VkBufferUsageFlags flags = 0u;
  if (usage & NGF_BUFFER_USAGE_XFER_DST) flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  if (usage & NGF_BUFFER_USAGE_XFER_SRC) flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  if (usage & NGF_BUFFER_USAGE_UNIFORM_BUFFER) flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_INDEX_BUFFER) flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_VERTEX_BUFFER) flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_TEXEL_BUFFER) flags |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_STORAGE_BUFFER) flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
  if (usage & NGF_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT)
    flags |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
  if (usage & NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT)
    flags |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR;
  if (usage & NGF_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT)
    flags |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR;
  return flags;
}

// Maps a nicegraf buffer storage type to the Vulkan memory property flags
// requested for the buffer's backing allocation.
static VkMemoryPropertyFlags get_vk_memory_flags(ngf_buffer_storage_type s) {
  switch (s) {
  case NGF_BUFFER_STORAGE_HOST_READABLE:
    // Host-cached memory so that readbacks are not uncached reads.
    return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  case NGF_BUFFER_STORAGE_HOST_WRITEABLE:
  case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE:
    return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE:
    return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
           VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
  }
  return 0;
}

// Maps a nicegraf buffer storage type to VMA allocation-creation flags.
// NOTE(review): the declared return type is VmaAllocatorCreateFlags, but the
// values returned below are VmaAllocationCreateFlagBits; both alias VkFlags
// so this compiles - consider changing to VmaAllocationCreateFlags.
static VmaAllocatorCreateFlags
ngfvk_get_vma_alloc_flags(ngf_buffer_storage_type storage_type) { switch (storage_type) { case NGF_BUFFER_STORAGE_HOST_WRITEABLE: return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; case NGF_BUFFER_STORAGE_HOST_READABLE: case NGF_BUFFER_STORAGE_HOST_READABLE_WRITEABLE: return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; case NGF_BUFFER_STORAGE_DEVICE_LOCAL: return 0; case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_WRITEABLE: return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT; case NGF_BUFFER_STORAGE_DEVICE_LOCAL_HOST_READABLE_WRITEABLE: return VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT; } return 0; } static VkIndexType get_vk_index_type(ngf_type t) { switch (t) { case NGF_TYPE_UINT16: return VK_INDEX_TYPE_UINT16; case NGF_TYPE_UINT32: return VK_INDEX_TYPE_UINT32; default: return VK_INDEX_TYPE_MAX_ENUM; } } static bool ngfvk_format_is_depth(VkFormat image_format) { return image_format == VK_FORMAT_D16_UNORM || image_format == VK_FORMAT_D16_UNORM_S8_UINT || image_format == VK_FORMAT_D24_UNORM_S8_UINT || image_format == VK_FORMAT_D32_SFLOAT || image_format == VK_FORMAT_D32_SFLOAT_S8_UINT; } static bool ngfvk_format_is_stencil(VkFormat image_format) { return image_format == VK_FORMAT_D24_UNORM_S8_UINT || image_format == VK_FORMAT_D16_UNORM_S8_UINT || image_format == VK_FORMAT_D32_SFLOAT_S8_UINT; } static VkColorSpaceKHR get_vk_color_space(ngf_colorspace colorspace) { static VkColorSpaceKHR color_spaces[NGF_COLORSPACE_COUNT] = { VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT, VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT, VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT, VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT, 
VK_COLOR_SPACE_BT2020_LINEAR_EXT,
      VK_COLOR_SPACE_HDR10_ST2084_EXT};
  return color_spaces[colorspace];
}

#pragma endregion  // vk_enum_maps

#pragma region internal_funcs

// Forward declaration (implemented elsewhere); picks the highest sample
// count set in the given bitmap of supported counts.
ngf_sample_count ngfi_get_highest_sample_count(size_t counts_bitmap);

// Returns the resource arena belonging to the current context's in-flight
// frame slot (frame_id indexes the per-frame resource array).
ngfi::arena& current_frame_res_arena() {
  return CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena;
}

// Handler for messages from validation layers, etc.
// All messages are forwarded to the user-provided debug callback.
static VKAPI_ATTR VkBool32 VKAPI_CALL ngfvk_debug_message_callback(
    VkDebugUtilsMessageSeverityFlagBitsEXT severity,
    VkDebugUtilsMessageTypeFlagsEXT,
    const VkDebugUtilsMessengerCallbackDataEXT* data,
    void*) {
  // Translate the Vulkan severity into a nicegraf diagnostic message type.
  ngf_diagnostic_message_type ngf_msg_type;
  switch (severity) {
  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
    ngf_msg_type = NGF_DIAGNOSTIC_INFO;
    break;
  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
    ngf_msg_type = NGF_DIAGNOSTIC_WARNING;
    break;
  case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
  default:
    ngf_msg_type = NGF_DIAGNOSTIC_ERROR;
    break;
  }
  // Messages are silently dropped when no diagnostic callback is installed.
  if (ngfi_diag_info.callback) {
    ngfi_diag_info.callback(ngf_msg_type, ngfi_diag_info.userdata, data->pMessage);
  }
  return VK_FALSE;  // Per the spec, returning VK_FALSE does not abort the triggering call.
}

// Queries whether the given queue family of a physical device supports
// presentation, using the platform-appropriate mechanism.
static bool ngfvk_query_presentation_support(VkPhysicalDevice phys_dev, uint32_t queue_family_index) {
#if defined(_WIN32) || defined(_WIN64)
  return vkGetPhysicalDeviceWin32PresentationSupportKHR(phys_dev, queue_family_index);
#elif defined(__ANDROID__)
  return true;  // All Android queue families support presentation.
#elif defined(__APPLE__)
  return true;
#else
  // XCB: lazily establish a connection and cache the root visual id of the
  // preferred screen; both are needed for the presentation-support query.
  if (_vk.xcb_connection == NULL) {
    int screen_idx = 0;
    xcb_screen_t* screen = NULL;
    xcb_connection_t* connection = xcb_connect(NULL, &screen_idx);
    const xcb_setup_t* setup = xcb_get_setup(connection);
    // Walk the screen list until the preferred screen index is reached.
    for (xcb_screen_iterator_t it = xcb_setup_roots_iterator(setup); screen_idx >= 0 && it.rem;
         xcb_screen_next(&it)) {
      if (screen_idx-- == 0) { screen = it.data; }
    }
    assert(screen);
    _vk.xcb_connection = connection;
    _vk.xcb_visualid   = screen->root_visual;
  }
  return vkGetPhysicalDeviceXcbPresentationSupportKHR(
      phys_dev,
      queue_family_index,
      _vk.xcb_connection,
      _vk.xcb_visualid);
#endif
}

// Moves every cached render pass of the given context into the current
// frame's retirement list, then empties the cache.
static void ngfvk_reset_renderpass_cache(ngf_context ctx) {
  for (size_t p = 0; p < ctx->renderpass_cache.size(); ++p) {
    ctx->frame_res[ctx->frame_id].retire.append(ctx->renderpass_cache[p].renderpass);
  }
  ctx->renderpass_cache.clear();
}

// Forward declaration for use in ngfvk_retire_resources
static void ngfvk_reset_desc_pools_list(ngfvk_desc_pools_list* superpool);

// Waits on the given frame's fences, then destroys/frees every resource
// recorded in that frame's retirement lists.
// NOTE(review): the `retire.list()` / `retire.clear()` calls below appear to
// have lost explicit template argument lists during extraction (likely
// `list<T>()` per retired resource type) - tokens preserved exactly as found.
static void ngfvk_retire_resources(ngfvk_frame_resources* frame_res) {
  if (frame_res->nwait_fences > 0u) {
    VkResult wait_status = VK_SUCCESS;
    do {
      // 0x3B9ACA00 ns == 1 second per wait attempt; loop until signaled.
      wait_status = vkWaitForFences(
          _vk.device,
          frame_res->nwait_fences,
          frame_res->fences,
          VK_TRUE,
          0x3B9ACA00ul);
    } while (wait_status == VK_TIMEOUT);
    vkResetFences(_vk.device, frame_res->nwait_fences, frame_res->fences);
    frame_res->nwait_fences = 0;
  }

  // Destroy retired pipelines
  for (VkPipeline p : frame_res->retire.list()) { vkDestroyPipeline(_vk.device, p, NULL); }
  frame_res->retire.clear();

  // Destroy retired pipeline layouts
  for (VkPipelineLayout l : frame_res->retire.list()) {
    vkDestroyPipelineLayout(_vk.device, l, NULL);
  }
  frame_res->retire.clear();

  // Destroy retired descriptor set layouts
  for (VkDescriptorSetLayout l : frame_res->retire.list()) {
    vkDestroyDescriptorSetLayout(_vk.device, l, NULL);
  }
  frame_res->retire.clear();

  // Free retired command buffers
  for (const ngfvk_cmd_buf_with_pool& cb : frame_res->retire.list()) {
    vkFreeCommandBuffers(_vk.device, cb.cmd_pool, 1u, &cb.cmd_buf);
  }

  // Reset command pools
  for (const ngfvk_cmd_buf_with_pool& cb : frame_res->retire.list()) {
    vkResetCommandPool(_vk.device, cb.cmd_pool, 0);
  }
  frame_res->retire.clear();

  // Destroy retired framebuffers
  for (VkFramebuffer fb : frame_res->retire.list()) {
    vkDestroyFramebuffer(_vk.device, fb, NULL);
  }
  frame_res->retire.clear();

  // Destroy retired render passes
  for (VkRenderPass rp : frame_res->retire.list()) { vkDestroyRenderPass(_vk.device, rp, NULL); }
  frame_res->retire.clear();

  // Destroy retired samplers
  for (ngf_sampler s : frame_res->retire.list()) { NGFI_FREE(s); }
  frame_res->retire.clear();

  // Destroy retired image views
  for (VkImageView v : frame_res->retire.list()) { vkDestroyImageView(_vk.device, v, nullptr); }
  frame_res->retire.list().clear();
  for (ngf_image_view v : frame_res->retire.list()) { NGFI_FREE(v); }
  frame_res->retire.list().clear();

  // Destroy retired buffer views
  for (ngf_texel_buffer_view v : frame_res->retire.list()) { NGFI_FREE(v); }
  frame_res->retire.clear();

  // Destroy retired images
  for (ngf_image img : frame_res->retire.list()) { NGFI_FREE(img); }
  frame_res->retire.clear();

  // Destroy retired buffers
  for (ngf_buffer buf : frame_res->retire.list()) { NGFI_FREE(buf); }
  frame_res->retire.clear();

  // Reset retired descriptor pool lists
  for (ngfvk_desc_pools_list* dpl : frame_res->retire.list()) { ngfvk_reset_desc_pools_list(dpl); }
  frame_res->retire.clear();
}

// Initializes a descriptor superpool with `pools_lists` zero-initialized
// per-frame pools lists, tagged with the id of the owning context.
static ngf_error
ngfvk_create_desc_superpool(ngfvk_desc_superpool* superpool, uint8_t pools_lists, uint16_t ctx_id) {
  superpool->ctx_id      = ctx_id;
  superpool->pools_lists = ngfi::fixed_array {pools_lists};
  memset(superpool->pools_lists.data(), 0, pools_lists * sizeof(ngfvk_desc_pools_list));
  return NGF_ERROR_OK;
}

// Destroys every Vulkan descriptor pool owned by the superpool and frees the
// associated bookkeeping nodes.
static void ngfvk_destroy_desc_superpool(ngfvk_desc_superpool* superpool) {
  for (auto& pool_list : superpool->pools_lists) {
    ngfvk_desc_pool* p = pool_list.list;
    while (p) {
      vkDestroyDescriptorPool(_vk.device,
                              p->vk_pool, NULL);
      ngfvk_desc_pool* next = p->next;
      NGFI_FREE(p);
      p = next;
    }
  }
  superpool->pools_lists = ngfi::fixed_array {};
}

// Finds (or lazily creates) the descriptor pools list that services the frame
// identified by the given frame token, within the current context.
static ngfvk_desc_pools_list* ngfvk_find_desc_pools_list(ngf_frame_token token) {
  const uint16_t ctx_id   = ngfi_frame_ctx_id(token);
  const uint8_t  nframes  = ngfi_frame_max_inflight_frames(token);
  const uint8_t  frame_id = ngfi_frame_id(token);
  // Look for an existing superpool associated with the token's context.
  ngfvk_desc_superpool* superpool = NULL;
  for (size_t i = 0; i < CURRENT_CONTEXT->desc_superpools.size(); ++i) {
    if (CURRENT_CONTEXT->desc_superpools[i].ctx_id == ctx_id) {
      superpool = &CURRENT_CONTEXT->desc_superpools[i];
      break;
    }
  }
  // No superpool for this context yet - create one.
  if (superpool == NULL) {
    ngfvk_desc_superpool new_superpool = {
        .ctx_id      = (uint16_t)~0,
        .pools_lists = ngfi::fixed_array {}};
    CURRENT_CONTEXT->desc_superpools.emplace_back(ngfi::move(new_superpool));
    superpool = &CURRENT_CONTEXT->desc_superpools.back();
    ngfvk_create_desc_superpool(superpool, nframes, ctx_id);
  }
  return &superpool->pools_lists[frame_id];
}

// Allocates a descriptor set with the given layout from the given pools list,
// creating (or advancing to) a fresh Vulkan descriptor pool when the active
// one cannot service the request. All bindings of the freshly allocated set
// are pre-populated with dummy resources so that unbound slots are valid.
// Returns VK_NULL_HANDLE on failure.
// NOTE(review): `ngfi::tmp_alloc(...)` calls below appear to have lost their
// template argument lists during extraction - tokens preserved as found.
static VkDescriptorSet ngfvk_desc_pools_list_allocate_set(
    ngfvk_desc_pools_list* pools,
    const ngfvk_desc_set_layout* set_layout) {
  // Ensure we have an active desriptor pool that is able to service the
  // request.
  const bool have_active_pool    = (pools->active_pool != NULL);
  bool       fresh_pool_required = !have_active_pool;
  if (have_active_pool) {
    // Check if the active descriptor pool can fit the required descriptor
    // set.
    ngfvk_desc_pool* pool = pools->active_pool;
    const ngfvk_desc_pool_capacity* capacity = &pool->capacity;
    ngfvk_desc_pool_capacity* usage = &pool->utilization;
    for (unsigned i = 0; !fresh_pool_required && i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
      fresh_pool_required |=
          (usage->descriptors[i] + set_layout->counts[i] >= capacity->descriptors[i]);
    }
    fresh_pool_required |= (usage->sets + 1u >= capacity->sets);
  }
  if (fresh_pool_required) {
    if (!have_active_pool || pools->active_pool->next == NULL) {
      // No reusable pool in the chain - create a brand new one.
      // TODO: make this tweakable
      ngfvk_desc_pool_capacity capacity;
      capacity.sets = 100u;
      for (int i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) capacity.descriptors[i] = 100u;

      // Prepare descriptor counts.
      auto vk_pool_sizes = ngfi::tmp_alloc(NGF_DESCRIPTOR_TYPE_COUNT);
      for (unsigned i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
        vk_pool_sizes[i].descriptorCount = capacity.descriptors[i];
        vk_pool_sizes[i].type            = get_vk_descriptor_type((ngf_descriptor_type)i);
      }

      // Prepare a createinfo structure for the new pool.
      const VkDescriptorPoolCreateInfo vk_pool_ci = {
          .sType         = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
          .pNext         = NULL,
          .flags         = 0u,
          .maxSets       = capacity.sets,
          .poolSizeCount = NGF_DESCRIPTOR_TYPE_COUNT,
          .pPoolSizes    = vk_pool_sizes};

      // Create the new pool.
      ngfvk_desc_pool* new_pool = NGFI_ALLOC(ngfvk_desc_pool);
      new_pool->next            = NULL;
      new_pool->capacity        = capacity;
      memset(&new_pool->utilization, 0, sizeof(new_pool->utilization));
      const VkResult vk_pool_create_result =
          vkCreateDescriptorPool(_vk.device, &vk_pool_ci, NULL, &new_pool->vk_pool);
      if (vk_pool_create_result == VK_SUCCESS) {
        // Link the new pool at the tail of the chain (or make it the head).
        if (pools->active_pool != NULL && pools->active_pool->next == NULL) {
          pools->active_pool->next = new_pool;
        } else if (pools->active_pool == NULL) {
          pools->list = new_pool;
        } else {
          // shouldn't happen
          assert(false);
        }
        pools->active_pool = new_pool;
      } else {
        NGFI_FREE(new_pool);
        assert(false);
      }
    } else {
      // A previously created (and since reset) pool exists further down the
      // chain - reuse it.
      pools->active_pool = pools->active_pool->next;
    }
  }

  // Allocate the new descriptor set from the pool.
  ngfvk_desc_pool* pool = pools->active_pool;
  const VkDescriptorSetAllocateInfo vk_desc_set_info = {
      .sType              = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
      .pNext              = NULL,
      .descriptorPool     = pool->vk_pool,
      .descriptorSetCount = 1u,
      .pSetLayouts        = &set_layout->vk_handle};
  VkDescriptorSet result = VK_NULL_HANDLE;
  const VkResult desc_set_alloc_result =
      vkAllocateDescriptorSets(_vk.device, &vk_desc_set_info, &result);
  if (desc_set_alloc_result != VK_SUCCESS) { return VK_NULL_HANDLE; }

  // Update usage counters for the active descriptor pool.
  for (unsigned i = 0; i < NGF_DESCRIPTOR_TYPE_COUNT; ++i) {
    pool->utilization.descriptors[i] += set_layout->counts[i];
  }
  pool->utilization.sets++;

  // Bind dummy resources.
  auto dummy_writes = ngfi::tmp_alloc(set_layout->nall_descs);
  uint32_t num_writes = 0u;
  for (uint32_t b = 0u; b < set_layout->binding_properties.size(); ++b) {
    // Skip bindings that have no associated descriptor type.
    if (set_layout->binding_properties[b].type == VK_DESCRIPTOR_TYPE_MAX_ENUM) continue;
    for (uint32_t array_idx = 0u; array_idx < set_layout->binding_properties[b].ndescs_in_binding;
         ++array_idx) {
      VkWriteDescriptorSet* desc_w = &dummy_writes[num_writes++];
      desc_w->sType                = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
      desc_w->pNext                = NULL;
      desc_w->descriptorCount      = 1u;
      desc_w->descriptorType       = set_layout->binding_properties[b].type;
      desc_w->dstArrayElement      = array_idx;
      desc_w->dstBinding           = b;
      desc_w->dstSet               = result;
      const bool is_multilayered_image = set_layout->binding_properties[b].is_multilayered_image;
      const bool is_cubemap            = set_layout->binding_properties[b].is_cubemap;
      // Pick the dummy resource matching the descriptor type.
      switch (desc_w->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
        desc_w->pImageInfo = &_vk.dummy_res.samp_info;
        break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        desc_w->pImageInfo =
            is_multilayered_image ? &_vk.dummy_res.imgsamp_arr_info : &_vk.dummy_res.imgsamp_info;
        break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        if (!is_cubemap) {
          desc_w->pImageInfo =
              is_multilayered_image ? &_vk.dummy_res.img_arr_info : &_vk.dummy_res.img_info;
        } else {
          desc_w->pImageInfo =
              is_multilayered_image ? &_vk.dummy_res.cube_arr_info : &_vk.dummy_res.cube_info;
        }
        break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        desc_w->pBufferInfo = &_vk.dummy_res.buf_info;
        break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        desc_w->pTexelBufferView = &_vk.dummy_res.tbuf->vk_buf_view;
        break;
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
        // Acceleration structure writes go through an extension struct
        // chained off pNext.
        auto dummy_accel_info = ngfi::tmp_alloc();
        dummy_accel_info->sType =
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR;
        dummy_accel_info->pNext                      = NULL;
        dummy_accel_info->accelerationStructureCount = 1;
        dummy_accel_info->pAccelerationStructures    = &_vk.dummy_res.dummy_accel_struct;
        desc_w->pNext                                = dummy_accel_info;
        break;
      }
      default:
        assert(false);
      }
    }
  }
  vkUpdateDescriptorSets(_vk.device, num_writes, dummy_writes, 0, NULL);
  return result;
}

// Creates a Vulkan image view covering the given mip levels and layers of an
// image. The aspect mask is derived from the format (depth/stencil vs color).
static ngf_error ngfvk_create_vk_image_view(
    VkImage image,
    VkImageViewType image_type,
    VkFormat image_format,
    uint32_t nmips,
    uint32_t nlayers,
    VkImageView* result) {
  const bool is_depth    = ngfvk_format_is_depth(image_format);
  const bool is_stencil  = ngfvk_format_is_stencil(image_format);
  const auto stencil_bit = is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : ((VkImageAspectFlagBits)0);
  const auto aspect_mask = (VkImageAspectFlags)(is_depth ?
                                                    (VK_IMAGE_ASPECT_DEPTH_BIT | stencil_bit) :
                                                    VK_IMAGE_ASPECT_COLOR_BIT);
  const VkImageViewCreateInfo image_view_info = {
      .sType      = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
      .pNext      = NULL,
      .flags      = 0u,
      .image      = image,
      .viewType   = image_type,
      .format     = image_format,
      .components = {.r = VK_COMPONENT_SWIZZLE_IDENTITY,
                     .g = VK_COMPONENT_SWIZZLE_IDENTITY,
                     .b = VK_COMPONENT_SWIZZLE_IDENTITY,
                     .a = VK_COMPONENT_SWIZZLE_IDENTITY},
      .subresourceRange = {
          .aspectMask     = aspect_mask,
          .baseMipLevel   = 0u,
          .levelCount     = nmips,
          .baseArrayLayer = 0u,
          .layerCount     = nlayers}};
  const VkResult create_view_vkerr = vkCreateImageView(_vk.device, &image_view_info, NULL, result);
  if (create_view_vkerr != VK_SUCCESS) {
    return NGF_ERROR_INVALID_OPERATION;
  } else {
    return NGF_ERROR_OK;
  }
}

// Builds a single-subpass VkRenderPass from the given attachment descriptions
// and their per-pass properties (layouts, load/store ops, resolve flags).
// At most one depth/stencil attachment is allowed, and if any resolve
// attachments are present there must be exactly one per color attachment.
// NOTE(review): `ngfi::tmp_alloc(...)` calls appear to have lost template
// argument lists during extraction - tokens preserved as found.
static VkResult ngfvk_renderpass_from_attachment_descs(
    uint32_t nattachments,
    const ngf_attachment_description* attachment_descs,
    const ngfvk_attachment_pass_desc* attachment_compat_pass_descs,
    VkRenderPass* result) {
  auto vk_attachment_descs        = ngfi::tmp_alloc(nattachments);
  auto vk_color_attachment_refs   = ngfi::tmp_alloc(nattachments);
  auto vk_resolve_attachment_refs = ngfi::tmp_alloc(nattachments);
  uint32_t              ncolor_attachments   = 0u;
  uint32_t              nresolve_attachments = 0u;
  VkAttachmentReference depth_stencil_attachment_ref;
  bool                  have_depth_stencil_attachment = false;
  for (uint32_t a = 0u; a < nattachments; ++a) {
    const ngf_attachment_description* ngf_attachment_desc  = &attachment_descs[a];
    const ngfvk_attachment_pass_desc* attachment_pass_desc = &attachment_compat_pass_descs[a];
    const bool has_stencil = ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH_STENCIL;
    VkAttachmentDescription* vk_attachment_desc = &vk_attachment_descs[a];
    vk_attachment_desc->flags   = 0u;
    vk_attachment_desc->format  = get_vk_image_format(ngf_attachment_desc->format);
    vk_attachment_desc->samples = get_vk_sample_count(ngf_attachment_desc->sample_count);
    vk_attachment_desc->loadOp  = attachment_pass_desc->load_op;
    vk_attachment_desc->storeOp = attachment_pass_desc->store_op;
    // Stencil ops are only meaningful for combined depth/stencil attachments.
    vk_attachment_desc->stencilLoadOp =
        has_stencil ? attachment_pass_desc->load_op : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    vk_attachment_desc->stencilStoreOp =
        has_stencil ? attachment_pass_desc->store_op : VK_ATTACHMENT_STORE_OP_DONT_CARE;
    // No layout transitions are performed by the pass itself.
    vk_attachment_desc->initialLayout = attachment_pass_desc->layout;
    vk_attachment_desc->finalLayout   = attachment_pass_desc->layout;
    if (ngf_attachment_desc->type == NGF_ATTACHMENT_COLOR) {
      if (!attachment_pass_desc->is_resolve) {
        VkAttachmentReference* vk_color_attachment_reference =
            &vk_color_attachment_refs[ncolor_attachments++];
        vk_color_attachment_reference->attachment = a;
        vk_color_attachment_reference->layout     = attachment_pass_desc->layout;
      } else {
        VkAttachmentReference* vk_resolve_attachment_reference =
            &vk_resolve_attachment_refs[nresolve_attachments++];
        vk_resolve_attachment_reference->attachment = a;
        vk_resolve_attachment_reference->layout     = attachment_pass_desc->layout;
      }
    }
    if (ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH ||
        ngf_attachment_desc->type == NGF_ATTACHMENT_DEPTH_STENCIL) {
      // Only a single depth/stencil attachment is supported per pass.
      if (have_depth_stencil_attachment) {
        // TODO: insert diag. log here
        return VK_ERROR_UNKNOWN;
      } else {
        have_depth_stencil_attachment           = true;
        depth_stencil_attachment_ref.attachment = a;
        depth_stencil_attachment_ref.layout     = attachment_pass_desc->layout;
      }
    }
  }
  // If any resolve attachments are present, require one per color attachment.
  if (nresolve_attachments > 0u && nresolve_attachments != ncolor_attachments) {
    // TODO: insert diag. log here.
    return VK_ERROR_UNKNOWN;
  }
  const VkSubpassDescription subpass_desc = {
      .flags                   = 0u,
      .pipelineBindPoint       = VK_PIPELINE_BIND_POINT_GRAPHICS,
      .inputAttachmentCount    = 0u,
      .pInputAttachments       = NULL,
      .colorAttachmentCount    = ncolor_attachments,
      .pColorAttachments       = vk_color_attachment_refs,
      .pResolveAttachments     = nresolve_attachments > 0u ? vk_resolve_attachment_refs : NULL,
      .pDepthStencilAttachment = have_depth_stencil_attachment ?
#if defined(_WIN32) || defined(_WIN64) const VkWin32SurfaceCreateInfoKHR surface_info = { .sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, .pNext = NULL, .flags = 0, .hinstance = GetModuleHandle(NULL), .hwnd = (HWND)swapchain_info->native_handle}; #elif defined(__ANDROID__) const VkAndroidSuraceCreateInfoKHR surface_info = { .sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, .pNext = NULL, .flags = 0, .window = swapchain_info->native_handle}; #elif defined(__APPLE__) const VkMetalSurfaceCreateInfoEXT surface_info = { .sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, .pNext = NULL, .flags = 0, .pLayer = (const CAMetalLayer*)ngfvk_create_ca_metal_layer(swapchain_info)}; #else const VkXcbSurfaceCreateInfoKHR surface_info = { .sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR, .pNext = NULL, .flags = 0, .connection = _vk.xcb_connection, .window = (xcb_window_t)swapchain_info->native_handle}; #endif vk_err = VK_CREATE_SURFACE_FN(_vk.instance, &surface_info, NULL, &ctx->surface); if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; } VkBool32 surface_supported = false; vkGetPhysicalDeviceSurfaceSupportKHR( _vk.phys_dev, _vk.present_family_idx, ctx->surface, &surface_supported); if (!surface_supported) { return NGF_ERROR_OBJECT_CREATION_FAILED; } // Create the default rendertarget object. const bool default_rt_has_depth = swapchain_info->depth_format != NGF_IMAGE_FORMAT_UNDEFINED; const bool default_rt_is_multisampled = (unsigned int)swapchain_info->sample_count > 1u; const bool default_rt_no_stencil = swapchain_info->depth_format == NGF_IMAGE_FORMAT_DEPTH32 || swapchain_info->depth_format == NGF_IMAGE_FORMAT_DEPTH16; const uint32_t nattachment_descs = 1u + (default_rt_has_depth ? 1u : 0u) + (default_rt_is_multisampled ? 
                                                                             1u : 0u);
    ctx->default_render_target = ngfi::move(
        ngf_render_target_t::make(swapchain_info->width, swapchain_info->height, nattachment_descs)
            .value());
    uint32_t attachment_desc_idx = 0u;

    // Describe the main color attachment (backed by the swapchain images).
    ngf_attachment_description* color_attachment_desc =
        &ctx->default_render_target->attachment_descs[attachment_desc_idx];
    color_attachment_desc->format       = swapchain_info->color_format;
    color_attachment_desc->sample_count = swapchain_info->sample_count;
    color_attachment_desc->type         = NGF_ATTACHMENT_COLOR;
    color_attachment_desc->is_resolve   = false;
    ngfvk_attachment_pass_desc* color_attachment_pass_desc =
        &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];
    color_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    color_attachment_pass_desc->is_resolve = false;
    color_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;
    color_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;

    // Optional depth (or combined depth/stencil) attachment.
    if (default_rt_has_depth) {
      ++attachment_desc_idx;
      ngf_attachment_description* depth_attachment_desc =
          &ctx->default_render_target->attachment_descs[attachment_desc_idx];
      depth_attachment_desc->format       = swapchain_info->depth_format;
      depth_attachment_desc->sample_count = swapchain_info->sample_count;
      depth_attachment_desc->type         = default_rt_no_stencil ?
                                                NGF_ATTACHMENT_DEPTH :
                                                NGF_ATTACHMENT_DEPTH_STENCIL;
      depth_attachment_desc->is_resolve = false;
      ngfvk_attachment_pass_desc* depth_attachment_pass_desc =
          &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];
      depth_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
      depth_attachment_pass_desc->is_resolve = false;
      depth_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;
      depth_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    }

    // Optional resolve attachment for a multisampled default render target.
    if (default_rt_is_multisampled) {
      ++attachment_desc_idx;
      ngf_attachment_description* resolve_attachment_desc =
          &ctx->default_render_target->attachment_descs[attachment_desc_idx];
      resolve_attachment_desc->format       = swapchain_info->color_format;
      resolve_attachment_desc->sample_count = NGF_SAMPLE_COUNT_1;
      resolve_attachment_desc->type         = NGF_ATTACHMENT_COLOR;
      resolve_attachment_desc->is_resolve   = true;
      ngfvk_attachment_pass_desc* resolve_attachment_pass_desc =
          &ctx->default_render_target->attachment_compat_pass_descs[attachment_desc_idx];
      resolve_attachment_pass_desc->layout     = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
      resolve_attachment_pass_desc->is_resolve = true;
      resolve_attachment_pass_desc->load_op    = VK_ATTACHMENT_LOAD_OP_CLEAR;
      resolve_attachment_pass_desc->store_op   = VK_ATTACHMENT_STORE_OP_DONT_CARE;
      ctx->default_render_target->have_resolve_attachments = true;
    }
    // Build a render pass compatible with the default RT's attachments.
    ngfvk_renderpass_from_attachment_descs(
        nattachment_descs,
        ctx->default_render_target->attachment_descs.data(),
        ctx->default_render_target->attachment_compat_pass_descs.data(),
        &ctx->default_render_target->compat_render_pass);

    // Create the swapchain itself.
    auto maybe_swapchain =
        ngfvk_swapchain::make(*swapchain_info, ctx->default_render_target.get(), ctx->surface);
    if (maybe_swapchain.has_error()) return maybe_swapchain.error();
    ctx->swapchain = ngfi::move(maybe_swapchain.value());
    if (err != NGF_ERROR_OK) { return err; }
    ctx->swapchain_info = *swapchain_info;
  } else {
    ctx->default_render_target = NULL;
  }

  // Create frame resource holders.
  // Without a swapchain, default to 3 frames in flight.
  const uint32_t max_inflight_frames = swapchain_info ? ctx->swapchain->nimgs : 3u;
  ctx->max_inflight_frames           = max_inflight_frames;
  ctx->frame_res = ngfi::fixed_array {max_inflight_frames};
  if (ctx->frame_res.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
  for (uint32_t f = 0u; f < max_inflight_frames; ++f) {
    ctx->frame_res[f].res_frame_arena.set_block_size(1024);
    ctx->frame_res[f].submitted_cmd_bufs.reserve(8u);
    const VkFenceCreateInfo fence_info = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0u};
    ctx->frame_res[f].nwait_fences = 0;
    for (uint32_t i = 0u; i < sizeof(ctx->frame_res[f].fences) / sizeof(VkFence); ++i) {
      vk_err = vkCreateFence(_vk.device, &fence_info, NULL, &ctx->frame_res[f].fences[i]);
      if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
    }
  }
  ctx->frame_id            = 0u;
  ctx->current_frame_token = ~0u;
  ctx->command_superpools.reserve(3);
  ctx->desc_superpools.reserve(3);
  ctx->renderpass_cache.reserve(8);

  // Create the default pipeline layout, exposing only push constants.
  {
    const VkPipelineLayoutCreateInfo default_push_layout_info = {
        .sType                  = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        .pNext                  = NULL,
        .flags                  = 0u,
        .setLayoutCount         = 0u,
        .pSetLayouts            = NULL,
        .pushConstantRangeCount = 1u,
        .pPushConstantRanges    = &ngfvk::global::default_push_constant_range};
    vk_err = vkCreatePipelineLayout(
        _vk.device,
        &default_push_layout_info,
        NULL,
        &ctx->vk_default_push_layout);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  }
  return ngfi::move(ctx);
}

// Waits for the device to go idle, then tears down all context-owned Vulkan
// objects and per-frame resources.
ngf_context_t::~ngf_context_t() noexcept {
  vkDeviceWaitIdle(_vk.device);
  if (vk_default_push_layout != VK_NULL_HANDLE) {
    vkDestroyPipelineLayout(_vk.device, vk_default_push_layout, NULL);
  }
  if (default_render_target) {
    swapchain = ngfi::unique_ptr {};  // swapchain must be destroyed before surface.
    if (surface != VK_NULL_HANDLE) { vkDestroySurfaceKHR(_vk.instance, surface, NULL); }
  }
  default_render_target = ngfi::unique_ptr {};  // explicitly destroy default RT here.
  for (ngfvk_frame_resources& fr : frame_res) {
    ngfvk_retire_resources(&fr);
    for (uint32_t i = 0u; i < sizeof(fr.fences) / sizeof(VkFence); ++i) {
      vkDestroyFence(_vk.device, fr.fences[i], NULL);
    }
  }
  for (size_t p = 0; p < desc_superpools.size(); ++p) {
    ngfvk_destroy_desc_superpool(&desc_superpools[p]);
  }
  ngfvk_reset_renderpass_cache(this);
  if (CURRENT_CONTEXT == this) CURRENT_CONTEXT = nullptr;
}

// Creates a render target from user-provided attachment images.
// NOTE(review): `ngfi::fixed_array {...}` declarations below appear to have
// lost their template argument lists during extraction - tokens preserved.
ngfi::maybe_ngfptr ngf_render_target_t::make(const ngf_render_target_info& info) NGF_NOEXCEPT {
  auto rt = ngfi::unique_ptr::make();
  if (!rt) return NGF_ERROR_OUT_OF_MEM;

  // Count color and resolve attachments: if resolve attachments are present
  // at all, there must be exactly one per color attachment.
  uint32_t ncolor_attachments   = 0u;
  uint32_t nresolve_attachments = 0u;
  for (uint32_t a = 0u; a < info.attachment_descriptions->ndescs; ++a) {
    if (info.attachment_descriptions->descs[a].type == NGF_ATTACHMENT_COLOR) {
      if (info.attachment_descriptions->descs[a].is_resolve) {
        ++nresolve_attachments;
      } else {
        ++ncolor_attachments;
      }
    }
  }
  if (nresolve_attachments > 0 && ncolor_attachments != nresolve_attachments) {
    NGFI_DIAG_ERROR("the same number of resolve and color attachments must be provided");
    return NGF_ERROR_INVALID_OPERATION;
  }

  ngfi::fixed_array vk_attachment_pass_descs {
      info.attachment_descriptions->ndescs};
  ngfi::fixed_array attachment_views {info.attachment_descriptions->ndescs};
  ngfi::fixed_array attachment_images {info.attachment_descriptions->ndescs};
  for (uint32_t a = 0u; a < info.attachment_descriptions->ndescs; ++a) {
    const ngf_attachment_description* ngf_attachment_desc =
        &info.attachment_descriptions->descs[a];
    ngfvk_attachment_pass_desc* attachment_pass_desc = &vk_attachment_pass_descs[a];
    const ngf_attachment_type   attachment_type      = ngf_attachment_desc->type;
    rt->have_resolve_attachments |= ngf_attachment_desc->is_resolve;
    // Pick the image layout based on the attachment type.
    switch (attachment_type) {
    case NGF_ATTACHMENT_COLOR:
      attachment_pass_desc->layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
      break;
    case NGF_ATTACHMENT_DEPTH:
    case NGF_ATTACHMENT_DEPTH_STENCIL:
      attachment_pass_desc->layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
      break;
    default:
      assert(false);
    }
    const ngf_image_ref* attachment_img_ref = &info.attachment_image_refs[a];
    const ngf_image      attachment_img     = attachment_img_ref->image;
    attachment_pass_desc->is_resolve        = ngf_attachment_desc->is_resolve;
    // These are needed just to create a compatible render pass, load/store ops don't affect
    // render pass compatibility.
    const ngf_attachment_load_op  load_op  = NGF_LOAD_OP_DONTCARE;
    const ngf_attachment_store_op store_op = NGF_STORE_OP_DONTCARE;
    attachment_pass_desc->load_op          = get_vk_load_op(load_op);
    attachment_pass_desc->store_op         = get_vk_store_op(store_op);
    const bool attachment_is_cubemap = attachment_img_ref->image->type == NGF_IMAGE_TYPE_CUBE;
    const VkImageAspectFlags subresource_aspect_flags =
        (attachment_type == NGF_ATTACHMENT_COLOR ? VK_IMAGE_ASPECT_COLOR_BIT : 0u) |
        (attachment_type == NGF_ATTACHMENT_DEPTH ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) |
        (attachment_type == NGF_ATTACHMENT_DEPTH_STENCIL
             ? VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT
             : 0u);
    const VkImageViewCreateInfo image_view_create_info = {
        .sType      = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext      = NULL,
        .flags      = 0u,
        .image      = (VkImage)attachment_img->alloc.obj_handle,
        .viewType   = VK_IMAGE_VIEW_TYPE_2D,
        .format     = attachment_img->vk_fmt,
        .components =
            {
                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
            },
        .subresourceRange = {
            .aspectMask   = subresource_aspect_flags,
            .baseMipLevel = attachment_img_ref->mip_level,
            .levelCount   = 1u,
            // Cubemap faces are stored as six consecutive array layers.
            .baseArrayLayer = attachment_is_cubemap ?
                                  6u * attachment_img_ref->layer + attachment_img_ref->cubemap_face :
                                  attachment_img_ref->layer,
            .layerCount = 1u,
        }};
    VkResult vk_err =
        vkCreateImageView(_vk.device, &image_view_create_info, NULL, &attachment_views[a]);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
    attachment_images[a] = attachment_img;
  }
  rt->attachment_image_views = ngfi::move(attachment_views);
  rt->attachment_images      = ngfi::move(attachment_images);
  // Create a render pass compatible with this attachment configuration.
  const VkResult renderpass_create_result = ngfvk_renderpass_from_attachment_descs(
      info.attachment_descriptions->ndescs,
      info.attachment_descriptions->descs,
      vk_attachment_pass_descs.data(),
      &rt->compat_render_pass);
  if (renderpass_create_result != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  // The first attachment's image defines the render target dimensions.
  rt->width        = info.attachment_image_refs[0].image->extent.width;
  rt->height       = info.attachment_image_refs[0].image->extent.height;
  rt->nattachments = info.attachment_descriptions->ndescs;
  rt->attachment_descs = ngfi::fixed_array {rt->nattachments};
  rt->attachment_compat_pass_descs = ngfi::move(vk_attachment_pass_descs);
  memcpy(
      &rt->attachment_descs[0],
      info.attachment_descriptions->descs,
      sizeof(rt->attachment_descs[0]) * info.attachment_descriptions->ndescs);

  // Create a framebuffer.
  const VkFramebufferCreateInfo fb_info = {
      .sType           = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
      .pNext           = NULL,
      .flags           = 0u,
      .renderPass      = rt->compat_render_pass,
      .attachmentCount = info.attachment_descriptions->ndescs,
      .pAttachments    = rt->attachment_image_views.data(),
      .width           = rt->width,
      .height          = rt->height,
      .layers          = 1u};
  VkResult vk_err = vkCreateFramebuffer(_vk.device, &fb_info, NULL, &rt->frame_buffer);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  return rt;
}

// Creates the "default" render target of a context. No framebuffer is created
// here; attachment descriptions are filled in by the caller afterwards.
ngfi::maybe_ngfptr ngf_render_target_t::make(uint32_t width, uint32_t height, uint32_t nattachment_descs) NGF_NOEXCEPT {
  auto rt = ngfi::unique_ptr::make();
  if (!rt) return NGF_ERROR_OUT_OF_MEM;
  rt->is_default   = true;
  rt->width        = width;
  rt->height       = height;
  rt->frame_buffer = VK_NULL_HANDLE;
  rt->nattachments = nattachment_descs;
  rt->attachment_descs = ngfi::fixed_array {nattachment_descs};
  rt->attachment_compat_pass_descs = ngfi::fixed_array {nattachment_descs};
  return rt;
}

// Retires the render target's Vulkan objects into the current frame's
// retirement list (only when a context is current).
ngf_render_target_t::~ngf_render_target_t() NGF_NOEXCEPT {
  if (CURRENT_CONTEXT) {
    ngfvk_frame_resources* res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];
    // Default render targets do not own a framebuffer.
    if (!is_default) {
      if (frame_buffer != VK_NULL_HANDLE) { res->retire.append(frame_buffer); }
    }
    if (compat_render_pass != VK_NULL_HANDLE) { res->retire.append(compat_render_pass); }
    for (VkImageView v : attachment_image_views) { res->retire.append(v); }
    // clear out the entire renderpass cache to make sure the entries associated
    // with this target don't stick around.
    // TODO: clear out all caches across all contexts.
ngfvk_reset_renderpass_cache(CURRENT_CONTEXT); } } ngfi::maybe_ngfptr ngf_texel_buffer_view_t::make(const ngf_texel_buffer_view_info& info) NGF_NOEXCEPT { auto buf_view = ngfi::unique_ptr::make(); if (!buf_view) return NGF_ERROR_OUT_OF_MEM; const VkBufferViewCreateInfo vk_buf_view_ci = { .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, .pNext = NULL, .flags = 0u, .buffer = (VkBuffer)info.buffer->alloc.obj_handle, .format = get_vk_image_format(info.texel_format), .offset = info.offset, .range = info.size}; const VkResult vk_result = vkCreateBufferView(_vk.device, &vk_buf_view_ci, NULL, &buf_view->vk_buf_view); if (vk_result != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED; buf_view->buffer = info.buffer; return buf_view; } ngf_texel_buffer_view_t::~ngf_texel_buffer_view_t() NGF_NOEXCEPT { vkDestroyBufferView(_vk.device, vk_buf_view, nullptr); } ngfi::maybe_ngfptr ngfvk_generic_pipeline::make(const ngf_graphics_pipeline_info& info) NGF_NOEXCEPT { ngfi::tmp_arena().reset(); auto pipeline = ngfi::unique_ptr::make(); if (!pipeline) return NGF_ERROR_OUT_OF_MEM; VkPipelineShaderStageCreateInfo vk_shader_stages[5]; if (info.nshader_stages > 5) return NGF_ERROR_OBJECT_CREATION_FAILED; ngf_error err = pipeline->common_init( info.spec_info, vk_shader_stages, info.shader_stages, info.nshader_stages); if (err != NGF_ERROR_OK) return err; // Prepare vertex input. 
// Translate nicegraf vertex-buffer bindings and attributes into Vulkan
// vertex-input state. Scratch arrays live in the tmp arena.
auto vk_binding_descs = ngfi::tmp_alloc(info.input_info->nvert_buf_bindings);
auto vk_attrib_descs = ngfi::tmp_alloc(info.input_info->nattribs);
// A NULL result is only an OOM error when a nonzero count was requested.
if ((vk_binding_descs == nullptr && info.input_info->nvert_buf_bindings > 0) ||
    (vk_attrib_descs == nullptr && info.input_info->nattribs > 0)) {
  return NGF_ERROR_OUT_OF_MEM;
}
for (uint32_t i = 0u; i < info.input_info->nvert_buf_bindings; ++i) {
  VkVertexInputBindingDescription* vk_binding_desc = &vk_binding_descs[i];
  const ngf_vertex_buf_binding_desc* binding_desc = &info.input_info->vert_buf_bindings[i];
  vk_binding_desc->binding   = binding_desc->binding;
  vk_binding_desc->stride    = binding_desc->stride;
  vk_binding_desc->inputRate = get_vk_input_rate(binding_desc->input_rate);
}
for (uint32_t i = 0u; i < info.input_info->nattribs; ++i) {
  VkVertexInputAttributeDescription* vk_attrib_desc = &vk_attrib_descs[i];
  const ngf_vertex_attrib_desc* attrib_desc = &info.input_info->attribs[i];
  vk_attrib_desc->location = attrib_desc->location;
  vk_attrib_desc->binding  = attrib_desc->binding;
  vk_attrib_desc->offset   = attrib_desc->offset;
  vk_attrib_desc->format =
      get_vk_vertex_format(attrib_desc->type, attrib_desc->size, attrib_desc->normalized);
}
VkPipelineVertexInputStateCreateInfo vertex_input = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
    .pNext = NULL,
    .flags = 0u,
    .vertexBindingDescriptionCount   = info.input_info->nvert_buf_bindings,
    .pVertexBindingDescriptions      = vk_binding_descs,
    .vertexAttributeDescriptionCount = info.input_info->nattribs,
    .pVertexAttributeDescriptions    = vk_attrib_descs};
// Prepare input assembly.
VkPipelineInputAssemblyStateCreateInfo input_assembly = {
    .sType    = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
    .pNext    = NULL,
    .flags    = 0u,
    .topology = get_vk_primitive_type(info.input_assembly_info->primitive_topology),
    .primitiveRestartEnable = info.input_assembly_info->enable_primitive_restart};
// Prepare tessellation state.
VkPipelineTessellationStateCreateInfo tess = {
    .sType              = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
    .pNext              = NULL,
    .flags              = 0u,
    .patchControlPoints = 1u};
// Prepare viewport/scissor state. Viewport and scissor are set dynamically at
// draw time (see the dynamic-state list further down), so zeroed dummies are
// used here only to satisfy the create-info structure.
const VkViewport dummy_viewport = {
    .x = .0f, .y = .0f, .width = .0f, .height = .0f, .minDepth = .0f, .maxDepth = .0f};
const VkRect2D dummy_scissor = {
    .offset = {.x = 0, .y = 0},
    .extent = {.width = 0, .height = 0}};
VkPipelineViewportStateCreateInfo viewport_state = {
    .sType         = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
    .pNext         = NULL,
    .flags         = 0u,
    .viewportCount = 1u,
    .pViewports    = &dummy_viewport,
    .scissorCount  = 1u,
    .pScissors     = &dummy_scissor};
// Prepare rasterization state. Depth bias constants are zero here because
// depth bias is in the dynamic-state list.
VkPipelineRasterizationStateCreateInfo rasterization = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
    .pNext = NULL,
    .flags = 0u,
    .depthClampEnable        = VK_FALSE,
    .rasterizerDiscardEnable = info.rasterization->discard,
    .polygonMode             = get_vk_polygon_mode(info.rasterization->polygon_mode),
    .cullMode                = get_vk_cull_mode(info.rasterization->cull_mode),
    .frontFace               = get_vk_front_face(info.rasterization->front_face),
    .depthBiasEnable         = info.rasterization->enable_depth_bias ? VK_TRUE : VK_FALSE,
    .depthBiasConstantFactor = 0.0f,
    .depthBiasClamp          = 0.0f,
    .depthBiasSlopeFactor    = 0.0f,
    .lineWidth               = 1.0f};
// Prepare multisampling.
VkPipelineMultisampleStateCreateInfo multisampling = {
    .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
    .pNext = NULL,
    .flags = 0u,
    .rasterizationSamples  = get_vk_sample_count(info.multisample->sample_count),
    .sampleShadingEnable   = VK_FALSE,
    .minSampleShading      = 0.0f,
    .pSampleMask           = NULL,
    .alphaToCoverageEnable = info.multisample->alpha_to_coverage ? VK_TRUE : VK_FALSE,
    .alphaToOneEnable      = VK_FALSE};
// Prepare depth/stencil.
VkPipelineDepthStencilStateCreateInfo depth_stencil = { .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, .pNext = NULL, .flags = 0u, .depthTestEnable = info.depth_stencil->depth_test, .depthWriteEnable = info.depth_stencil->depth_write, .depthCompareOp = get_vk_compare_op(info.depth_stencil->depth_compare), .depthBoundsTestEnable = VK_FALSE, .stencilTestEnable = info.depth_stencil->stencil_test, .front = {.failOp = get_vk_stencil_op(info.depth_stencil->front_stencil.fail_op), .passOp = get_vk_stencil_op(info.depth_stencil->front_stencil.pass_op), .depthFailOp = get_vk_stencil_op(info.depth_stencil->front_stencil.depth_fail_op), .compareOp = get_vk_compare_op(info.depth_stencil->front_stencil.compare_op), .compareMask = info.depth_stencil->front_stencil.compare_mask, .writeMask = info.depth_stencil->front_stencil.write_mask, .reference = info.depth_stencil->front_stencil.reference}, .back = {.failOp = get_vk_stencil_op(info.depth_stencil->back_stencil.fail_op), .passOp = get_vk_stencil_op(info.depth_stencil->back_stencil.pass_op), .depthFailOp = get_vk_stencil_op(info.depth_stencil->back_stencil.depth_fail_op), .compareOp = get_vk_compare_op(info.depth_stencil->back_stencil.compare_op), .compareMask = info.depth_stencil->back_stencil.compare_mask, .writeMask = info.depth_stencil->back_stencil.write_mask, .reference = info.depth_stencil->back_stencil.reference}, .minDepthBounds = 0.0f, .maxDepthBounds = 1.0f}; uint32_t ncolor_attachments = 0u; for (uint32_t i = 0; i < info.compatible_rt_attachment_descs->ndescs; ++i) { if (info.compatible_rt_attachment_descs->descs[i].type == NGF_ATTACHMENT_COLOR && !info.compatible_rt_attachment_descs->descs[i].is_resolve) ++ncolor_attachments; } // Prepare blend state. 
VkPipelineColorBlendAttachmentState blend_states[16]; memset(blend_states, 0, sizeof(blend_states)); for (size_t i = 0u; i < ncolor_attachments; ++i) { if (info.color_attachment_blend_states) { const ngf_blend_info* blend = &info.color_attachment_blend_states[i]; const VkPipelineColorBlendAttachmentState attachment_blend_state = { .blendEnable = blend->enable, .srcColorBlendFactor = blend->enable ? get_vk_blend_factor(blend->src_color_blend_factor) : VK_BLEND_FACTOR_ONE, .dstColorBlendFactor = blend->enable ? get_vk_blend_factor(blend->dst_color_blend_factor) : VK_BLEND_FACTOR_ZERO, .colorBlendOp = blend->enable ? get_vk_blend_op(blend->blend_op_color) : VK_BLEND_OP_ADD, .srcAlphaBlendFactor = blend->enable ? get_vk_blend_factor(blend->src_alpha_blend_factor) : VK_BLEND_FACTOR_ONE, .dstAlphaBlendFactor = blend->enable ? get_vk_blend_factor(blend->dst_alpha_blend_factor) : VK_BLEND_FACTOR_ZERO, .alphaBlendOp = blend->enable ? get_vk_blend_op(blend->blend_op_alpha) : VK_BLEND_OP_ADD, .colorWriteMask = (VkColorComponentFlags)(((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_R) ? VK_COLOR_COMPONENT_R_BIT : 0) | ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_G) ? VK_COLOR_COMPONENT_G_BIT : 0) | ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_B) ? VK_COLOR_COMPONENT_B_BIT : 0) | ((blend->color_write_mask & NGF_COLOR_MASK_WRITE_BIT_A) ? 
VK_COLOR_COMPONENT_A_BIT : 0))}; blend_states[i] = attachment_blend_state; } else { blend_states[i].blendEnable = VK_FALSE; blend_states[i].colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; } } if (ncolor_attachments >= NGFI_ARRAYSIZE(blend_states)) { NGFI_DIAG_ERROR("too many attachments specified"); return NGF_ERROR_OBJECT_CREATION_FAILED; } VkPipelineColorBlendStateCreateInfo color_blend = { .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, .pNext = NULL, .flags = 0u, .logicOpEnable = VK_FALSE, .logicOp = VK_LOGIC_OP_SET, .attachmentCount = ncolor_attachments, .pAttachments = blend_states, .blendConstants = {info.blend_consts[0], info.blend_consts[1], info.blend_consts[2], info.blend_consts[3]}}; // Dynamic state. const VkDynamicState dynamic_states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR, VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_DEPTH_BIAS}; const uint32_t ndynamic_states = NGFI_ARRAYSIZE(dynamic_states); VkPipelineDynamicStateCreateInfo dynamic_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, .pNext = NULL, .flags = 0u, .dynamicStateCount = ndynamic_states, .pDynamicStates = dynamic_states}; // Create a compatible render pass object. 
auto attachment_compat_pass_descs = ngfi::tmp_alloc(info.compatible_rt_attachment_descs->ndescs); for (uint32_t i = 0u; i < info.compatible_rt_attachment_descs->ndescs; ++i) { attachment_compat_pass_descs[i].load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachment_compat_pass_descs[i].store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachment_compat_pass_descs[i].is_resolve = info.compatible_rt_attachment_descs->descs[i].is_resolve; attachment_compat_pass_descs[i].layout = VK_IMAGE_LAYOUT_GENERAL; } VkResult vk_err = ngfvk_renderpass_from_attachment_descs( info.compatible_rt_attachment_descs->ndescs, info.compatible_rt_attachment_descs->descs, attachment_compat_pass_descs, &pipeline->compat_render_pass); if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; } // Create required pipeline. const VkGraphicsPipelineCreateInfo vk_pipeline_info = { .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, .pNext = NULL, .flags = 0u, .stageCount = info.nshader_stages, .pStages = vk_shader_stages, .pVertexInputState = &vertex_input, .pInputAssemblyState = &input_assembly, .pTessellationState = &tess, .pViewportState = &viewport_state, .pRasterizationState = &rasterization, .pMultisampleState = &multisampling, .pDepthStencilState = &depth_stencil, .pColorBlendState = &color_blend, .pDynamicState = &dynamic_state, .layout = pipeline->vk_pipeline_layout, .renderPass = pipeline->compat_render_pass, .subpass = 0u, .basePipelineHandle = VK_NULL_HANDLE, .basePipelineIndex = -1}; vk_err = vkCreateGraphicsPipelines( _vk.device, VK_NULL_HANDLE, 1u, &vk_pipeline_info, NULL, &pipeline->vk_pipeline); if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; } return pipeline; } ngfi::maybe_ngfptr ngfvk_generic_pipeline::make(const ngf_compute_pipeline_info& info) NGF_NOEXCEPT { ngfi::tmp_arena().reset(); auto pipeline = ngfi::unique_ptr::make(); if (!pipeline) return NGF_ERROR_OUT_OF_MEM; VkPipelineShaderStageCreateInfo vk_shader_stage {}; ngf_error err = 
pipeline->common_init(info.spec_info, &vk_shader_stage, &info.shader_stage, 1u);
  if (err != NGF_ERROR_OK) return err;
  const VkComputePipelineCreateInfo vk_pipeline_ci = {
      .sType              = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .pNext              = NULL,
      .flags              = 0,
      .stage              = vk_shader_stage,
      .layout             = pipeline->vk_pipeline_layout,
      .basePipelineHandle = VK_NULL_HANDLE,
      .basePipelineIndex  = -1};
  VkResult vk_err = vkCreateComputePipelines(
      _vk.device, VK_NULL_HANDLE, 1, &vk_pipeline_ci, NULL, &pipeline->vk_pipeline);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  return pipeline;
}

// qsort comparator: orders reflected descriptor bindings by (set, binding)
// ascending, so duplicates across shader stages end up adjacent.
static int ngfvk_binding_comparator(const void* a, const void* b) {
  auto a_binding = (const ngfvk_reflect_binding_and_stage_mask*)a;
  auto b_binding = (const ngfvk_reflect_binding_and_stage_mask*)b;
  if (a_binding->binding_data.set < b_binding->binding_data.set) return -1;
  else if (a_binding->binding_data.set == b_binding->binding_data.set) {
    if (a_binding->binding_data.binding < b_binding->binding_data.binding) return -1;
    else if (a_binding->binding_data.binding == b_binding->binding_data.binding) return 0;
  }
  return 1;
}

// Maps a SPIRV-Reflect descriptor type to the corresponding nicegraf
// descriptor type. Returns NGF_DESCRIPTOR_TYPE_COUNT as an error sentinel for
// unsupported types (callers check for it).
static ngf_descriptor_type ngfvk_get_ngf_descriptor_type(SpvReflectDescriptorType spv_reflect_type) {
  switch (spv_reflect_type) {
  case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    return NGF_DESCRIPTOR_UNIFORM_BUFFER;
  case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    return NGF_DESCRIPTOR_IMAGE;
  case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER:
    return NGF_DESCRIPTOR_SAMPLER;
  case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
    return NGF_DESCRIPTOR_IMAGE_AND_SAMPLER;
  case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    return NGF_DESCRIPTOR_TEXEL_BUFFER;
  case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    return NGF_DESCRIPTOR_STORAGE_BUFFER;
  case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    return NGF_DESCRIPTOR_STORAGE_IMAGE;
  case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
    return NGF_DESCRIPTOR_ACCELERATION_STRUCTURE;
  default:
    return NGF_DESCRIPTOR_TYPE_COUNT;
  }
}

// Shared initialization for graphics and compute pipelines: translates
// specialization constants, fills out shader stage create-infos, then uses
// SPIRV-Reflect data to build descriptor set layouts and the pipeline layout.
ngf_error ngfvk_generic_pipeline::common_init(
    const ngf_specialization_info* spec_info,
    VkPipelineShaderStageCreateInfo* vk_shader_stages,
    const ngf_shader_stage* shader_stages,
    uint32_t nshader_stages) NGF_NOEXCEPT {
  if (spec_info) {
    auto spec_map_entries = ngfi::tmp_alloc(spec_info->nspecializations);
    vk_spec_info.pData         = spec_info->value_buffer;
    vk_spec_info.mapEntryCount = spec_info->nspecializations;
    vk_spec_info.pMapEntries   = spec_map_entries;
    size_t total_data_size = 0u;
    for (size_t i = 0; i < spec_info->nspecializations; ++i) {
      VkSpecializationMapEntry* vk_specialization = &spec_map_entries[i];
      const ngf_constant_specialization* specialization = &spec_info->specializations[i];
      vk_specialization->constantID = specialization->constant_id;
      vk_specialization->offset     = specialization->offset;
      // The byte size of each specialization constant follows from its
      // declared nicegraf type.
      size_t specialization_size = 0u;
      switch (specialization->type) {
      case NGF_TYPE_INT8:
      case NGF_TYPE_UINT8:
        specialization_size = 1u;
        break;
      case NGF_TYPE_INT16:
      case NGF_TYPE_UINT16:
      case NGF_TYPE_HALF_FLOAT:
        specialization_size = 2u;
        break;
      case NGF_TYPE_INT32:
      case NGF_TYPE_UINT32:
      case NGF_TYPE_FLOAT:
        specialization_size = 4u;
        break;
      case NGF_TYPE_DOUBLE:
        specialization_size = 8u;
        break;
      default:
        assert(false);
      }
      vk_specialization->size = specialization_size;
      total_data_size += specialization_size;
    }
    vk_spec_info.dataSize = total_data_size;
  }
  for (uint32_t s = 0u; s < nshader_stages; ++s) {
    const ngf_shader_stage stage = shader_stages[s];
    vk_shader_stages[s].sType  = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    vk_shader_stages[s].pNext  = NULL;
    vk_shader_stages[s].flags  = 0u;
    vk_shader_stages[s].stage  = stage->vk_stage_bits;
    vk_shader_stages[s].module = stage->vk_module;
    // NOTE(review): comma operator below - this behaves like two statements,
    // but was probably meant to be a semicolon.
    vk_shader_stages[s].pName = stage->entry_point_name.data(),
    vk_shader_stages[s].pSpecializationInfo = &vk_spec_info;
  }
  descriptor_set_layouts.reserve(4);
  // Extract and dedupe all descriptor bindings.
  uint32_t ntotal_bindings = 0u;
  for (uint32_t i = 0u; i < nshader_stages; ++i) {
    ntotal_bindings += shader_stages[i]->spv_reflect_module.descriptor_binding_count;
  }
  auto bindings = ngfi::tmp_alloc(ntotal_bindings);
  uint32_t bindings_offset = 0u;
  // Gather every reflected binding from every stage into one flat array,
  // tagging each with the pipeline stage that accesses it.
  for (uint32_t i = 0u; i < nshader_stages; ++i) {
    const SpvReflectShaderModule* spv_module = &shader_stages[i]->spv_reflect_module;
    const uint32_t binding_count = spv_module->descriptor_binding_count;
    for (size_t j = bindings_offset; j < bindings_offset + binding_count; ++j) {
      bindings[j].binding_data = spv_module->descriptor_bindings[j - bindings_offset];
      // Only the first entry point's stage is considered here.
      switch (spv_module->entry_points[0].shader_stage) {
      case SPV_REFLECT_SHADER_STAGE_VERTEX_BIT:
        bindings[j].mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
        break;
      case SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT:
        bindings[j].mask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
        break;
      case SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT:
        bindings[j].mask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
        break;
      default:
        assert(false);
        break;
      }
    }
    bindings_offset += binding_count;
  }
  // Sort by (set, binding) so duplicates across stages become adjacent.
  qsort(
      bindings,
      ntotal_bindings,
      sizeof(ngfvk_reflect_binding_and_stage_mask),
      ngfvk_binding_comparator);
  // After sorting, the last element carries the highest set id.
  const uint32_t last_binding_idx = ntotal_bindings > 0 ? ntotal_bindings - 1u : 0u;
  const uint32_t max_set_id =
      ntotal_bindings > 0 ? bindings[last_binding_idx].binding_data.set : 0u;
  const uint32_t nall_sets = ntotal_bindings > 0 ? max_set_id + 1u : 0u;
  auto nall_bindings_per_set = ngfi::tmp_alloc(nall_sets);
  memset(nall_bindings_per_set, 0, nall_sets * sizeof(nall_bindings_per_set[0]));
  // Dedupe in place: bindings that share (set, binding) have their stage
  // masks merged; also track the max binding index + 1 per set.
  uint32_t nunique_bindings = 0u;
  for (uint32_t cur = 0u; cur < ntotal_bindings; ++cur) {
    const ngfvk_reflect_binding_and_stage_mask* cur_binding = &bindings[cur];
    ngfvk_reflect_binding_and_stage_mask* last_unique_binding =
        nunique_bindings == 0 ? NULL : &bindings[nunique_bindings - 1];
    const SpvReflectDescriptorBinding* last_unique_binding_data =
        !last_unique_binding ? NULL : &last_unique_binding->binding_data;
    const SpvReflectDescriptorBinding* cur_binding_data = &cur_binding->binding_data;
    if (!last_unique_binding_data ||
        (last_unique_binding_data->set != cur_binding_data->set ||
         last_unique_binding_data->binding != cur_binding_data->binding)) {
      bindings[nunique_bindings++] = *cur_binding;
      nall_bindings_per_set[cur_binding_data->set] =
          NGFI_MAX(nall_bindings_per_set[cur_binding_data->set], cur_binding_data->binding + 1u);
    } else {
      last_unique_binding->mask |= cur_binding->mask;
    }
  }
  // Create descriptor set layouts.
  auto vk_set_layouts = ngfi::tmp_alloc(max_set_id + 1);
  uint32_t last_set_id = ~0u;
  for (uint32_t cur = 0u; cur < nunique_bindings;) {
    ngfvk_desc_set_layout set_layout;
    memset((void*)&set_layout, 0, sizeof(set_layout));
    const uint32_t current_set_id = bindings[cur].binding_data.set;
    if (last_set_id == ~0u || current_set_id - last_set_id > 1u) {
      // there is a gap in descriptor sets, fill it in with empty layouts;
      for (uint32_t i = last_set_id == ~0u ? 0u : last_set_id + 1; i < current_set_id; ++i) {
        const VkDescriptorSetLayoutCreateInfo vk_ds_info = {
            .sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
            .pNext        = NULL,
            .flags        = 0u,
            .bindingCount = 0u,
            .pBindings    = NULL};
        vkCreateDescriptorSetLayout(_vk.device, &vk_ds_info, NULL, &set_layout.vk_handle);
        vk_set_layouts[i] = set_layout.vk_handle;
        descriptor_set_layouts.emplace_back(ngfi::move(set_layout));
      }
    }
    const uint32_t nall_bindings = nall_bindings_per_set[bindings[cur].binding_data.set];
    if (nall_bindings > 0u) {
      set_layout.binding_properties = ngfi::fixed_array {nall_bindings};
      // NOTE(review): the memset below zeroes the .type sentinels written by
      // this loop immediately beforehand - the intended order is unclear;
      // confirm whether the sentinel loop should follow the memset instead.
      for (size_t i = 0u; i < nall_bindings; ++i) {
        set_layout.binding_properties[i].type = VK_DESCRIPTOR_TYPE_MAX_ENUM;
      }
      memset(
          set_layout.binding_properties.data(),
          0,
          sizeof(ngfvk_desc_binding) * set_layout.binding_properties.size());
    }
    // Advance `cur` past all unique bindings belonging to the current set.
    const uint32_t first_binding_in_set = cur;
    while (cur < nunique_bindings && current_set_id == bindings[cur].binding_data.set) cur++;
    const uint32_t nbindings_in_set = cur - first_binding_in_set;
    auto vk_descriptor_bindings = ngfi::tmp_alloc(nbindings_in_set);
    for (uint32_t i = first_binding_in_set; i < cur; ++i) {
      VkDescriptorSetLayoutBinding* vk_d = &vk_descriptor_bindings[i - first_binding_in_set];
      const SpvReflectDescriptorBinding* d = &bindings[i].binding_data;
      const ngf_descriptor_type ngf_desc_type = ngfvk_get_ngf_descriptor_type(d->descriptor_type);
      if (ngf_desc_type == NGF_DESCRIPTOR_TYPE_COUNT) {
        return NGF_ERROR_OBJECT_CREATION_FAILED;
      }
      vk_d->binding            = d->binding;
      vk_d->descriptorCount    = d->count;
      vk_d->descriptorType     = get_vk_descriptor_type(ngf_desc_type);
      vk_d->stageFlags         = VK_SHADER_STAGE_ALL;
      vk_d->pImmutableSamplers = NULL;
      const ngfvk_desc_binding binding_properties = {
          .type            = vk_d->descriptorType,
          .stage_accessors = bindings[i].mask,
          .readonly = ((d->block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) != 0),
          .is_multilayered_image = (d->image.arrayed != 0),
          .is_cubemap            = (d->image.dim == SpvDimCube),
          .ndescs_in_binding     = vk_d->descriptorCount};
      set_layout.binding_properties[d->binding] = binding_properties;
      set_layout.counts[ngf_desc_type]++;
      set_layout.nall_descs += vk_d->descriptorCount;
    }
    const VkDescriptorSetLayoutCreateInfo vk_ds_info = {
        .sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
        .pNext        = NULL,
        .flags        = 0u,
        .bindingCount = nbindings_in_set,
        .pBindings    = vk_descriptor_bindings};
    const VkResult vk_err =
        vkCreateDescriptorSetLayout(_vk.device, &vk_ds_info, NULL, &set_layout.vk_handle);
    vk_set_layouts[current_set_id] = set_layout.vk_handle;
    descriptor_set_layouts.emplace_back(ngfi::move(set_layout));
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
    last_set_id = current_set_id;
  }
  // Pipeline layout.
  const uint32_t ndescriptor_sets = static_cast(descriptor_set_layouts.size());
  const VkPipelineLayoutCreateInfo vk_pipeline_layout_info = {
      .sType                  = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .pNext                  = NULL,
      .flags                  = 0u,
      .setLayoutCount         = ndescriptor_sets,
      .pSetLayouts            = vk_set_layouts,
      .pushConstantRangeCount = 1u,
      .pPushConstantRanges    = &ngfvk::global::default_push_constant_range};
  const VkResult vk_err =
      vkCreatePipelineLayout(_vk.device, &vk_pipeline_layout_info, NULL, &vk_pipeline_layout);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  return NGF_ERROR_OK;
}

// Defers destruction of all pipeline-owned Vulkan objects to the current
// frame's retire list.
ngfvk_generic_pipeline::~ngfvk_generic_pipeline() NGF_NOEXCEPT {
  auto res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];
  if (vk_pipeline != VK_NULL_HANDLE) { res->retire.append(vk_pipeline); }
  if (vk_pipeline_layout != VK_NULL_HANDLE) { res->retire.append(vk_pipeline_layout); }
  for (size_t l = 0; l < descriptor_set_layouts.size(); ++l) {
    ngfvk_desc_set_layout* layout = &descriptor_set_layouts[l];
    VkDescriptorSetLayout vk_layout = layout->vk_handle;
    res->retire.append(vk_layout);
  }
  if (compat_render_pass != VK_NULL_HANDLE) res->retire.append(compat_render_pass);
}

// Creates a shader stage: a Vulkan shader module plus SPIRV-Reflect data
// used later for descriptor set layout derivation.
ngfi::maybe_ngfptr ngf_shader_stage_t::make(const
ngf_shader_stage_info& info) NGF_NOEXCEPT {
  auto stage = ngfi::unique_ptr::make();
  if (!stage) return NGF_ERROR_OUT_OF_MEM;
  VkShaderModuleCreateInfo vk_sm_info = {
      .sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
      .pNext    = NULL,
      .flags    = 0u,
      .codeSize = (info.content_length),
      .pCode    = (uint32_t*)info.content};
  VkResult vkerr = vkCreateShaderModule(_vk.device, &vk_sm_info, NULL, &stage->vk_module);
  if (vkerr != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;
  // Build reflection data from the same SPIR-V blob.
  const SpvReflectResult spverr =
      spvReflectCreateShaderModule(info.content_length, info.content, &stage->spv_reflect_module);
  if (spverr != SPV_REFLECT_RESULT_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;
  stage->vk_stage_bits = get_vk_shader_stage(info.type);
  // Copy the entry point name, including the terminating NUL.
  size_t entry_point_name_length = strlen(info.entry_point_name) + 1u;
  stage->entry_point_name = ngfi::fixed_array {entry_point_name_length};
  strncpy(stage->entry_point_name.data(), info.entry_point_name, entry_point_name_length);
  return stage;
}

ngf_shader_stage_t::~ngf_shader_stage_t() NGF_NOEXCEPT {
  // NOTE(review): the reflect module is only destroyed when the vk module is
  // non-null; if spvReflectCreateShaderModule failed after vkCreateShaderModule
  // succeeded, this still runs on a failed reflect module - confirm safe.
  if (vk_module != VK_NULL_HANDLE) {
    vkDestroyShaderModule(_vk.device, vk_module, NULL);
    spvReflectDestroyShaderModule(&spv_reflect_module);
  }
}

// Creates a buffer: allocates the VkBuffer + memory via ngfvk_alloc, then
// records metadata (size, storage type, usage) and resets sync-state tracking.
ngfi::maybe_ngfptr ngf_buffer_t::make(const ngf_buffer_info& info) NGF_NOEXCEPT {
  auto a = ngfvk_alloc::make(info);
  if (a.has_error()) { return a.error(); }
  auto buf = ngfi::unique_ptr::make();
  if (!buf) return NGF_ERROR_OUT_OF_MEM;
  buf->alloc        = ngfi::move(a.value());
  buf->size         = info.size;
  buf->storage_type = info.storage_type;
  buf->usage_flags  = info.buffer_usage;
  buf->hash         = ngfvk_ptr_hash(buf.get());
  memset(&buf->sync_state, 0, sizeof(buf->sync_state));
  buf->sync_state.layout = VK_IMAGE_LAYOUT_UNDEFINED;
  return buf;
}

// Creates an image view over a subresource range of an existing image.
// Always uses the COLOR aspect and an identity component swizzle.
ngfi::maybe_ngfptr ngf_image_view_t::make(const ngf_image_view_info& info) NGF_NOEXCEPT {
  auto view = ngfi::unique_ptr::make();
  if (!view) return NGF_ERROR_OUT_OF_MEM;
  const VkImageViewCreateInfo vk_view_info = {
      .sType      = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
      .pNext      = NULL,
      .flags      = 0u,
      .image      = (VkImage)info.src_image->alloc.obj_handle,
      .viewType   = get_vk_image_view_type(info.view_type, info.nlayers),
      .format     = get_vk_image_format(info.view_format),
      .components = {.r = VK_COMPONENT_SWIZZLE_R,
                     .g = VK_COMPONENT_SWIZZLE_G,
                     .b = VK_COMPONENT_SWIZZLE_B,
                     .a = VK_COMPONENT_SWIZZLE_A},
      .subresourceRange = {
          .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
          .baseMipLevel   = info.base_mip_level,
          .levelCount     = info.nmips,
          .baseArrayLayer = info.base_layer,
          .layerCount     = info.nlayers}};
  const VkResult vk_err = vkCreateImageView(_vk.device, &vk_view_info, NULL, &view->vk_view);
  if (vk_err != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;
  view->src = info.src_image;  // keep the source image reachable.
  return view;
}

ngf_image_view_t::~ngf_image_view_t() NGF_NOEXCEPT {
  vkDestroyImageView(_vk.device, vk_view, nullptr);
}

// Wraps an existing allocation in an ngf_image. When the allocation is
// VMA-backed, also creates two default views: one "natural" view and one
// forced-array view.
ngfi::maybe_ngfptr
ngf_image_t::make(const ngf_image_info& info, ngfvk_alloc&& alloc) NGF_NOEXCEPT {
  auto result = ngfi::unique_ptr::make();
  const bool is_cubemap = info.type == NGF_IMAGE_TYPE_CUBE;
  result->alloc = ngfi::move(alloc);
  // Clamp extents to at least 1 in every dimension.
  result->extent.width  = NGFI_MAX(1, info.extent.width);
  result->extent.height = NGFI_MAX(1, info.extent.height);
  result->extent.depth  = NGFI_MAX(1, info.extent.depth);
  // Cubemaps store 6 faces per nicegraf "layer".
  result->nlayers     = info.nlayers * (is_cubemap ? 6u : 1u);
  result->nlevels     = info.nmips;
  result->type        = info.type;
  result->usage_flags = info.usage_hint;
  result->vk_fmt      = get_vk_image_format(info.format);
  memset(&result->sync_state, 0, sizeof(result->sync_state));
  result->sync_state.layout = VK_IMAGE_LAYOUT_UNDEFINED;
  result->hash = ngfvk_ptr_hash(result.get());
  ngf_error err = NGF_ERROR_OK;
  if (result->alloc.vma_alloc) {
    err = ngfvk_create_vk_image_view(
        (VkImage)result->alloc.obj_handle,
        get_vk_image_view_type(info.type, info.nlayers),
        result->vk_fmt,
        result->nlevels,
        result->nlayers,
        &result->vkview);
    if (err != NGF_ERROR_OK) return err;
    err = ngfvk_create_vk_image_view(
        (VkImage)result->alloc.obj_handle,
        get_vk_image_view_type(info.type, 2u),  // force _ARRAY type view
        result->vk_fmt,
        result->nlevels,
        result->nlayers,
        &result->vkview_arrayed);
    if (err != NGF_ERROR_OK) return err;
  } else {
    // No VMA allocation (e.g. externally-owned image): no default views.
    result->vkview = result->vkview_arrayed = VK_NULL_HANDLE;
  }
  return result;
}

// Convenience overload: allocates the image memory, then wraps it.
ngfi::maybe_ngfptr ngf_image_t::make(const ngf_image_info& info) NGF_NOEXCEPT {
  auto maybe_alloc = ngfvk_alloc::make(info);
  if (maybe_alloc.has_error()) return maybe_alloc.error();
  return ngf_image_t::make(info, ngfi::move(maybe_alloc.value()));
}

ngf_image_t::~ngf_image_t() noexcept {
  if (vkview) { vkDestroyImageView(_vk.device, vkview, NULL); }
  if (vkview_arrayed) { vkDestroyImageView(_vk.device, vkview_arrayed, NULL); }
}

// Allocates a VkImage plus device memory for the given image description.
ngfi::value_or_ngferr ngfvk_alloc::make(const ngf_image_info& info) NGF_NOEXCEPT {
  // Decompose the nicegraf usage hint into individual flags.
  const bool is_sampled_from  = info.usage_hint & NGF_IMAGE_USAGE_SAMPLE_FROM;
  const bool is_storage       = info.usage_hint & NGF_IMAGE_USAGE_STORAGE;
  const bool is_xfer_dst      = info.usage_hint & NGF_IMAGE_USAGE_XFER_DST;
  const bool is_xfer_src      = info.usage_hint & NGF_IMAGE_USAGE_XFER_SRC;
  const bool is_attachment    = info.usage_hint & NGF_IMAGE_USAGE_ATTACHMENT;
  const bool enable_auto_mips = info.usage_hint & NGF_IMAGE_USAGE_MIPMAP_GENERATION;
  const bool is_transient     = info.usage_hint & ngfvk::global::img_usage_transient_attachment;
  const bool
is_depth_stencil = info.format == NGF_IMAGE_FORMAT_DEPTH16 ||
      info.format == NGF_IMAGE_FORMAT_DEPTH32 ||
      info.format == NGF_IMAGE_FORMAT_DEPTH24_STENCIL8;
  // Depth/stencil formats attach via the depth-stencil usage bit.
  const VkImageUsageFlagBits attachment_usage_bits = is_depth_stencil
                                                         ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
                                                         : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
  // Automatic mip generation requires both transfer directions.
  const auto usage_flags = (VkImageUsageFlags)(
      (is_sampled_from ? VK_IMAGE_USAGE_SAMPLED_BIT : 0u) |
      (is_storage ? VK_IMAGE_USAGE_STORAGE_BIT : 0u) |
      (is_attachment ? attachment_usage_bits : 0u) |
      (is_transient ? VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT : 0) |
      (is_xfer_dst ? VK_IMAGE_USAGE_TRANSFER_DST_BIT : 0u) |
      (is_xfer_src ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT : 0u) |
      (enable_auto_mips ? (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)
                        : 0u));
  const bool is_cubemap = info.type == NGF_IMAGE_TYPE_CUBE;
  const VkFormat vk_image_format = get_vk_image_format(info.format);
  const VkImageType vk_image_type = get_vk_image_type(info.type);
  const VkImageCreateFlags create_flags = is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u;
  // Probe whether optimal tiling supports this format/usage combination; fall
  // back to linear tiling when it does not.
  VkImageFormatProperties dummy_props;
  const bool optimal_tiling_supported =
      vkGetPhysicalDeviceImageFormatProperties(
          _vk.phys_dev,
          vk_image_format,
          vk_image_type,
          VK_IMAGE_TILING_OPTIMAL,
          usage_flags,
          create_flags,
          &dummy_props) == VK_SUCCESS;
  const VkImageCreateInfo vk_image_info = {
      .sType     = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .pNext     = NULL,
      .flags     = create_flags,
      .imageType = vk_image_type,
      .format    = vk_image_format,
      .extent    = {.width  = info.extent.width,
                    .height = info.extent.height,
                    .depth  = info.extent.depth},
      .mipLevels   = info.nmips,
      .arrayLayers = info.nlayers * (!is_cubemap ? 1u : 6u),  // 6 faces per cube layer.
      .samples     = get_vk_sample_count(info.sample_count),
      .tiling      = optimal_tiling_supported ? VK_IMAGE_TILING_OPTIMAL : VK_IMAGE_TILING_LINEAR,
      .usage       = usage_flags,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
      .queueFamilyIndexCount = 0,
      .pQueueFamilyIndices   = NULL,
      .initialLayout         = VK_IMAGE_LAYOUT_UNDEFINED};
  VmaAllocationCreateInfo vma_alloc_info = {
      .flags          = 0u,
      .usage          = VMA_MEMORY_USAGE_GPU_ONLY,
      .requiredFlags  = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
      .preferredFlags = 0u,
      .memoryTypeBits = 0u,
      .pool           = VK_NULL_HANDLE,
      // Non-NULL pUserData tags this allocation as an image; see
      // ngfvk_alloc::destroy below, which uses it to choose between
      // vmaDestroyImage and vmaDestroyBuffer.
      .pUserData = (void*)0x1};
  VkImage img;
  VmaAllocation alloc;
  const VkResult vk_err = vmaCreateImage(
      _vk.allocator, &vk_image_info, &vma_alloc_info, (VkImage*)&img, &alloc, nullptr);
  if (vk_err == VK_SUCCESS) {
    ngfvk_alloc result;
    result.obj_handle = (uintptr_t)img;
    result.vma_alloc  = alloc;
    return result;
  } else {
    return NGF_ERROR_OBJECT_CREATION_FAILED;
  }
}

// Allocates a VkBuffer plus device memory for the given buffer description.
ngfi::value_or_ngferr ngfvk_alloc::make(const ngf_buffer_info& info) NGF_NOEXCEPT {
  if (info.buffer_usage == 0u) {
    NGFI_DIAG_ERROR("Buffer usage not specified.");
    return NGF_ERROR_INVALID_OPERATION;
  }
  // Host-visible device-local memory is an optional hardware capability.
  if (info.storage_type > NGF_BUFFER_STORAGE_DEVICE_LOCAL &&
      !ngfvk::global::phys_device_caps.device_local_memory_is_host_visible) {
    NGFI_DIAG_ERROR("Host-visible device-local storage requested, but not supported.");
    return NGF_ERROR_INVALID_OPERATION;
  }
  const VkBufferUsageFlags vk_usage_flags = get_vk_buffer_usage(info.buffer_usage);
  const VkMemoryPropertyFlags vk_mem_flags = get_vk_memory_flags(info.storage_type);
  const bool vk_mem_is_host_visible = vk_mem_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  const VmaMemoryUsage vma_usage_flags =
      info.storage_type >= NGF_BUFFER_STORAGE_DEVICE_LOCAL ? VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE
                                                           : VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
  const VkBufferCreateInfo buf_vk_info = {
      .sType       = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .pNext       = NULL,
      .flags       = 0u,
      .size        = info.size,
      .usage       = vk_usage_flags,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
      .queueFamilyIndexCount = 0,
      .pQueueFamilyIndices   = NULL};
  const VmaAllocationCreateInfo buf_alloc_info = {
      .flags          = ngfvk_get_vma_alloc_flags(info.storage_type),
      .usage          = vma_usage_flags,
      .requiredFlags  = vk_mem_flags,
      .preferredFlags = 0u,
      .memoryTypeBits = 0u,
      .pool           = VK_NULL_HANDLE,
      .pUserData      = NULL};  // NULL pUserData tags a buffer allocation.
  VkBuffer buf;
  VmaAllocation alloc;
  VmaAllocationInfo alloc_info {};
  const VkResult vkresult =
      vmaCreateBuffer(_vk.allocator, &buf_vk_info, &buf_alloc_info, &buf, &alloc, &alloc_info);
  if (vkresult == VK_SUCCESS) {
    ngfvk_alloc result {};
    result.obj_handle = (uintptr_t)buf;
    result.vma_alloc  = alloc;
    // A persistently-mapped pointer is only kept for host-visible memory.
    result.mapped_data = vk_mem_is_host_visible ? alloc_info.pMappedData : nullptr;
    return result;
  } else {
    return NGF_ERROR_OBJECT_CREATION_FAILED;
  }
}

// Move-assignment: releases the currently-held allocation, then steals the
// other's handles, leaving `other` empty.
// NOTE(review): no self-assignment guard - a self-move would destroy the
// allocation before stealing it; confirm callers never self-assign.
ngfvk_alloc& ngfvk_alloc::operator=(ngfvk_alloc&& other) NGF_NOEXCEPT {
  destroy();
  obj_handle        = other.obj_handle;
  other.obj_handle  = 0;
  vma_alloc         = other.vma_alloc;
  other.vma_alloc   = VK_NULL_HANDLE;
  mapped_data       = other.mapped_data;
  other.mapped_data = nullptr;
  return *this;
}

// Frees the underlying Vulkan object and its memory. The allocation's
// pUserData distinguishes images (non-NULL, set in make(ngf_image_info))
// from buffers (NULL, set in make(ngf_buffer_info)).
void ngfvk_alloc::destroy() NGF_NOEXCEPT {
  if (vma_alloc) {
    VmaAllocationInfo alloc_info {};
    vmaGetAllocationInfo(_vk.allocator, vma_alloc, &alloc_info);
    if (alloc_info.pUserData) {
      vmaDestroyImage(_vk.allocator, (VkImage)obj_handle, vma_alloc);
    } else {
      vmaDestroyBuffer(_vk.allocator, (VkBuffer)obj_handle, vma_alloc);
    }
  }
}

// Acquires the next swapchain image for the current frame, if one has not
// been acquired yet (image_idx == invalid_idx). No-op otherwise.
static ngf_error ngfvk_maybe_acquire_swapchain_image() {
  if (CURRENT_CONTEXT->swapchain && CURRENT_CONTEXT->swapchain->vk_swapchain != VK_NULL_HANDLE) {
    if (CURRENT_CONTEXT->swapchain->image_idx == ngfvk::global::invalid_idx) {
      const VkResult acquire_result = vkAcquireNextImageKHR(
          _vk.device,
          CURRENT_CONTEXT->swapchain->vk_swapchain,
          UINT64_MAX,
CURRENT_CONTEXT->swapchain->acquire_sems[CURRENT_CONTEXT->frame_id],
          VK_NULL_HANDLE,
          &CURRENT_CONTEXT->swapchain->image_idx);
      // Suboptimal is only a warning: the acquired image is still usable.
      if (acquire_result == VK_SUBOPTIMAL_KHR) {
        NGFI_DIAG_WARNING("suboptimal swapchain configuration reported by vulkan");
      } else if (acquire_result != VK_SUCCESS) {
        NGFI_DIAG_ERROR("failed to acquire swapchain image");
        return NGF_ERROR_INVALID_OPERATION;
      }
    }
    return NGF_ERROR_OK;
  } else {
    return NGF_ERROR_INVALID_OPERATION;
  }
}

// Tears down all swapchain-owned Vulkan objects. Waits for the device to go
// idle first, so immediate destruction (rather than retire lists) is safe.
ngfvk_swapchain::~ngfvk_swapchain() noexcept {
  vkDeviceWaitIdle(_vk.device);
  for (VkSemaphore sem : acquire_sems) {
    if (sem != VK_NULL_HANDLE) { vkDestroySemaphore(_vk.device, sem, nullptr); }
  }
  for (VkSemaphore sem : submit_sems) {
    if (sem != VK_NULL_HANDLE) { vkDestroySemaphore(_vk.device, sem, nullptr); }
  }
  for (VkFramebuffer fb : framebufs) {
    if (fb != VK_NULL_HANDLE) vkDestroyFramebuffer(_vk.device, fb, nullptr);
  }
  for (VkImageView view : multisample_img_views) {
    if (view != VK_NULL_HANDLE) vkDestroyImageView(_vk.device, view, nullptr);
  }
  if (vk_swapchain != VK_NULL_HANDLE) { vkDestroySwapchainKHR(_vk.device, vk_swapchain, nullptr); }
  if (depth_img) { ngf_destroy_image(depth_img); }
}

// Creates the Vulkan swapchain and its per-frame support objects for the
// given surface.
ngfi::maybe_ngfptr ngfvk_swapchain::make(
    const ngf_swapchain_info& swapchain_info,
    ngf_render_target rt,
    VkSurfaceKHR surface) noexcept {
  ngf_error err = NGF_ERROR_OK;
  VkResult vk_err = VK_SUCCESS;
  VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR;
  auto swapchain = ngfi::unique_ptr::make();
  // Check available present modes and fall back on FIFO if the requested
  // present mode is not supported.
uint32_t npresent_modes = 0u;
  vkGetPhysicalDeviceSurfacePresentModesKHR(_vk.phys_dev, surface, &npresent_modes, nullptr);
  ngfi::fixed_array present_modes {npresent_modes};
  vkGetPhysicalDeviceSurfacePresentModesKHR(
      _vk.phys_dev, surface, &npresent_modes, present_modes.data());
  // Only FIFO and IMMEDIATE are exposed through the public present-mode enum.
  static const VkPresentModeKHR modes[] = {VK_PRESENT_MODE_FIFO_KHR, VK_PRESENT_MODE_IMMEDIATE_KHR};
  const VkPresentModeKHR requested_present_mode = modes[swapchain_info.present_mode];
  for (uint32_t p = 0u; p < npresent_modes; ++p) {
    if (present_modes[p] == requested_present_mode) {
      present_mode = present_modes[p];
      break;
    }
  }

  // Check if the requested surface format is valid.
  uint32_t nformats = 0u;
  vkGetPhysicalDeviceSurfaceFormatsKHR(_vk.phys_dev, surface, &nformats, nullptr);
  ngfi::fixed_array formats {nformats};
  assert(formats.data());
  vkGetPhysicalDeviceSurfaceFormatsKHR(_vk.phys_dev, surface, &nformats, formats.data());
  const VkFormat requested_format = get_vk_image_format(swapchain_info.color_format);
  // A single VK_FORMAT_UNDEFINED entry means the surface accepts any format.
  if (!(nformats == 1 && formats[0].format == VK_FORMAT_UNDEFINED)) {
    bool found = false;
    for (size_t f = 0; !found && f < nformats; ++f) {
      found = formats[f].format == requested_format;
    }
    if (!found) {
      NGFI_DIAG_ERROR("Invalid swapchain image format requested.");
      return NGF_ERROR_INVALID_FORMAT;
    }
  }

  // Determine min/max extents.
  VkSurfaceCapabilitiesKHR surface_caps;
  vkGetPhysicalDeviceSurfaceCapabilitiesKHR(_vk.phys_dev, surface, &surface_caps);
  const VkExtent2D min_surface_extent = surface_caps.minImageExtent;
  const VkExtent2D max_surface_extent = surface_caps.maxImageExtent;

  // Determine if we should use exclusive or concurrent sharing mode for
  // swapchain images. Exclusive is possible when graphics and present are
  // the same queue family.
  const bool exclusive_sharing = _vk.gfx_family_idx == _vk.present_family_idx;
  const VkSharingMode sharing_mode =
      exclusive_sharing ? VK_SHARING_MODE_EXCLUSIVE : VK_SHARING_MODE_CONCURRENT;
  const uint32_t num_sharing_queue_families = exclusive_sharing ?
0 : 2;
  const uint32_t sharing_queue_families[] = {_vk.gfx_family_idx, _vk.present_family_idx};

  // Determine usage flags. Storage usage is only requested when the client
  // asked for compute access to swapchain images.
  const auto storage_bit =
      (VkImageUsageFlagBits)(swapchain_info.enable_compute_access ? VK_IMAGE_USAGE_STORAGE_BIT : 0);
  const auto usage_mask = (VkImageUsageFlags)(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | storage_bit);

  // Create swapchain. The requested extent is clamped to the surface's
  // supported min/max extents.
  const VkSwapchainCreateInfoKHR vk_sc_info = {
      .sType            = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
      .pNext            = NULL,
      .flags            = 0,
      .surface          = surface,
      .minImageCount    = swapchain_info.capacity_hint,
      .imageFormat      = requested_format,
      .imageColorSpace  = get_vk_color_space(swapchain_info.colorspace),
      .imageExtent      = {.width = NGFI_MIN(
                          max_surface_extent.width,
                          NGFI_MAX(min_surface_extent.width, swapchain_info.width)),
                      .height = NGFI_MIN(
                          max_surface_extent.height,
                          NGFI_MAX(min_surface_extent.height, swapchain_info.height))},
      .imageArrayLayers = 1,
      .imageUsage       = usage_mask,
      .imageSharingMode = sharing_mode,
      .queueFamilyIndexCount = num_sharing_queue_families,
      .pQueueFamilyIndices   = sharing_queue_families,
      .preTransform          = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
      .compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
      .presentMode           = present_mode};
  vk_err = vkCreateSwapchainKHR(_vk.device, &vk_sc_info, NULL, &swapchain->vk_swapchain);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }

  // Obtain swapchain images (two-call pattern: count, then handles).
  vk_err = vkGetSwapchainImagesKHR(_vk.device, swapchain->vk_swapchain, &swapchain->nimgs, nullptr);
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  swapchain->imgs = ngfi::fixed_array {swapchain->nimgs};
  if (swapchain->imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
  vk_err = vkGetSwapchainImagesKHR(
      _vk.device, swapchain->vk_swapchain, &swapchain->nimgs, swapchain->imgs.data());
  if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }

  // Create "wrapper" ngf_image objects for swapchain images.
swapchain->wrapper_imgs = ngfi::fixed_array> {swapchain->nimgs};
  if (swapchain->wrapper_imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
  const ngf_image_info wrapper_image_info = {
      .type    = NGF_IMAGE_TYPE_IMAGE_2D,
      .extent  = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1},
      .nmips   = 1u,
      .nlayers = 1u,
      .format  = swapchain_info.color_format,
      .sample_count = NGF_SAMPLE_COUNT_1,
      .usage_hint   = NGF_IMAGE_USAGE_ATTACHMENT};
  for (size_t i = 0u; i < swapchain->nimgs; ++i) {
    // Wrap the raw swapchain VkImage without taking ownership of it.
    auto wrap_img = ngf_image_t::make(
        wrapper_image_info, ngfi::move(ngfvk_alloc::wrap(swapchain->imgs[i]).value()));
    if (wrap_img.has_error()) return wrap_img.error();
    swapchain->wrapper_imgs[i] = ngfi::move(wrap_img.value());
  }

  // Create multisampled images, if necessary.
  const bool is_multisampled = (unsigned int)swapchain_info.sample_count > 1u;
  if (is_multisampled) {
    const ngf_image_info ms_image_info = {
        .type    = NGF_IMAGE_TYPE_IMAGE_2D,
        .extent  = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},
        .nmips   = 1u,
        .nlayers = 1u,
        .format  = swapchain_info.color_format,
        .sample_count = swapchain_info.sample_count,
        // Multisample contents never need to survive the pass; allow
        // lazily-allocated (transient) memory.
        .usage_hint = NGF_IMAGE_USAGE_ATTACHMENT | ngfvk::global::img_usage_transient_attachment,
    };
    swapchain->multisample_imgs = ngfi::fixed_array> {swapchain->nimgs};
    if (swapchain->multisample_imgs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
    for (size_t i = 0u; i < swapchain->nimgs; ++i) {
      auto maybe_ms_alloc = ngfvk_alloc::make(ms_image_info);
      if (maybe_ms_alloc.has_error()) { return maybe_ms_alloc.error(); }
      auto maybe_ms_img = ngf_image_t::make(ms_image_info, ngfi::move(maybe_ms_alloc.value()));
      if (maybe_ms_img.has_error()) { return maybe_ms_img.error(); }
      swapchain->multisample_imgs[i] = ngfi::move(maybe_ms_img.value());
    }

    // Create image views for multisample images.
swapchain->multisample_img_views = ngfi::fixed_array {swapchain->nimgs};
    if (swapchain->multisample_img_views.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
    for (uint32_t i = 0u; i < swapchain->nimgs; ++i) {
      err = ngfvk_create_vk_image_view(
          (VkImage)swapchain->multisample_imgs[i]->alloc.obj_handle,
          VK_IMAGE_VIEW_TYPE_2D,
          requested_format,
          1u,
          1u,
          &swapchain->multisample_img_views[i]);
      if (err != NGF_ERROR_OK) { return err; }
    }
  }

  // Create image views for swapchain images.
  for (uint32_t i = 0u; i < swapchain->nimgs; ++i) {
    err = ngfvk_create_vk_image_view(
        swapchain->imgs[i],
        VK_IMAGE_VIEW_TYPE_2D,
        requested_format,
        1u,
        1u,
        &swapchain->wrapper_imgs[i]->vkview);
    if (err != NGF_ERROR_OK) { return err; }
  }

  // Create an image for the depth attachment if necessary.
  const bool have_depth_attachment = swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED;
  if (have_depth_attachment) {
    const ngf_image_info depth_image_info = {
        .type    = NGF_IMAGE_TYPE_IMAGE_2D,
        .extent  = {.width = swapchain_info.width, .height = swapchain_info.height, .depth = 1u},
        .nmips   = 1u,
        .nlayers = 1u,
        .format  = swapchain_info.depth_format,
        .sample_count = swapchain_info.sample_count,
        // The depth buffer is transient when multisampling (its contents
        // are never resolved or read back).
        .usage_hint = NGF_IMAGE_USAGE_ATTACHMENT |
            (is_multisampled ? ngfvk::global::img_usage_transient_attachment : 0u)};
    err = ngf_create_image(&depth_image_info, &swapchain->depth_img);
    if (err != NGF_ERROR_OK) { return err; }
  } else {
    swapchain->depth_img = nullptr;
  }

  // Create framebuffers for swapchain images.
  swapchain->framebufs = ngfi::fixed_array {swapchain->nimgs};
  if (swapchain->framebufs.data() == nullptr) { return NGF_ERROR_OUT_OF_MEM; }
  // Attachment order within each framebuffer: [color, depth?, resolve?].
  const bool have_resolve_attachment = (unsigned int)swapchain_info.sample_count > 1u;
  const uint32_t depth_stencil_attachment_idx = swapchain->depth_img ? 1u : VK_ATTACHMENT_UNUSED;
  const uint32_t resolve_attachment_idx = have_resolve_attachment ? (swapchain->depth_img ?
2u : 1u) : VK_ATTACHMENT_UNUSED;
  const uint32_t nattachments = rt->nattachments;
  for (uint32_t f = 0u; f < swapchain->nimgs; ++f) {
    VkImageView attachment_views[3] {};
    // With multisampling, the MS image is the color attachment and the
    // swapchain image becomes the resolve destination.
    attachment_views[0] =
        is_multisampled ? swapchain->multisample_img_views[f] : swapchain->wrapper_imgs[f]->vkview;
    if (depth_stencil_attachment_idx != VK_ATTACHMENT_UNUSED) {
      attachment_views[depth_stencil_attachment_idx] = swapchain->depth_img->vkview;
    }
    if (resolve_attachment_idx != VK_ATTACHMENT_UNUSED) {
      attachment_views[resolve_attachment_idx] = swapchain->wrapper_imgs[f]->vkview;
    }
    const VkFramebufferCreateInfo fb_info = {
        .sType           = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
        .pNext           = NULL,
        .flags           = 0u,
        .renderPass      = rt->compat_render_pass,
        .attachmentCount = nattachments,
        .pAttachments    = attachment_views,
        .width           = swapchain_info.width,
        .height          = swapchain_info.height,
        .layers          = 1u};
    vk_err = vkCreateFramebuffer(_vk.device, &fb_info, NULL, &swapchain->framebufs[f]);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  }

  // Create semaphores to be signaled when a swapchain image is acquired,
  // and when a swapchain image is ready to be presented.
swapchain->acquire_sems = ngfi::fixed_array {swapchain->nimgs};
  swapchain->submit_sems  = ngfi::fixed_array { swapchain->nimgs};
  if (swapchain->acquire_sems.data() == nullptr || swapchain->submit_sems.data() == nullptr) {
    return NGF_ERROR_OUT_OF_MEM;
  }
  // Zero the semaphore arrays up front so the destructor can safely skip
  // any entries that never get created below.
  memset(&swapchain->acquire_sems[0], 0, sizeof(VkSemaphore) * swapchain->nimgs);
  memset(&swapchain->submit_sems[0], 0, sizeof(VkSemaphore) * swapchain->nimgs);
  for (uint32_t s = 0u; s < swapchain->nimgs; ++s) {
    const VkSemaphoreCreateInfo sem_info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, .pNext = NULL, .flags = 0};
    vk_err = vkCreateSemaphore(_vk.device, &sem_info, NULL, &swapchain->acquire_sems[s]);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
    vk_err = vkCreateSemaphore(_vk.device, &sem_info, NULL, &swapchain->submit_sems[s]);
    if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; }
  }
  swapchain->image_idx = 0U;
  swapchain->width     = swapchain_info.width;
  swapchain->height    = swapchain_info.height;
  return ngfi::move(swapchain);
}

// Drops all bind operations accumulated on the command buffer.
static void ngfvk_cleanup_pending_binds(ngf_cmd_buffer cmd_buf) {
  cmd_buf->pending_bind_ops.clear();
  cmd_buf->npending_bind_ops = 0u;
}

// Transitions the command buffer into the "recording" state.
static ngf_error ngfvk_encoder_start(ngf_cmd_buffer cmd_buf) {
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_RECORDING);
  return NGF_ERROR_OK;
}

// Stashes the owning command buffer pointer in the encoder's private data.
static ngf_error ngfvk_initialize_generic_encoder(
    ngf_cmd_buffer cmd_buf,
    struct ngfi_private_encoder_data* enc) {
  enc->d0 = (uintptr_t)cmd_buf;
  return NGF_ERROR_OK;
}

// Transitions the command buffer into the "ready to submit" state.
static ngf_error ngfvk_encoder_end(
    ngf_cmd_buffer cmd_buf,
    struct ngfi_private_encoder_data* generic_enc) {
  (void)generic_enc;
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT);
  return NGF_ERROR_OK;
}

// Creates one transient command pool per in-flight frame for the given
// queue family. If a pool fails to create, the loop stops and the
// remaining entries stay null.
ngfvk_command_superpool::ngfvk_command_superpool(
    uint32_t queue_family_idx,
    uint32_t capacity,
    uint16_t ctx_id)
    : cmd_pools {capacity}, ctx_id {ctx_id} {
  memset(cmd_pools.data(), 0, sizeof(cmd_pools[0]) * capacity);
  for (VkCommandPool& pool : cmd_pools) {
    const
VkCommandPoolCreateInfo pool_ci = {
        .sType            = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
        .pNext            = nullptr,
        .flags            = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
        .queueFamilyIndex = queue_family_idx};
    if (vkCreateCommandPool(_vk.device, &pool_ci, NULL, &pool) != VK_SUCCESS) { break; }
  }
}

// Destroys all command pools owned by the superpool.
ngfvk_command_superpool::~ngfvk_command_superpool() {
  for (VkCommandPool pool : cmd_pools) {
    if (pool) vkDestroyCommandPool(_vk.device, pool, nullptr);
  }
}

// Finds the command superpool for the given context id within the current
// context, creating one (with `nframes` pools) on first use. Lookup is a
// linear scan keyed by ctx_id only.
static ngfvk_command_superpool* ngfvk_find_command_superpool(uint16_t ctx_id, uint8_t nframes) {
  ngfvk_command_superpool* result = NULL;
  for (size_t i = 0; i < CURRENT_CONTEXT->command_superpools.size(); ++i) {
    if (CURRENT_CONTEXT->command_superpools[i].ctx_id == ctx_id) {
      result = &CURRENT_CONTEXT->command_superpools[i];
      break;
    }
  }
  if (result == nullptr) {
    result = CURRENT_CONTEXT->command_superpools.emplace_back(
        ngfvk_command_superpool {_vk.gfx_family_idx, nframes, ctx_id});
  }
  return result;
}

// Allocates a primary vulkan command buffer from the pool associated with
// the given frame token, and begins recording into it. On success, both
// the pool and the command buffer are returned through the out-params.
static ngf_error ngfvk_cmd_buffer_allocate_for_frame(
    ngf_frame_token frame_token,
    VkCommandPool* pool,
    VkCommandBuffer* cmd_buf) {
  const ngfvk_command_superpool* superpool = ngfvk_find_command_superpool(
      ngfi_frame_ctx_id(frame_token), ngfi_frame_max_inflight_frames(frame_token));
  if (superpool == nullptr || superpool->cmd_pools.empty()) {
    NGFI_DIAG_ERROR("failed to allocate command buffer");
    return NGF_ERROR_OBJECT_CREATION_FAILED;
  }
  *pool = superpool->cmd_pools[ngfi_frame_id(frame_token)];
  const VkCommandBufferAllocateInfo vk_cmdbuf_info = {
      .sType              = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
      .pNext              = NULL,
      .commandPool        = *pool,
      .level              = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
      .commandBufferCount = 1u};
  const VkResult vk_err = vkAllocateCommandBuffers(_vk.device, &vk_cmdbuf_info, cmd_buf);
  if (vk_err != VK_SUCCESS) {
    NGFI_DIAG_ERROR("Failed to allocate cmd buffer, VK error: %d", vk_err);
    return NGF_ERROR_OBJECT_CREATION_FAILED;
  }
  const VkCommandBufferBeginInfo cmd_buf_begin = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
      .pNext =
NULL,
      .flags = 0,
      .pInheritanceInfo = NULL};
  vkBeginCommandBuffer(*cmd_buf, &cmd_buf_begin);
  return NGF_ERROR_OK;
}

// Creates a new command buffer object in its initial state. The underlying
// vulkan command buffer is allocated lazily, at first use (vk_cmd_buffer
// starts out null).
ngfi::maybe_ngfptr ngf_cmd_buffer_t::make() NGF_NOEXCEPT {
  auto cmd_buf = ngfi::unique_ptr::make();
  if (!cmd_buf) { return NGF_ERROR_OUT_OF_MEM; }
  cmd_buf->parent_frame        = ~0u;
  cmd_buf->state               = ngfi::CMD_BUFFER_STATE_NEW;
  cmd_buf->active_gfx_pipe     = NULL;
  cmd_buf->active_compute_pipe = NULL;
  cmd_buf->active_attr_buf     = NULL;
  cmd_buf->active_idx_buf      = NULL;
  cmd_buf->renderpass_active   = false;
  cmd_buf->compute_pass_active = false;
  cmd_buf->destroy_on_submit   = false;
  cmd_buf->active_rt           = NULL;
  cmd_buf->desc_pools_list     = NULL;
  cmd_buf->vk_cmd_buffer       = VK_NULL_HANDLE;
  cmd_buf->vk_cmd_pool         = VK_NULL_HANDLE;
  cmd_buf->pending_barriers.npending_img_bars = 0;
  cmd_buf->pending_barriers.npending_buf_bars = 0;
  cmd_buf->local_res_states = ngfvk_sync_res_hashtable {100u};
  return ngfi::move(cmd_buf);
}

// Returns the vulkan command buffer to its pool and releases all
// per-command-buffer bookkeeping.
ngf_cmd_buffer_t::~ngf_cmd_buffer_t() noexcept {
  if (vk_cmd_buffer != VK_NULL_HANDLE) {
    vkFreeCommandBuffers(_vk.device, vk_cmd_pool, 1u, &vk_cmd_buffer);
  }
  ngfvk_cleanup_pending_binds(this);
  in_pass_cmd_chnks.clear();
  virt_bind_ops_ranges.clear();
}

// Translates all pending resource bind operations into vulkan descriptor
// set allocations and writes, then binds the resulting sets on the command
// buffer's active pipeline.
static void ngfvk_execute_pending_binds(ngf_cmd_buffer cmd_buf) {
  // Binding resources requires an active pipeline.
  ngfvk_generic_pipeline* pipeline_data = NULL;
  // Exactly one of render/compute pass must be active (XOR).
  if (!(cmd_buf->renderpass_active ^ cmd_buf->compute_pass_active)) {
    NGFI_DIAG_ERROR("either a render or compute pass needs to be active to bind resources");
    return;
  }
  if (cmd_buf->renderpass_active)
    pipeline_data = (ngfvk_generic_pipeline*)(cmd_buf->active_gfx_pipe);
  else if (cmd_buf->compute_pass_active)
    pipeline_data = (ngfvk_generic_pipeline*)(cmd_buf->active_compute_pipe);
  assert(pipeline_data);

  // Get the number of active descriptor set layouts in the pipeline.
  const uint32_t ndesc_set_layouts = static_cast(pipeline_data->descriptor_set_layouts.size());

  // Reset temp. storage to make sure we have all of it available.
ngfi::tmp_arena().reset();

  // Allocate an array of descriptor set handles from temporary storage and
  // set them all to null. As we process bind operations, we'll allocate
  // descriptor sets and put them into the array as necessary.
  auto vk_desc_sets = ngfi::tmp_alloc(ndesc_set_layouts);
  // NOTE(review): memset's fill value is a byte; this relies on
  // VK_NULL_HANDLE being zero (the cast is truncated to 0).
  memset(vk_desc_sets, (uintptr_t)VK_NULL_HANDLE, ndesc_set_layouts * sizeof(vk_desc_sets[0]));

  // Allocate an array of vulkan descriptor set writes from temp storage, one write per
  // pending bind op.
  auto vk_writes = ngfi::tmp_alloc(cmd_buf->npending_bind_ops);

  // Find a descriptor pools list to allocate from.
  ngfvk_desc_pools_list* pools = ngfvk_find_desc_pools_list(cmd_buf->parent_frame);
  cmd_buf->desc_pools_list = pools;

  // Process each bind operation, constructing a corresponding
  // vulkan descriptor set write operation. Invalid bind ops are skipped
  // with a warning, so the write count may end up lower than the bind-op
  // count.
  uint32_t descriptor_write_idx = 0u;
  for (const ngf_resource_bind_op& bind_op_ref : cmd_buf->pending_bind_ops) {
    const ngf_resource_bind_op* bind_op = &bind_op_ref;

    // Ensure that a valid descriptor set is referenced by this
    // bind operation.
    if (bind_op->target_set >= ndesc_set_layouts) {
      NGFI_DIAG_WARNING(
          "invalid descriptor set %d referenced by bind operation (pipeline has "
          "%d sets) - ignoring",
          bind_op->target_set,
          ndesc_set_layouts);
      continue;
    }

    // Find the corresponding descriptor set layout.
    const ngfvk_desc_set_layout* set_layout =
        &pipeline_data->descriptor_set_layouts[bind_op->target_set];

    // Ensure that a valid binding is referenced by this bind operation.
if (bind_op->target_binding >= set_layout->binding_properties.size()) {
      NGFI_DIAG_WARNING(
          "invalid binding %d referenced by bind operation (descriptor set has %d bindings) - "
          "ignoring",
          bind_op->target_binding,
          set_layout->binding_properties.size());
      continue;
    }
    // Reject bind ops whose descriptor type doesn't match the type declared
    // in the set layout.
    if (set_layout->binding_properties[bind_op->target_binding].type !=
        get_vk_descriptor_type(bind_op->type)) {
      NGFI_DIAG_WARNING(
          "attempting to bind descriptor with unmatching type (set %d binding %d) - ignoring",
          bind_op->target_set,
          bind_op->target_binding);
      continue;
    }

    // Allocate a new descriptor set if necessary (first bind op that
    // targets this set).
    const bool need_new_desc_set = vk_desc_sets[bind_op->target_set] == VK_NULL_HANDLE;
    if (need_new_desc_set) {
      VkDescriptorSet set = ngfvk_desc_pools_list_allocate_set(pools, set_layout);
      if (set == VK_NULL_HANDLE) {
        NGFI_DIAG_ERROR("Failed to bind graphics resources - could not allocate descriptor set");
        return;
      }
      vk_desc_sets[bind_op->target_set] = set;
    }

    // At this point, we have a valid descriptor set in the `vk_sets` array.
    // We'll use it in the write operation corresponding to the current bind_op.
    VkDescriptorSet set = vk_desc_sets[bind_op->target_set];

    // Construct a vulkan descriptor set write corresponding to this bind
    // operation.
VkWriteDescriptorSet* vk_write = &vk_writes[descriptor_write_idx];
    vk_write->sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    vk_write->pNext           = NULL;
    vk_write->dstSet          = set;
    vk_write->dstBinding      = bind_op->target_binding;
    vk_write->descriptorCount = 1u;
    vk_write->dstArrayElement = bind_op->array_index;
    vk_write->descriptorType  = get_vk_descriptor_type(bind_op->type);

    switch (bind_op->type) {
    case NGF_DESCRIPTOR_STORAGE_BUFFER:
    case NGF_DESCRIPTOR_UNIFORM_BUFFER: {
      const ngf_buffer_bind_info* bind_info = &bind_op->info.buffer;
      // The buffer info must outlive this loop iteration (it is consumed by
      // vkUpdateDescriptorSets below), hence the tmp-arena allocation.
      auto vk_bind_info      = ngfi::tmp_alloc();
      vk_bind_info->buffer   = (VkBuffer)bind_info->buffer->alloc.obj_handle;
      vk_bind_info->offset   = bind_info->offset;
      vk_bind_info->range    = bind_info->range;
      vk_write->pBufferInfo  = vk_bind_info;
      break;
    }
    case NGF_DESCRIPTOR_TEXEL_BUFFER: {
      vk_write->pTexelBufferView = &(bind_op->info.texel_buffer_view->vk_buf_view);
      break;
    }
    case NGF_DESCRIPTOR_STORAGE_IMAGE:
      if (cmd_buf->renderpass_active) {
        NGFI_DIAG_WARNING("Binding storage images to non-compute shader is currently unsupported.");
        continue;
      }
      /* break omitted intentionally */
    case NGF_DESCRIPTOR_IMAGE:
    case NGF_DESCRIPTOR_SAMPLER:
    case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: {
      const ngf_image_sampler_bind_info* bind_info = &bind_op->info.image_sampler;
      const bool is_multilayered_image =
          set_layout->binding_properties[bind_op->target_binding].is_multilayered_image;
      VkImageView image_view = VK_NULL_HANDLE;
      if (bind_op->type == NGF_DESCRIPTOR_IMAGE || bind_op->type == NGF_DESCRIPTOR_STORAGE_IMAGE ||
          bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) {
        // Explicit image views take precedence; otherwise pick the arrayed
        // view for multilayered bindings.
        image_view = bind_info->is_image_view ? bind_info->resource.view->vk_view
                                              : (is_multilayered_image ?
bind_info->resource.image->vkview_arrayed : bind_info->resource.image->vkview); } auto vk_bind_info = ngfi::tmp_alloc(); vk_bind_info->imageView = VK_NULL_HANDLE; vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; vk_bind_info->sampler = VK_NULL_HANDLE; if (bind_op->type == NGF_DESCRIPTOR_IMAGE || bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) { vk_bind_info->imageView = image_view; vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } else if (bind_op->type == NGF_DESCRIPTOR_STORAGE_IMAGE) { vk_bind_info->imageView = image_view; vk_bind_info->imageLayout = VK_IMAGE_LAYOUT_GENERAL; } else if ( bind_op->type == NGF_DESCRIPTOR_SAMPLER || bind_op->type == NGF_DESCRIPTOR_IMAGE_AND_SAMPLER) { vk_bind_info->sampler = bind_info->sampler->vksampler; } vk_write->pImageInfo = vk_bind_info; break; } case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE: { auto accel_struct_info = ngfi::tmp_alloc(); accel_struct_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR; accel_struct_info->pNext = NULL; accel_struct_info->accelerationStructureCount = 1u; accel_struct_info->pAccelerationStructures = (const VkAccelerationStructureKHR*)&bind_op->info.acceleration_structure; vk_write->pNext = accel_struct_info; break; } default: assert(false); } ++descriptor_write_idx; } // perform all the vulkan descriptor set write operations to populate the // newly allocated descriptor sets. vkUpdateDescriptorSets(_vk.device, descriptor_write_idx, vk_writes, 0, NULL); // bind each of the descriptor sets individually (this ensures that desc. // sets bound for a compatible pipeline earlier in this command buffer // don't get clobbered). for (uint32_t s = 0; s < ndesc_set_layouts; ++s) { if (vk_desc_sets[s] != VK_NULL_HANDLE) { vkCmdBindDescriptorSets( cmd_buf->vk_cmd_buffer, cmd_buf->renderpass_active ? 
VK_PIPELINE_BIND_POINT_GRAPHICS : VK_PIPELINE_BIND_POINT_COMPUTE,
          pipeline_data->vk_pipeline_layout,
          s,
          1,
          &vk_desc_sets[s],
          0,
          NULL);
    }
  }
  ngfvk_cleanup_pending_binds(cmd_buf);
}

// Returns a bitstring uniquely identifying the series of load/store op combos
// for each attachment. Each attachment occupies 4 bits of the key:
// (load_op << 2) | store_op.
static uint64_t ngfvk_renderpass_ops_key(
    const ngf_render_target rt,
    const ngf_attachment_load_op* load_ops,
    const ngf_attachment_store_op* store_ops) {
  const uint32_t num_rt_attachments = rt->nattachments;
  // For the default RT, only the first two attachments (color + depth) have
  // client-specified ops.
  const uint32_t nattachments =
      rt->is_default ? (NGFI_MIN(2, num_rt_attachments)) : num_rt_attachments;
  // All 4-bit combos must fit into the 64-bit key (max 16 attachments).
  assert(nattachments < (8u * sizeof(uint64_t) / 4u));
  uint64_t result = 0u;
  for (uint32_t i = 0u; i < nattachments; ++i) {
    const uint64_t load_op_bits  = (uint64_t)load_ops[i];
    const uint64_t store_op_bits = (uint64_t)store_ops[i];
    assert(load_op_bits <= 3);
    assert(store_op_bits <= 2);
    const uint64_t attachment_ops_combo = (load_op_bits << 2u) | store_op_bits;
    result |= attachment_ops_combo << (i * 4u);
  }
  // For default RT, the load/store ops of the resolve attachments are not
  // specified by the client code explicitly. We always treat them as
  // DONT_CARE / STORE.
  if (rt->is_default && nattachments < num_rt_attachments &&
      rt->attachment_compat_pass_descs[nattachments].is_resolve) {
    result = result | ((uint64_t)0x1u << (4u * nattachments));
  }
  return result;
}

// Macros for accessing load/store ops encoded in a renderpass ops key.
#define NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) ((ops_key >> (4u * idx)) & 15u)
#define NGFVK_ATTACHMENT_LOAD_OP_FROM_KEY(idx, ops_key) \
  (get_vk_load_op((ngf_attachment_load_op)(NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) >> 2u)))
#define NGFVK_ATTACHMENT_STORE_OP_FROM_KEY(idx, ops_key) \
  (get_vk_store_op((ngf_attachment_store_op)(NGFVK_ATTACHMENT_OPS_COMBO(idx, ops_key) & 3u)))

// Looks up a renderpass object from the current context's renderpass cache, and creates
// one if it doesn't exist.
static VkRenderPass ngfvk_lookup_renderpass(ngf_render_target rt, uint64_t ops_key) {
  VkRenderPass result = VK_NULL_HANDLE;
  // Linear scan of the cache; entries are keyed by (render target, ops key).
  for (size_t r = 0; r < CURRENT_CONTEXT->renderpass_cache.size(); ++r) {
    const ngfvk_renderpass_cache_entry* cache_entry = &CURRENT_CONTEXT->renderpass_cache[r];
    if (cache_entry->rt == rt && cache_entry->ops_key == ops_key) {
      result = cache_entry->renderpass;
      break;
    }
  }
  if (result == VK_NULL_HANDLE) {
    // Cache miss: build a new renderpass whose attachment descriptions are
    // the RT's compatibility descriptions, with load/store ops decoded from
    // the key, and add it to the cache.
    const uint32_t nattachments = rt->nattachments;
    auto attachment_compat_pass_descs = ngfi::tmp_alloc(nattachments);
    const size_t rt_attachment_pass_descs_size =
        rt->nattachments * sizeof(ngfvk_attachment_pass_desc);
    memcpy(
        attachment_compat_pass_descs,
        rt->attachment_compat_pass_descs.data(),
        rt_attachment_pass_descs_size);
    for (uint32_t i = 0; i < rt->nattachments; ++i) {
      attachment_compat_pass_descs[i].load_op  = NGFVK_ATTACHMENT_LOAD_OP_FROM_KEY(i, ops_key);
      attachment_compat_pass_descs[i].store_op = NGFVK_ATTACHMENT_STORE_OP_FROM_KEY(i, ops_key);
    }
    ngfvk_renderpass_from_attachment_descs(
        nattachments, rt->attachment_descs.data(), attachment_compat_pass_descs, &result);
    const ngfvk_renderpass_cache_entry cache_entry = {
        .rt = rt, .ops_key = ops_key, .renderpass = result};
    CURRENT_CONTEXT->renderpass_cache.push_back(cache_entry);
  }
  return result;
}

// Loads the vulkan entry points if they haven't been loaded yet
// (vkGetInstanceProcAddr being null means the loader hasn't run).
static bool ngfvk_init_loader_if_necessary() {
  return !vkGetInstanceProcAddr ? vkl_init_loader() : true;
}

// Creates a vulkan instance, enabling validation layers and optional
// instance extensions as requested and supported. `validation_enabled`, if
// non-null, receives whether validation was actually turned on.
static VkResult ngfvk_create_instance(
    bool request_validation,
    bool request_debug_groups,
    VkInstance* instance_ptr,
    bool* validation_enabled) {
  // Scan through the list of instance-level extensions, determine which are supported.
bool swapchain_colorspace_supported = false;
  uint32_t ninst_exts = 0u;
  vkEnumerateInstanceExtensionProperties(NULL, &ninst_exts, NULL);
  auto ext_props = (VkExtensionProperties*)malloc(sizeof(VkExtensionProperties) * ninst_exts);
  if (ext_props == NULL) { return VK_ERROR_OUT_OF_HOST_MEMORY; }
  vkEnumerateInstanceExtensionProperties(NULL, &ninst_exts, ext_props);
  for (size_t i = 0; i < ninst_exts && !swapchain_colorspace_supported; ++i) {
    swapchain_colorspace_supported =
        (strcmp(VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME, ext_props[i].extensionName) == 0u);
  }
  free(ext_props);

  // Query the supported instance version.
  uint32_t instance_version = VK_API_VERSION_1_0;
  // vkEnumerateInstanceVersion only exists on Vulkan 1.1+ loaders.
  if (vkEnumerateInstanceVersion) { vkEnumerateInstanceVersion(&instance_version); }

  // nicegraf requires Vulkan 1.1+
  if (instance_version < VK_API_VERSION_1_1) { return VK_ERROR_INCOMPATIBLE_DRIVER; }

  // Use the highest supported version up to 1.2.
  const uint32_t api_version = NGFI_MIN(instance_version, VK_API_VERSION_1_2);

  // Names of instance-level extensions. The trailing NULL slots are filled
  // in below with optional extensions, if supported.
  const char* ext_names[] = {
      "VK_KHR_surface",
      VK_SURFACE_EXT,
      VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
      NULL,
      NULL};
  const uint32_t max_optional_exts  = 2u;
  uint32_t       optional_ext_count = 0u;
  const uint32_t nmandatory_exts    = NGFI_ARRAYSIZE(ext_names) - max_optional_exts;
  if (swapchain_colorspace_supported) {
    ext_names[nmandatory_exts + optional_ext_count++] = VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME;
  }
  if (request_validation || request_debug_groups) {
    ext_names[nmandatory_exts + optional_ext_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
  }
  assert(max_optional_exts >= optional_ext_count);
  const VkApplicationInfo app_info = {// Application information.
                                      .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
                                      .pNext = NULL,
                                      .pApplicationName = NULL,  // TODO: allow specifying app name.
                                      .pEngineName = "nicegraf",
                                      .engineVersion = VK_MAKE_VERSION(NGF_VER_MAJ, NGF_VER_MIN, 0),
                                      .apiVersion = api_version};

  // Names of instance layers to enable.
const char* validation_layer_name = "VK_LAYER_KHRONOS_validation"; const char* enabled_layers[] = {validation_layer_name}; // Check if validation layers are supported. uint32_t nlayers = 0u; vkEnumerateInstanceLayerProperties(&nlayers, NULL); auto layer_props = ngfi::tmp_alloc(nlayers); vkEnumerateInstanceLayerProperties(&nlayers, layer_props); bool validation_supported = false; for (size_t l = 0u; !validation_supported && l < nlayers; ++l) { validation_supported = (strcmp(validation_layer_name, layer_props[l].layerName) == 0u); } // Enable validation only if detailed verbosity is requested. const bool enable_validation = validation_supported && request_validation; if (validation_enabled) { *validation_enabled = enable_validation; } // Create a Vulkan instance. const uint32_t nunused_exts = (max_optional_exts - optional_ext_count); const VkInstanceCreateInfo inst_info = { .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, .pNext = NULL, .flags = 0u, .pApplicationInfo = &app_info, .enabledLayerCount = enable_validation ? 
1u : 0u, .ppEnabledLayerNames = enabled_layers, .enabledExtensionCount = (uint32_t)NGFI_ARRAYSIZE(ext_names) - nunused_exts, .ppEnabledExtensionNames = ext_names}; VkResult vk_err = vkCreateInstance(&inst_info, NULL, instance_ptr); if (vk_err != VK_SUCCESS) { NGFI_DIAG_ERROR("Failed to create a Vulkan instance, VK error %d.", vk_err); return vk_err; } return VK_SUCCESS; } static void ngfvk_cmd_bind_resources( ngf_cmd_buffer buf, const ngf_resource_bind_op* bind_operations, uint32_t nbind_operations) { for (uint32_t i = 0; i < nbind_operations; ++i) { buf->pending_bind_ops.append( bind_operations[i], CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena); ++buf->npending_bind_ops; } } static void ngfvk_cmd_buf_reset_render_cmds(ngf_cmd_buffer cmd_buf) { cmd_buf->in_pass_cmd_chnks.clear(); } static void ngfvk_cmd_buf_add_render_cmd( ngf_cmd_buffer cmd_buf, const ngfvk_render_cmd* cmd, bool in_renderpass) { if (in_renderpass) { cmd_buf->in_pass_cmd_chnks.append( *cmd, CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena); } else { assert(false); } } static void ngfvk_cmd_buf_reset_res_states(ngf_cmd_buffer cmd_buf) { cmd_buf->local_res_states.clear(); } static inline ngfvk_sync_res ngfvk_sync_res_from_buf(ngf_buffer buf) { ngfvk_sync_res sync_res = { .data = {.buf = buf}, .type = NGFVK_SYNC_RES_BUFFER, .hash = buf->hash}; return sync_res; } static inline ngfvk_sync_res ngfvk_sync_res_from_img(ngf_image img) { ngfvk_sync_res sync_res = {.data = {.img = img}, .type = NGFVK_SYNC_RES_IMAGE, .hash = img->hash}; return sync_res; } static uintptr_t ngfvk_handle_from_sync_res(const ngfvk_sync_res* res) { return res->type == NGFVK_SYNC_RES_BUFFER ? (uintptr_t)res->data.img : (uintptr_t)res->data.buf; } // Look up resource state in a given cmd buffer. // If an entry corresponding to the resource doesn't already exist, it gets created. 
static bool ngfvk_cmd_buf_lookup_sync_res(
    ngf_cmd_buffer cmd_buf,
    const ngfvk_sync_res* sync_res,
    ngfvk_sync_res_data** sync_res_data_out) {
  ngfvk_sync_res_data new_res_state {};
  bool                new_res = false;
  const ngfvk_sync_res_hashtable::keyhash keyhash = {
      ngfvk_handle_from_sync_res(sync_res), sync_res->hash};
  *sync_res_data_out =
      cmd_buf->local_res_states.get_or_insert_prehashed(keyhash, new_res_state, new_res);
  if (new_res) {
    // Freshly inserted entry: initialize it to a known default state.
    ngfvk_sync_res_data* sync_res_data = *sync_res_data_out;
    memset(sync_res_data, 0, sizeof(new_res_state));
    sync_res_data->expected_sync_req.layout = VK_IMAGE_LAYOUT_UNDEFINED;
    sync_res_data->res_handle               = ngfvk_handle_from_sync_res(sync_res);
    sync_res_data->res_type                 = sync_res->type;
    sync_res_data->pending_sync_req_idx     = ~0u;
  }
  return new_res;
}

// Clears the lowest set bit of *mask and returns that bit.
static inline uint32_t ngfvk_next_nonzero_bit(uint32_t* mask) {
  const uint32_t old_mask = *mask;
  return (*mask = old_mask & (old_mask - 1), *mask ^ old_mask);
}

// Maps a pipeline stage bit to a compact index, used to address the
// per-stage access tables below.
static inline uint32_t ngfvk_stage_idx(VkPipelineStageFlagBits bit) {
  switch (bit) {
  case VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return 0;
  case VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return 1;
  case VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return 2;
  case VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return 3;
  case VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return 4;
  case VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return 5;
  case VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return 6;
  case VK_PIPELINE_STAGE_TRANSFER_BIT: return 7;
  default: assert(false);
  }
  return ~0u;
}

// Maps an access flag bit to a compact per-stage index (0-2). Note that
// the indices are only unique within a given stage - several access bits
// intentionally share an index because they apply to different stages.
static inline uint32_t ngfvk_access_idx(VkAccessFlagBits bit) {
  switch (bit) {
  case VK_ACCESS_SHADER_READ_BIT: return 0u;
  case VK_ACCESS_SHADER_WRITE_BIT: return 1u;
  case VK_ACCESS_UNIFORM_READ_BIT: return 2u;
  case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: return 0u;
  case VK_ACCESS_INDEX_READ_BIT: return 1u;
  case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: return 0u;
  case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: return 1u;
  case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: return 0u;
  case
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: return 1u;
  case VK_ACCESS_TRANSFER_READ_BIT: return 0u;
  case VK_ACCESS_TRANSFER_WRITE_BIT: return 1u;
  default: assert(false);
  }
  return ~0u;
}

// Builds a compact bitmask with one bit per (stage, access) pair, covering
// every access in `barrier_masks` that is meaningful for the corresponding
// stage. Bit position = stage index * 3 + per-stage access index.
static uint32_t ngfvk_per_stage_access_mask(const ngfvk_sync_barrier_masks* barrier_masks) {
  // Accesses considered meaningful for each compact stage index (see
  // ngfvk_stage_idx for the index assignment).
  static const VkAccessFlags valid_access_flags[] = {
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT,  // VERTEX_INPUT
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT,          // VERTEX_SHADER
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT,          // FRAGMENT_SHADER
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
          VK_ACCESS_SHADER_WRITE_BIT,  // COMPUTE_SHADER
      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,  // EARLY_FRAGMENT_TESTS
      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,  // LATE_FRAGMENT_TESTS
      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
          VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,  // COLOR_ATTACHMENT_OUTPUT
      VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT  // TRANSFER
  };
  static const uint32_t bits_per_stage = 3u;
  uint32_t stage_mask = (uint32_t)barrier_masks->stage_mask;
  uint32_t result     = 0u;
  // Iterate over set stage bits; for each stage, fold in a bit for every
  // relevant access bit.
  while (stage_mask) {
    const VkPipelineStageFlagBits stage_bit =
        (VkPipelineStageFlagBits)ngfvk_next_nonzero_bit(&stage_mask);
    const uint32_t stg_idx     = ngfvk_stage_idx(stage_bit);
    uint32_t       access_mask = (uint32_t)barrier_masks->access_mask;
    while (access_mask) {
      const VkAccessFlagBits access_bit = (VkAccessFlagBits)ngfvk_next_nonzero_bit(&access_mask);
      if (valid_access_flags[stg_idx] & access_bit) {
        const uint32_t acc_idx = ngfvk_access_idx(access_bit);
        result |= (1 << (bits_per_stage * stg_idx + acc_idx));
      }
    }
  }
  return result;
}

// Checks whether a barrier is needed before performing an operation on a resource, given its
// sync state.
// If a barrier is not needed, returns false. Otherwise, populates the barrier data appropriately
// and returns true.
static bool ngfvk_sync_barrier( ngfvk_sync_state* sync_state, const ngfvk_sync_req* sync_req, ngfvk_barrier_data* barrier) { const VkPipelineStageFlags dst_stage_mask = sync_req->barrier_masks.stage_mask; const VkAccessFlags dst_access_mask = sync_req->barrier_masks.access_mask; const VkImageLayout dst_layout = sync_req->layout; // Mask of all accesses we care about, that perform writes. static const VkAccessFlags all_write_accesses_mask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; // Reset all barrier data. memset(barrier, 0, sizeof(*barrier)); // Decide if the requested operation necessitates a write to the resource. // Layout transitions are read-modify-write operations, thus if a layout transition is required // for the operation, we _always_ need a write, even if the actual requested access type // specified in `dst_access_mask` is read-only. const bool need_layout_transition = dst_layout != sync_state->layout; const bool dst_stages_want_write = (all_write_accesses_mask & dst_access_mask); const bool need_write = dst_stages_want_write || need_layout_transition; if (!need_write) { // Case for read-only operations. // Those can run concurrently with other read-only operations, and only need to wait for // any outstanding writes to complete. const uint32_t per_stg_acc_mask = ngfvk_per_stage_access_mask(&sync_req->barrier_masks); const bool accesses_seen_write = ((sync_state->per_stage_readers_mask & per_stg_acc_mask) == per_stg_acc_mask); if (sync_state->last_writer_masks.stage_mask != VK_PIPELINE_STAGE_NONE && !accesses_seen_write) { // If there was a preceding write, and the stage requesting the read-only operation // hasn't consumed it yet, a barrier is necessary. 
barrier->src_stage_mask |= sync_state->last_writer_masks.stage_mask; barrier->src_access_mask |= sync_state->last_writer_masks.access_mask & all_write_accesses_mask; } // Add the requested operation to the mask of ongoing reads. sync_state->active_readers_masks.stage_mask |= dst_stage_mask; sync_state->active_readers_masks.access_mask |= dst_access_mask; sync_state->per_stage_readers_mask |= per_stg_acc_mask; } else { // Case for modifying operations. // No more than a single modifying operation may be in progress at a given time. // Modifying operations have to wait for all outstanding reads and writes to complete. // Add any outstanding readers to the barrier's source mask. barrier->src_stage_mask |= sync_state->active_readers_masks.stage_mask; barrier->src_access_mask |= sync_state->active_readers_masks.access_mask; // No active readers remain after a modifying op, so zero out their corresponding masks. sync_state->active_readers_masks.stage_mask = 0u; sync_state->active_readers_masks.access_mask = 0u; sync_state->per_stage_readers_mask = 0u; // If there is an outstanding write, emit a barrier for it. // Note that we skip this if there were any outsdtanding reads, those already depend on the // write to finish, so it's sufficient to just depend on them. if (barrier->src_stage_mask == 0 && sync_state->last_writer_masks.stage_mask != VK_PIPELINE_STAGE_NONE) { barrier->src_stage_mask |= sync_state->last_writer_masks.stage_mask; barrier->src_access_mask |= sync_state->last_writer_masks.access_mask; } // Update last writer stage and access mask. sync_state->last_writer_masks.stage_mask = dst_stage_mask; sync_state->last_writer_masks.access_mask = dst_access_mask; // If the requested access was actually readonly, mark it as synced with the last write // since in that context the last write is made by the layout transition, the results of which // are made available and visible to the destination stage automatically. 
if ((dst_access_mask & all_write_accesses_mask) == 0u) { sync_state->active_readers_masks.stage_mask |= dst_stage_mask; sync_state->active_readers_masks.access_mask |= dst_access_mask; sync_state->per_stage_readers_mask |= ngfvk_per_stage_access_mask(&sync_req->barrier_masks); } } // We need a barrier if we found any source stages to wait on, or if a layout transition was // necessary. const bool need_barrier = barrier->src_stage_mask != 0u || need_layout_transition; if (need_barrier) { barrier->dst_access_mask = dst_access_mask; barrier->dst_stage_mask = dst_stage_mask; barrier->src_stage_mask = barrier->src_stage_mask ? barrier->src_stage_mask : VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; barrier->src_layout = sync_state->layout; barrier->dst_layout = dst_layout; } // Update the layout in synchronization state. sync_state->layout = dst_layout; return need_barrier; } static void ngfvk_sync_req_batch_init(uint32_t nmax_sync_reqs, ngfvk_sync_req_batch* result) { memset(result, 0, sizeof(*result)); result->pending_sync_reqs = ngfi::tmp_alloc(nmax_sync_reqs); result->sync_res_data_keys = ngfi::tmp_alloc(nmax_sync_reqs); result->freshness = ngfi::tmp_alloc(nmax_sync_reqs); memset(result->freshness, 0, sizeof(bool) * nmax_sync_reqs); } // Merges a given sync request with the resource's already pending sync request. Returns `false` and // does nothing if the operation requested by the given sync request is incompatible with the // pending sync request. 
static bool ngfvk_sync_req_merge(ngfvk_sync_req* dst_sync_req, const ngfvk_sync_req* sync_req) { static const VkAccessFlags NGFVK_RENDER_ACCESSES_MASK = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; static const VkAccessFlags NGFVK_WRITE_ACCESSES_MASK = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; const bool render_requested = ((sync_req->barrier_masks.access_mask & NGFVK_RENDER_ACCESSES_MASK) != 0); const bool write_requested = ((sync_req->barrier_masks.access_mask & NGFVK_WRITE_ACCESSES_MASK) != 0); const bool render_pending = ((dst_sync_req->barrier_masks.access_mask & NGFVK_RENDER_ACCESSES_MASK) != 0); const bool write_pending = ((dst_sync_req->barrier_masks.access_mask & NGFVK_WRITE_ACCESSES_MASK) != 0); const bool read_requested = !write_requested && (sync_req->barrier_masks.access_mask != 0); const bool read_pending = !write_pending && (dst_sync_req->barrier_masks.access_mask != 0); const bool layout_incompatible = dst_sync_req->layout != VK_IMAGE_LAYOUT_UNDEFINED && dst_sync_req->layout != VK_IMAGE_LAYOUT_GENERAL && sync_req->layout != VK_IMAGE_LAYOUT_GENERAL && dst_sync_req->layout != sync_req->layout; // Using a resource as a render target is not compatible with any other type of access. // Using a resource in a manner that requires it to be simultaneously in two incompatible layouts // results in transitioning to the GENERAL layout which is compatible with all kinds of accesses. // Merging modifying and non-modifying sync requests is allowed because the same resource might // be accessed with different descriptors in a GPU program (e.g. an image can be accessed both // as a sampled texture and as a storage image). if ((render_requested && (write_pending || read_pending || render_pending)) || (render_pending && (write_requested || read_requested))) { NGFI_DIAG_ERROR("Attempt to use a resource with incompatible accesses within a single " "draw/dispatch. 
Ignoring."); return false; } dst_sync_req->barrier_masks.access_mask |= sync_req->barrier_masks.access_mask; dst_sync_req->barrier_masks.stage_mask |= sync_req->barrier_masks.stage_mask; const bool preserve_general_layout = (dst_sync_req->layout == VK_IMAGE_LAYOUT_GENERAL || sync_req->layout == VK_IMAGE_LAYOUT_GENERAL); dst_sync_req->layout = (preserve_general_layout || layout_incompatible) ? VK_IMAGE_LAYOUT_GENERAL : sync_req->layout; return true; } static bool ngfvk_sync_req_batch_add( ngfvk_sync_req_batch* batch, ngfvk_sync_res_hashtable::key_type key, uint64_t hash, ngfvk_sync_res_data* sync_res_data, bool fresh, const ngfvk_sync_req* sync_req) { if (sync_res_data->pending_sync_req_idx == ~0u) { sync_res_data->pending_sync_req_idx = batch->npending_sync_reqs++; if (sync_res_data->res_type == NGFVK_SYNC_RES_BUFFER) { batch->nbuffer_sync_reqs++; } else if (sync_res_data->res_type == NGFVK_SYNC_RES_IMAGE) { batch->nimage_sync_reqs++; } memset( &batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx], 0, sizeof(batch->pending_sync_reqs[0])); batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx].layout = VK_IMAGE_LAYOUT_UNDEFINED; batch->sync_res_data_keys[sync_res_data->pending_sync_req_idx].key = key; batch->sync_res_data_keys[sync_res_data->pending_sync_req_idx].hash = hash; } if (fresh && sync_res_data->pending_sync_req_idx < batch->npending_sync_reqs) { batch->freshness[sync_res_data->pending_sync_req_idx] = true; } return ngfvk_sync_req_merge( &batch->pending_sync_reqs[sync_res_data->pending_sync_req_idx], sync_req); } static bool ngfvk_sync_req_batch_add_with_lookup( ngfvk_sync_req_batch* batch, ngf_cmd_buffer cmd_buf, const ngfvk_sync_res* res, const ngfvk_sync_req* sync_req) { switch(res->type) { // Ignore resources marked as read-only. 
case NGFVK_SYNC_RES_BUFFER: if (res->data.buf->sync_state.skip_hazard_tracking) return false; break; case NGFVK_SYNC_RES_IMAGE: if (res->data.img->sync_state.skip_hazard_tracking) return false; break; default:; } ngfvk_sync_res_data* sync_res_data; const bool fresh = ngfvk_cmd_buf_lookup_sync_res(cmd_buf, res, &sync_res_data); return ngfvk_sync_req_batch_add( batch, ngfvk_handle_from_sync_res(res), res->hash, sync_res_data, fresh, sync_req); } static void ngfvk_sync_commit_pending_barriers_legacy( ngfvk_pending_barrier_list* pending_bars, VkCommandBuffer cmd_buf) { auto img_bars = ngfi::tmp_alloc(pending_bars->npending_img_bars); auto buf_bars = ngfi::tmp_alloc(pending_bars->npending_buf_bars); VkPipelineStageFlags src_stage_mask = 0u; VkPipelineStageFlags dst_stage_mask = 0u; uint32_t nimg_bars = 0u; uint32_t nbuf_bars = 0u; for (const ngfvk_barrier_data& barrier_ref : pending_bars->barriers) { const ngfvk_barrier_data* barrier = &barrier_ref; src_stage_mask |= barrier->src_stage_mask; dst_stage_mask |= barrier->dst_stage_mask; switch (barrier->res.type) { case NGFVK_SYNC_RES_IMAGE: { const ngf_image img = barrier->res.data.img; VkImageMemoryBarrier* image_barrier = &img_bars[nimg_bars++]; image_barrier->sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_barrier->pNext = NULL; image_barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; image_barrier->dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; image_barrier->srcAccessMask = barrier->src_access_mask; image_barrier->dstAccessMask = barrier->dst_access_mask; image_barrier->oldLayout = barrier->src_layout; image_barrier->newLayout = barrier->dst_layout; image_barrier->image = (VkImage)img->alloc.obj_handle; image_barrier->subresourceRange.baseArrayLayer = 0u; image_barrier->subresourceRange.baseMipLevel = 0u; image_barrier->subresourceRange.layerCount = img->nlayers; image_barrier->subresourceRange.levelCount = img->nlevels; const bool is_depth = ngfvk_format_is_depth(img->vk_fmt); const bool is_stencil = 
ngfvk_format_is_stencil(img->vk_fmt); image_barrier->subresourceRange.aspectMask = (is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) | (is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : 0u) | ((!is_depth && !is_stencil) ? VK_IMAGE_ASPECT_COLOR_BIT : 0u); break; } case NGFVK_SYNC_RES_BUFFER: { const ngf_buffer buf = barrier->res.data.buf; VkBufferMemoryBarrier* buffer_barrier = &buf_bars[nbuf_bars++]; buffer_barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buffer_barrier->pNext = NULL; buffer_barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buffer_barrier->dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buffer_barrier->srcAccessMask = barrier->src_access_mask; buffer_barrier->dstAccessMask = barrier->dst_access_mask; buffer_barrier->offset = 0u; buffer_barrier->buffer = (VkBuffer)buf->alloc.obj_handle; buffer_barrier->size = buf->size; break; } default: assert(false); break; } } pending_bars->barriers.clear(); pending_bars->npending_buf_bars = 0u; pending_bars->npending_img_bars = 0u; if (nbuf_bars > 0 || nimg_bars > 0) { vkCmdPipelineBarrier( cmd_buf, src_stage_mask, dst_stage_mask, 0u, 0u, NULL, nbuf_bars, buf_bars, nimg_bars, img_bars); } } static void ngfvk_sync_commit_pending_barriers_sync2( ngfvk_pending_barrier_list* pending_bars, VkCommandBuffer cmd_buf) { auto img_bars = ngfi::tmp_alloc(pending_bars->npending_img_bars); auto buf_bars = ngfi::tmp_alloc(pending_bars->npending_buf_bars); uint32_t nimg_bars = 0u; uint32_t nbuf_bars = 0u; for (const ngfvk_barrier_data& barrier_ref : pending_bars->barriers) { const ngfvk_barrier_data* barrier = &barrier_ref; switch (barrier->res.type) { case NGFVK_SYNC_RES_IMAGE: { const ngf_image img = barrier->res.data.img; VkImageMemoryBarrier2* image_barrier = &img_bars[nimg_bars++]; image_barrier->sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2; image_barrier->pNext = NULL; image_barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; image_barrier->dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; 
image_barrier->srcStageMask = barrier->src_stage_mask; image_barrier->dstStageMask = barrier->dst_stage_mask; image_barrier->srcAccessMask = barrier->src_access_mask; image_barrier->dstAccessMask = barrier->dst_access_mask; image_barrier->oldLayout = barrier->src_layout; image_barrier->newLayout = barrier->dst_layout; image_barrier->image = (VkImage)img->alloc.obj_handle; image_barrier->subresourceRange.baseArrayLayer = 0u; image_barrier->subresourceRange.baseMipLevel = 0u; image_barrier->subresourceRange.layerCount = img->nlayers; image_barrier->subresourceRange.levelCount = img->nlevels; const bool is_depth = ngfvk_format_is_depth(img->vk_fmt); const bool is_stencil = ngfvk_format_is_stencil(img->vk_fmt); image_barrier->subresourceRange.aspectMask = (is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : 0u) | (is_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : 0u) | ((!is_depth && !is_stencil) ? VK_IMAGE_ASPECT_COLOR_BIT : 0u); break; } case NGFVK_SYNC_RES_BUFFER: { const ngf_buffer buf = barrier->res.data.buf; VkBufferMemoryBarrier2* buffer_barrier = &buf_bars[nbuf_bars++]; buffer_barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2; buffer_barrier->pNext = NULL; buffer_barrier->srcStageMask = barrier->src_stage_mask; buffer_barrier->dstStageMask = barrier->dst_stage_mask; buffer_barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buffer_barrier->dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buffer_barrier->srcAccessMask = barrier->src_access_mask; buffer_barrier->dstAccessMask = barrier->dst_access_mask; buffer_barrier->offset = 0u; buffer_barrier->buffer = (VkBuffer)buf->alloc.obj_handle; buffer_barrier->size = buf->size; break; } default: assert(false); break; } } pending_bars->barriers.clear(); pending_bars->npending_buf_bars = 0u; pending_bars->npending_img_bars = 0u; if (nbuf_bars > 0 || nimg_bars > 0) { const VkDependencyInfo dep_info = { .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, .pNext = NULL, .dependencyFlags = 0u, .memoryBarrierCount = 0u, .pMemoryBarriers = 
NULL, .bufferMemoryBarrierCount = nbuf_bars, .pBufferMemoryBarriers = buf_bars, .imageMemoryBarrierCount = nimg_bars, .pImageMemoryBarriers = img_bars}; vkCmdPipelineBarrier2(cmd_buf, &dep_info); } } static void ngfvk_sync_commit_pending_barriers( ngfvk_pending_barrier_list* pending_bars, VkCommandBuffer cmd_buf) { if (vkCmdPipelineBarrier2) { ngfvk_sync_commit_pending_barriers_sync2(pending_bars, cmd_buf); } else { ngfvk_sync_commit_pending_barriers_legacy(pending_bars, cmd_buf); } } static void ngfvk_sync_req_batch_process(ngfvk_sync_req_batch* batch, ngf_cmd_buffer cmd_buf) { for (size_t i = 0u; i < batch->npending_sync_reqs; ++i) { auto sync_res_data = cmd_buf->local_res_states.get_prehashed(batch->sync_res_data_keys[i]); if (!sync_res_data) { NGFI_DIAG_WARNING( "Internal error - resource missing from cmd buffer's synchronization table?"); assert(false); } const ngfvk_sync_req* sync_req = &batch->pending_sync_reqs[i]; const bool fresh = batch->freshness[i]; ngfvk_barrier_data barrier_data; const bool barrier_needed = ngfvk_sync_barrier(&sync_res_data->sync_state, sync_req, &barrier_data); if (barrier_needed && !fresh) { barrier_data.res.type = sync_res_data->res_type; if (barrier_data.res.type == NGFVK_SYNC_RES_IMAGE) { barrier_data.res.data.img = (ngf_image)sync_res_data->res_handle; ++cmd_buf->pending_barriers.npending_img_bars; } else { barrier_data.res.data.buf = (ngf_buffer)sync_res_data->res_handle; ++cmd_buf->pending_barriers.npending_buf_bars; } cmd_buf->pending_barriers.barriers.append( barrier_data, CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena); sync_res_data->had_barrier = true; } sync_res_data->pending_sync_req_idx = ~0u; if (!sync_res_data->had_barrier) { sync_res_data->expected_sync_req.barrier_masks.stage_mask |= sync_req->barrier_masks.stage_mask; sync_res_data->expected_sync_req.barrier_masks.access_mask |= sync_req->barrier_masks.access_mask; // Make note of the initial layout with which the resource is expected to be 
used. if (sync_res_data->expected_sync_req.layout == VK_IMAGE_LAYOUT_UNDEFINED) { sync_res_data->expected_sync_req.layout = sync_req->layout; } } } } static void ngfvk_sync_req_batch_commit(ngfvk_sync_req_batch* batch, ngf_cmd_buffer cmd_buf) { ngfvk_sync_req_batch_process(batch, cmd_buf); ngfvk_sync_commit_pending_barriers(&cmd_buf->pending_barriers, cmd_buf->vk_cmd_buffer); } static void ngfvk_handle_single_sync_req( ngf_cmd_buffer cmd_buf, const ngfvk_sync_res* res, const ngfvk_sync_req* sync_req) { bool fresh = false; ngfvk_sync_res_hashtable::keyhash sync_res_data_key; ngfvk_sync_req empty_sync_req = {.barrier_masks = {0u, 0u}, .layout = VK_IMAGE_LAYOUT_UNDEFINED}; ngfvk_sync_req_batch batch = { .sync_res_data_keys = &sync_res_data_key, .pending_sync_reqs = &empty_sync_req, .freshness = &fresh, .npending_sync_reqs = 0, .nbuffer_sync_reqs = 0, .nimage_sync_reqs = 0}; ngfvk_sync_req_batch_add_with_lookup(&batch, cmd_buf, res, sync_req); ngfvk_sync_req_batch_commit(&batch, cmd_buf); } static ngfvk_sync_res ngfvk_sync_res_from_bind_op(const ngf_resource_bind_op* bind_op) { switch (bind_op->type) { case NGF_DESCRIPTOR_IMAGE: case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: case NGF_DESCRIPTOR_STORAGE_IMAGE: return ngfvk_sync_res_from_img( bind_op->info.image_sampler.is_image_view ? bind_op->info.image_sampler.resource.view->src : bind_op->info.image_sampler.resource.image); break; case NGF_DESCRIPTOR_STORAGE_BUFFER: case NGF_DESCRIPTOR_UNIFORM_BUFFER: return ngfvk_sync_res_from_buf(bind_op->info.buffer.buffer); break; case NGF_DESCRIPTOR_TEXEL_BUFFER: return ngfvk_sync_res_from_buf(bind_op->info.texel_buffer_view->buffer); break; default: break; } const ngfvk_sync_res none = {.data = {.buf = NULL}, .type = NGFVK_SYNC_RES_COUNT}; return none; } // Returns a sync request corresponding to the given bind operation. 
static ngfvk_sync_req ngfvk_sync_req_for_bind_op(
    const ngf_resource_bind_op*   bind_op,
    const ngfvk_generic_pipeline* pipeline) {
  // Start from an "empty" request: no stages, no accesses, no particular layout.
  ngfvk_sync_req sync_req;
  memset(&sync_req, 0, sizeof(sync_req));
  sync_req.layout = VK_IMAGE_LAYOUT_UNDEFINED;

  // Bind ops that target non-existent sets/bindings should be disregarded.
  if (bind_op->target_set >= pipeline->descriptor_set_layouts.size()) return sync_req;
  const ngfvk_desc_set_layout* set_layout =
      &pipeline->descriptor_set_layouts[bind_op->target_set];
  if (bind_op->target_binding >= set_layout->binding_properties.size()) return sync_req;

  const auto& binding_props = set_layout->binding_properties[bind_op->target_binding];
  const bool  is_read_only  = binding_props.readonly;

  // Stages that access this binding, as recorded in the pipeline's set layout.
  sync_req.barrier_masks.stage_mask = binding_props.stage_accessors;

  // Access mask (and, for images, required layout) depends on the descriptor type.
  switch (bind_op->type) {
  case NGF_DESCRIPTOR_UNIFORM_BUFFER:
    sync_req.barrier_masks.access_mask = VK_ACCESS_UNIFORM_READ_BIT;
    break;
  case NGF_DESCRIPTOR_IMAGE:
  case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER:
    sync_req.barrier_masks.access_mask = VK_ACCESS_SHADER_READ_BIT;
    sync_req.layout                    = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    break;
  case NGF_DESCRIPTOR_STORAGE_BUFFER:
    sync_req.barrier_masks.access_mask =
        VK_ACCESS_SHADER_READ_BIT | (is_read_only ? 0u : VK_ACCESS_SHADER_WRITE_BIT);
    break;
  case NGF_DESCRIPTOR_STORAGE_IMAGE:
    sync_req.barrier_masks.access_mask =
        VK_ACCESS_SHADER_READ_BIT | (is_read_only ? 0u : VK_ACCESS_SHADER_WRITE_BIT);
    sync_req.layout = VK_IMAGE_LAYOUT_GENERAL;
    break;
  case NGF_DESCRIPTOR_TEXEL_BUFFER:
    sync_req.barrier_masks.access_mask = VK_ACCESS_SHADER_READ_BIT;
    break;
  case NGF_DESCRIPTOR_SAMPLER:
    // Samplers are not sync-tracked resources - request no stages at all.
    sync_req.barrier_masks.stage_mask = 0u;
    break;
  case NGF_DESCRIPTOR_ACCELERATION_STRUCTURE:
    sync_req.barrier_masks.stage_mask = 0u;
    break;
  default:
    assert(0);
  }
  return sync_req;
}

// Actually records renderpass commands into a command buffer.
static void ngfvk_cmd_buf_record_render_cmds( ngf_cmd_buffer buf, const ngfi::chunked_list& cmd_list) { ngfi::tmp_arena().reset(); for (const ngfvk_render_cmd& cmd_ref : cmd_list) { const ngfvk_render_cmd* cmd = &cmd_ref; switch (cmd->type) { case NGFVK_RENDER_CMD_BIND_PIPELINE: { buf->active_gfx_pipe = cmd->data.pipeline; // If we had a pipeline bound for which there have been resources bound, but no draw call // executed, commit those resources to actual descriptor sets and bind them so that the next // pipeline is able to "see" those resources, provided that it's compatible. if (buf->active_gfx_pipe && buf->npending_bind_ops > 0u) { ngfvk_execute_pending_binds(buf); } vkCmdBindPipeline( buf->vk_cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, ((ngfvk_generic_pipeline*)(cmd->data.pipeline))->vk_pipeline); break; } case NGFVK_RENDER_CMD_SET_VIEWPORT: { const VkViewport viewport = { .x = (float)cmd->data.rect.x, .y = (float)cmd->data.rect.y, .width = NGFI_MAX(1, (float)cmd->data.rect.width), .height = NGFI_MAX(1, (float)cmd->data.rect.height), .minDepth = 0.0f, .maxDepth = 1.0f}; vkCmdSetViewport(buf->vk_cmd_buffer, 0u, 1u, &viewport); break; } case NGFVK_RENDER_CMD_SET_SCISSOR: { const ngf_irect2d* r = &cmd->data.rect; const VkRect2D scissor_rect = {.offset = {r->x, r->y}, .extent = {r->width, r->height}}; vkCmdSetScissor(buf->vk_cmd_buffer, 0u, 1u, &scissor_rect); break; } case NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE: { vkCmdSetStencilReference( buf->vk_cmd_buffer, VK_STENCIL_FACE_FRONT_BIT, cmd->data.stencil_values.front); vkCmdSetStencilReference( buf->vk_cmd_buffer, VK_STENCIL_FACE_BACK_BIT, cmd->data.stencil_values.back); break; } case NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK: { vkCmdSetStencilCompareMask( buf->vk_cmd_buffer, VK_STENCIL_FACE_FRONT_BIT, cmd->data.stencil_values.front); vkCmdSetStencilCompareMask( buf->vk_cmd_buffer, VK_STENCIL_FACE_BACK_BIT, cmd->data.stencil_values.back); break; } case NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK: { 
vkCmdSetStencilWriteMask( buf->vk_cmd_buffer, VK_STENCIL_FACE_FRONT_BIT, cmd->data.stencil_values.front); vkCmdSetStencilWriteMask( buf->vk_cmd_buffer, VK_STENCIL_FACE_BACK_BIT, cmd->data.stencil_values.back); break; } case NGFVK_RENDER_CMD_SET_DEPTH_BIAS: { vkCmdSetDepthBias( buf->vk_cmd_buffer, cmd->data.depth_bias.const_factor, cmd->data.depth_bias.clamp, cmd->data.depth_bias.slope_factor); break; } case NGFVK_RENDER_CMD_BIND_RESOURCE: { ngfvk_cmd_bind_resources(buf, &cmd->data.bind_resource, 1u); break; } case NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER: { VkDeviceSize vkoffset = cmd->data.bind_attrib_buffer.offset; vkCmdBindVertexBuffers( buf->vk_cmd_buffer, cmd->data.bind_attrib_buffer.binding, 1, (VkBuffer*)&cmd->data.bind_attrib_buffer.buffer->alloc.obj_handle, &vkoffset); break; } case NGFVK_RENDER_CMD_BIND_INDEX_BUFFER: { const VkIndexType idx_type = get_vk_index_type(cmd->data.bind_index_buffer.type); assert(idx_type == VK_INDEX_TYPE_UINT16 || idx_type == VK_INDEX_TYPE_UINT32); vkCmdBindIndexBuffer( buf->vk_cmd_buffer, (VkBuffer)cmd->data.bind_index_buffer.buffer->alloc.obj_handle, cmd->data.bind_index_buffer.offset, idx_type); break; } case NGFVK_RENDER_CMD_DRAW: { // Allocate and write descriptor sets. ngfvk_execute_pending_binds(buf); // With all resources bound, we may perform the draw operation. 
if (cmd->data.draw.indexed) { vkCmdDrawIndexed( buf->vk_cmd_buffer, cmd->data.draw.nelements, cmd->data.draw.ninstances, cmd->data.draw.first_element, 0u, 0u); } else { vkCmdDraw( buf->vk_cmd_buffer, cmd->data.draw.nelements, cmd->data.draw.ninstances, cmd->data.draw.first_element, 0u); } break; } default: assert(false); } } ngfi::tmp_arena().reset(); } static void ngfvk_debug_label_begin(VkCommandBuffer b, const char* name) { if (vkCmdBeginDebugUtilsLabelEXT) { const VkDebugUtilsLabelEXT label = { .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, .pNext = NULL, .pLabelName = name, .color = {0.f, 0.f, 0.f, 0.f}}; vkCmdBeginDebugUtilsLabelEXT(b, &label); } } static void ngfvk_debug_label_end(VkCommandBuffer b) { if (vkCmdEndDebugUtilsLabelEXT) { vkCmdEndDebugUtilsLabelEXT(b); } } // Submits all pending command buffers for the current frame. static ngf_error ngfvk_submit_pending_cmd_buffers( ngfvk_frame_resources* frame_res, VkSemaphore wait_semaphore, VkFence signal_fence) { ngf_error err = NGF_ERROR_OK; const uint32_t ncmd_bufs = static_cast(frame_res->submitted_cmd_bufs.size()); auto submitted_cmd_buf_handles = ngfi::frame_alloc(ncmd_bufs * 2u + 2u); uint32_t submitted_cmd_buf_handles_idx = 0u; { // Check if dummy image needs to be transitioned from UNDEFINED to GENERAL layout, // submit and aux command buffer with the appropriate barrier if so. 
pthread_mutex_lock(&_vk.dummy_res.img_mu); if (!_vk.dummy_res.image_transitioned) { _vk.dummy_res.image_transitioned = true; VkCommandBuffer aux_cmd_buf; VkCommandPool aux_cmd_pool; ngfvk_cmd_buffer_allocate_for_frame( CURRENT_CONTEXT->current_frame_token, &aux_cmd_pool, &aux_cmd_buf); const VkImageMemoryBarrier bar[] = { { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .pNext = NULL, .srcAccessMask = 0, .dstAccessMask = 0, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, .newLayout = VK_IMAGE_LAYOUT_GENERAL, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .image = (VkImage)_vk.dummy_res.img->alloc.obj_handle, .subresourceRange = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0u, .levelCount = 1u, .baseArrayLayer = 0u, .layerCount = 1u}, }, {.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .pNext = NULL, .srcAccessMask = 0, .dstAccessMask = 0, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, .newLayout = VK_IMAGE_LAYOUT_GENERAL, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .image = (VkImage)_vk.dummy_res.cube->alloc.obj_handle, .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0u, .levelCount = 1u, .baseArrayLayer = 0u, .layerCount = 6u}}}; vkCmdPipelineBarrier(aux_cmd_buf, 0, 0, 0, 0, NULL, 0, NULL, 2, bar); vkEndCommandBuffer(aux_cmd_buf); submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf; frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool}); } pthread_mutex_unlock(&_vk.dummy_res.img_mu); } ngfvk_pending_barrier_list pending_patch_barriers; pending_patch_barriers.npending_img_bars = 0; pending_patch_barriers.npending_buf_bars = 0; for (size_t c = 0; c < frame_res->submitted_cmd_bufs.size(); ++c) { ngf_cmd_buffer cmd_buf = frame_res->submitted_cmd_bufs[c]; ngfi::tmp_arena().reset(); for (auto& entry : cmd_buf->local_res_states) { ngfvk_sync_res_data* cmd_buf_res_state = &entry.value; 
ngfvk_sync_state* global_sync_state = cmd_buf_res_state->res_type == NGFVK_SYNC_RES_IMAGE ? &(((ngf_image)cmd_buf_res_state->res_handle)->sync_state) : &(((ngf_buffer)cmd_buf_res_state->res_handle)->sync_state); ngfvk_barrier_data patch_barrier_data; if (ngfvk_sync_barrier( global_sync_state, &cmd_buf_res_state->expected_sync_req, &patch_barrier_data)) { patch_barrier_data.res.type = cmd_buf_res_state->res_type; if (patch_barrier_data.res.type == NGFVK_SYNC_RES_IMAGE) { patch_barrier_data.res.data.img = (ngf_image)cmd_buf_res_state->res_handle; pending_patch_barriers.npending_img_bars++; } else { patch_barrier_data.res.data.buf = (ngf_buffer)cmd_buf_res_state->res_handle; pending_patch_barriers.npending_buf_bars++; } pending_patch_barriers.barriers.append( patch_barrier_data, CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena); } if (cmd_buf_res_state->sync_state.last_writer_masks.access_mask != 0) { const bool skip_hazard_tracking = global_sync_state->skip_hazard_tracking; *global_sync_state = cmd_buf_res_state->sync_state; global_sync_state->skip_hazard_tracking = skip_hazard_tracking; } else { global_sync_state->active_readers_masks.access_mask |= cmd_buf_res_state->sync_state.active_readers_masks.access_mask; global_sync_state->per_stage_readers_mask |= cmd_buf_res_state->sync_state.per_stage_readers_mask; } } if (pending_patch_barriers.npending_buf_bars + pending_patch_barriers.npending_img_bars > 0u) { VkCommandBuffer aux_cmd_buf; VkCommandPool aux_cmd_pool; ngfvk_cmd_buffer_allocate_for_frame( CURRENT_CONTEXT->current_frame_token, &aux_cmd_pool, &aux_cmd_buf); ngfvk_debug_label_begin(aux_cmd_buf, "ngf - patch barrier cmd buffer"); ngfvk_sync_commit_pending_barriers(&pending_patch_barriers, aux_cmd_buf); ngfvk_debug_label_end(aux_cmd_buf); vkEndCommandBuffer(aux_cmd_buf); submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf; frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool}); } 
pending_patch_barriers.barriers.clear(); submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = cmd_buf->vk_cmd_buffer; NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_SUBMITTED); cmd_buf->active_gfx_pipe = NULL; cmd_buf->active_compute_pipe = NULL; cmd_buf->active_rt = NULL; ngfvk_cmd_buf_reset_res_states(cmd_buf); frame_res->retire.append( ngfvk_cmd_buf_with_pool {cmd_buf->vk_cmd_buffer, cmd_buf->vk_cmd_pool}); cmd_buf->vk_cmd_buffer = VK_NULL_HANDLE; cmd_buf->vk_cmd_pool = VK_NULL_HANDLE; if (cmd_buf->destroy_on_submit) { ngf_destroy_cmd_buffer(cmd_buf); } } frame_res->submitted_cmd_bufs.clear(); // Transition the swapchain image to VK_IMAGE_LAYOUT_PRESENT_SRC if necessary. const bool needs_present = CURRENT_CONTEXT->swapchain && wait_semaphore != VK_NULL_HANDLE; if (needs_present) { if (CURRENT_CONTEXT->swapchain->image_idx == ngfvk::global::invalid_idx) ngfvk_maybe_acquire_swapchain_image(); ngf_image swapchain_image = CURRENT_CONTEXT->swapchain->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx].get(); if (swapchain_image->sync_state.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { VkCommandBuffer aux_cmd_buf; VkCommandPool aux_cmd_pool; ngfvk_cmd_buffer_allocate_for_frame( CURRENT_CONTEXT->current_frame_token, &aux_cmd_pool, &aux_cmd_buf); const VkImageMemoryBarrier swapchain_mem_bar = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .pNext = NULL, .srcAccessMask = swapchain_image->sync_state.last_writer_masks.access_mask, .dstAccessMask = 0u, .oldLayout = swapchain_image->sync_state.layout, .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .image = (VkImage)swapchain_image->alloc.obj_handle, .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0u, .levelCount = 1u, .baseArrayLayer = 0u, .layerCount = 1u}}; vkCmdPipelineBarrier( aux_cmd_buf, swapchain_image->sync_state.last_writer_masks.stage_mask, 
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0u, 0u, NULL, 0u, NULL, 1u, &swapchain_mem_bar); vkEndCommandBuffer(aux_cmd_buf); memset(&swapchain_image->sync_state, 0, sizeof(swapchain_image->sync_state)); swapchain_image->sync_state.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; submitted_cmd_buf_handles[submitted_cmd_buf_handles_idx++] = aux_cmd_buf; frame_res->retire.append(ngfvk_cmd_buf_with_pool {aux_cmd_buf, aux_cmd_pool}); } } const VkPipelineStageFlags wait_masks[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}; const VkSubmitInfo submit_info = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, .waitSemaphoreCount = needs_present ? 1u : 0u, .pWaitSemaphores = needs_present ? &wait_semaphore : NULL, .pWaitDstStageMask = wait_masks, .commandBufferCount = submitted_cmd_buf_handles_idx, .pCommandBuffers = submitted_cmd_buf_handles, .signalSemaphoreCount = needs_present ? 1u : 0u, .pSignalSemaphores = needs_present ? &CURRENT_CONTEXT->swapchain->submit_sems[CURRENT_CONTEXT->swapchain->image_idx] : NULL}; VkResult submit_result = vkQueueSubmit(_vk.gfx_queue, 1, &submit_info, signal_fence); if (submit_result != VK_SUCCESS) err = NGF_ERROR_INVALID_OPERATION; return err; } static void ngfvk_reset_desc_pools_list(ngfvk_desc_pools_list* superpool) { for (ngfvk_desc_pool* pool = superpool->list; pool; pool = pool->next) { vkResetDescriptorPool(_vk.device, pool->vk_pool, 0u); memset(&pool->utilization, 0, sizeof(pool->utilization)); } superpool->active_pool = superpool->list; } #if defined(__APPLE__) void* ngfvk_create_ca_metal_layer(const ngf_swapchain_info*); #endif void ngfi_dump_sys_alloc_dbgstats(FILE* out); static bool ngfi_skip_hazard_tracking_for_bind_op(const ngf_resource_bind_op& op) { switch (op.type) { case NGF_DESCRIPTOR_UNIFORM_BUFFER: case NGF_DESCRIPTOR_STORAGE_BUFFER: return op.info.buffer.buffer->sync_state.skip_hazard_tracking; case NGF_DESCRIPTOR_STORAGE_IMAGE: case NGF_DESCRIPTOR_IMAGE: case NGF_DESCRIPTOR_IMAGE_AND_SAMPLER: return 
!op.info.image_sampler.is_image_view ? op.info.image_sampler.resource.image->sync_state.skip_hazard_tracking : op.info.image_sampler.resource.view->src->sync_state.skip_hazard_tracking; default: return false; } } #pragma endregion #pragma region external_funcs extern "C" ngf_error ngf_get_device_list(const ngf_device** devices, uint32_t* ndevices) NGF_NOEXCEPT { if (!ngfvk_init_loader_if_necessary()) { return NGF_ERROR_OPERATION_FAILED; } if (ngfvk::global::num_phys_devices == 0) { ngf_error err = NGF_ERROR_OK; VkInstance tmp_instance = VK_NULL_HANDLE; VkResult vk_err = ngfvk_create_instance(false, false, &tmp_instance, NULL); if (vk_err != VK_SUCCESS) { return NGF_ERROR_OBJECT_CREATION_FAILED; } auto tmp_proc_addr = [tmp_instance](PtrT, const char* name) { return (PtrT)vkGetInstanceProcAddr(tmp_instance, name); }; #define NGFVK_PROCADDR(name) (tmp_proc_addr((PFN_vk##name) nullptr, "vk" #name)) auto enumerate_vk_phys_devs = NGFVK_PROCADDR(EnumeratePhysicalDevices); auto get_vk_phys_dev_properties = NGFVK_PROCADDR(GetPhysicalDeviceProperties); auto get_vk_phys_dev_features = NGFVK_PROCADDR(GetPhysicalDeviceFeatures); auto get_vk_phys_dev_mem_props = NGFVK_PROCADDR(GetPhysicalDeviceMemoryProperties); auto enumerate_extension_props = NGFVK_PROCADDR(EnumerateDeviceExtensionProperties); auto destroy_vk_instance = NGFVK_PROCADDR(DestroyInstance); #undef NGFVK_PROCADDR ngfvk::global::num_phys_devices = ngfvk::global::max_phys_dev; VkPhysicalDevice phys_devs[ngfvk::global::max_phys_dev]; vk_err = enumerate_vk_phys_devs(tmp_instance, &ngfvk::global::num_phys_devices, phys_devs); if (vk_err == VK_SUCCESS) { for (size_t i = 0; i < ngfvk::global::num_phys_devices; ++i) { VkPhysicalDeviceProperties dev_props; VkPhysicalDeviceFeatures dev_features; VkPhysicalDeviceMemoryProperties mem_props; get_vk_phys_dev_properties(phys_devs[i], &dev_props); get_vk_phys_dev_features(phys_devs[i], &dev_features); get_vk_phys_dev_mem_props(phys_devs[i], &mem_props); ngfvk_device_info* 
ngfdevinfo = &ngfvk::global::phys_device_infos[i]; ngfdevinfo->device_id = dev_props.deviceID; ngfdevinfo->vendor_id = dev_props.vendorID; ngf_device* ngfdev = &ngfvk::global::phys_devices[i]; ngfdev->handle = (ngf_device_handle)i; switch (dev_props.deviceType) { case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_HIGH; break; case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: case VK_PHYSICAL_DEVICE_TYPE_CPU: ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_LOW; break; default: ngfdev->performance_tier = NGF_DEVICE_PERFORMANCE_TIER_UNKNOWN; } strncpy( ngfdev->name, dev_props.deviceName, NGFI_MIN(NGF_DEVICE_NAME_MAX_LENGTH, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE)); ngf_device_capabilities* devcaps = &ngfdev->capabilities; const VkPhysicalDeviceLimits* vkdevlimits = &dev_props.limits; // Populate basic device capabilities. devcaps->clipspace_z_zero_to_one = true; devcaps->uniform_buffer_offset_alignment = (size_t)vkdevlimits->minUniformBufferOffsetAlignment; devcaps->storage_buffer_offset_alignment = (size_t)vkdevlimits->minStorageBufferOffsetAlignment; devcaps->texel_buffer_offset_alignment = (size_t)vkdevlimits->minTexelBufferOffsetAlignment; devcaps->max_vertex_input_attributes_per_pipeline = vkdevlimits->maxVertexInputAttributes; devcaps->max_sampled_images_per_stage = vkdevlimits->maxPerStageDescriptorSampledImages; devcaps->max_samplers_per_stage = vkdevlimits->maxPerStageDescriptorSamplers; devcaps->max_fragment_input_components = vkdevlimits->maxFragmentInputComponents; devcaps->max_fragment_inputs = (devcaps->max_fragment_input_components) / 4; /* as per vk spec. 
*/ devcaps->max_1d_image_dimension = vkdevlimits->maxImageDimension1D; devcaps->max_2d_image_dimension = vkdevlimits->maxImageDimension2D; devcaps->max_3d_image_dimension = vkdevlimits->maxImageDimension3D; devcaps->max_cube_image_dimension = vkdevlimits->maxImageDimensionCube; devcaps->max_image_layers = vkdevlimits->maxImageArrayLayers; devcaps->max_color_attachments_per_pass = vkdevlimits->maxColorAttachments; devcaps->max_uniform_buffers_per_stage = vkdevlimits->maxPerStageDescriptorUniformBuffers; devcaps->max_sampler_anisotropy = vkdevlimits->maxSamplerAnisotropy; devcaps->max_uniform_buffer_range = vkdevlimits->maxUniformBufferRange; devcaps->cubemap_arrays_supported = dev_features.imageCubeArray; devcaps->framebuffer_color_sample_counts = vkdevlimits->framebufferColorSampleCounts; devcaps->framebuffer_depth_sample_counts = vkdevlimits->framebufferDepthSampleCounts; devcaps->texture_color_sample_counts = vkdevlimits->sampledImageColorSampleCounts; devcaps->texture_depth_sample_counts = vkdevlimits->sampledImageDepthSampleCounts; devcaps->max_supported_framebuffer_color_sample_count = ngfi_get_highest_sample_count(devcaps->framebuffer_color_sample_counts); devcaps->max_supported_framebuffer_depth_sample_count = ngfi_get_highest_sample_count(devcaps->framebuffer_depth_sample_counts); devcaps->max_supported_texture_color_sample_count = ngfi_get_highest_sample_count(devcaps->texture_color_sample_counts); devcaps->max_supported_texture_depth_sample_count = ngfi_get_highest_sample_count(devcaps->texture_depth_sample_counts); // Device capabilities: detect device-local host-visible memory. 
devcaps->device_local_memory_is_host_visible = false; for (size_t mem_type_idx = 0u; !devcaps->device_local_memory_is_host_visible && mem_type_idx < mem_props.memoryTypeCount; ++mem_type_idx) { const VkMemoryType* mem_type = &mem_props.memoryTypes[mem_type_idx]; const VkMemoryPropertyFlags local_visible = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; if ((mem_type->propertyFlags & local_visible) == local_visible) { // Some systems only expose <= 256M device-local host-visible memory, we don't want // that. Only set the cap flag if a large region of device-local memory is also // host-visible. devcaps->device_local_memory_is_host_visible = mem_props.memoryHeaps[mem_type->heapIndex].size > (256u * 1024u * 1024u); } } // Device capabilities: determine enabled extensions. ngfi::array supported_phys_dev_exts; uint32_t nsupported_phys_dev_exts = 0u; vk_err = enumerate_extension_props(phys_devs[i], nullptr, &nsupported_phys_dev_exts, nullptr); supported_phys_dev_exts.resize(nsupported_phys_dev_exts); vk_err = enumerate_extension_props( phys_devs[i], nullptr, &nsupported_phys_dev_exts, supported_phys_dev_exts.data()); auto ext_supported = [&](const char* ext_name) { for (const VkExtensionProperties& supported_ext : supported_phys_dev_exts) { if (strcmp(ext_name, supported_ext.extensionName) == 0) { return true; } } return false; }; auto& enabled_exts = ngfdevinfo->enabled_ext_names; auto add_optional_ext = [&enabled_exts, &ext_supported](const char* ext_name) { const bool r = ext_supported(ext_name); if (r) enabled_exts.push_back(ext_name); return r; }; enabled_exts.push_back("VK_KHR_maintenance1"); enabled_exts.push_back("VK_KHR_swapchain"); const bool shader_float16_int8_supported = add_optional_ext("VK_KHR_shader_float16_int8"); const bool sync2_supported = add_optional_ext("VK_KHR_synchronization2"); const bool inline_ray_tracing_supported = add_optional_ext("VK_KHR_acceleration_structure") && 
add_optional_ext("VK_KHR_buffer_device_address") && add_optional_ext("VK_KHR_deferred_host_operations") && add_optional_ext("VK_KHR_spirv_1_4") && add_optional_ext("VK_KHR_shader_float_controls") && add_optional_ext("VK_KHR_ray_query") && add_optional_ext("VK_EXT_descriptor_indexing"); // Device capabilities: features structs. const VkBool32 enable_cubemap_arrays = devcaps->cubemap_arrays_supported ? VK_TRUE : VK_FALSE; ngfdevinfo->required_features = VkPhysicalDeviceFeatures { .imageCubeArray = enable_cubemap_arrays, .independentBlend = VK_TRUE, .depthBiasClamp = VK_TRUE, .samplerAnisotropy = VK_TRUE, .shaderStorageImageReadWithoutFormat = VK_TRUE, .shaderStorageImageWriteWithoutFormat = VK_TRUE}; ngfdevinfo->sf16i8_features = VkPhysicalDeviceShaderFloat16Int8Features { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES}; ngfdevinfo->sync2_features = VkPhysicalDeviceSynchronization2Features { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES}; ngfdevinfo->bda_features = VkPhysicalDeviceBufferDeviceAddressFeatures { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES}; ngfdevinfo->accls_features = VkPhysicalDeviceAccelerationStructureFeaturesKHR { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR}; ngfdevinfo->ray_query_features = VkPhysicalDeviceRayQueryFeaturesKHR { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR}; void* features_structs = nullptr; auto append_feature_struct = [&features_structs](auto& s) { s.pNext = features_structs; features_structs = &s; }; if (shader_float16_int8_supported) append_feature_struct(ngfdevinfo->sf16i8_features); if (sync2_supported) append_feature_struct(ngfdevinfo->sync2_features); if (inline_ray_tracing_supported) { append_feature_struct(ngfdevinfo->bda_features); append_feature_struct(ngfdevinfo->accls_features); append_feature_struct(ngfdevinfo->ray_query_features); } devcaps->supports_inline_raytracing = 
inline_ray_tracing_supported; ngfdevinfo->phys_dev_features2 = VkPhysicalDeviceFeatures2 { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, .pNext = features_structs}; } } else { err = NGF_ERROR_OPERATION_FAILED; } if (tmp_instance != VK_NULL_HANDLE) { destroy_vk_instance(tmp_instance, NULL); } if (err != NGF_ERROR_OK) return err; } if (devices) { *devices = ngfvk::global::phys_devices; } if (ndevices) { *ndevices = (uint32_t)ngfvk::global::num_phys_devices; } return NGF_ERROR_OK; } extern "C" ngf_error ngf_initialize(const ngf_init_info* init_info) NGF_NOEXCEPT { assert(init_info); // Sanity checks. if (_vk.instance != VK_NULL_HANDLE) { // Disallow double initialization. NGFI_DIAG_ERROR("double-initialization detected. `ngf_initialize` may only be called once.") return NGF_ERROR_INVALID_OPERATION; } // Install user-provided diagnostic callbacks and set preferred log verbosity. if (init_info->diag_info != nullptr) { ngfi_diag_info = *init_info->diag_info; } else { ngfi_diag_info.callback = nullptr; ngfi_diag_info.userdata = nullptr; ngfi_diag_info.verbosity = NGF_DIAGNOSTICS_VERBOSITY_DEFAULT; } NGFI_DIAG_INFO("Initializing nicegraf."); // Install user-provided allocation callbacks. ngfi_set_allocation_callbacks(init_info->allocation_callbacks); // Engage RenderDoc if requested. if (init_info->renderdoc_info) { ngfi_module_handle ngf_renderdoc_mod = LoadLibraryA(init_info->renderdoc_info->renderdoc_lib_path); if (ngf_renderdoc_mod != NULL) { pRENDERDOC_GetAPI RENDERDOC_GetAPI = (pRENDERDOC_GetAPI)GetProcAddress(ngf_renderdoc_mod, "RENDERDOC_GetAPI"); if (!RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_6_0, (void**)&_renderdoc.api)) { return NGF_ERROR_OBJECT_CREATION_FAILED; } if (init_info->renderdoc_info->renderdoc_destination_template) { _renderdoc.api->SetCaptureFilePathTemplate( init_info->renderdoc_info->renderdoc_destination_template); } _renderdoc.is_capturing = false; _renderdoc.capture_next = false; } } // Load basic vk entrypoints. 
if (!ngfvk_init_loader_if_necessary()) { NGFI_DIAG_ERROR("Failed to initialize vulkan loader!"); return NGF_ERROR_OPERATION_FAILED; } // Create vk instance, attempting to enable api validation according to user preference. bool validation_enabled = false; const VkResult instance_create_result = ngfvk_create_instance( ngfi_diag_info.verbosity == NGF_DIAGNOSTICS_VERBOSITY_DETAILED, ngfi_diag_info.enable_debug_groups, &_vk.instance, &validation_enabled); if (instance_create_result != VK_SUCCESS) { NGFI_DIAG_INFO("Failed to set up a new vulkan instance."); return NGF_ERROR_INVALID_OPERATION; } vkl_init_instance( _vk.instance); // load instance-level Vulkan functions into the global namespace. // If validation was enabled, install a debug callback to forward // vulkan debug messages to the user. if (validation_enabled) { NGFI_DIAG_INFO("vulkan validation layers enabled"); const VkDebugUtilsMessengerCreateInfoEXT debug_callback_info = { .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, .pNext = NULL, .flags = 0u, .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT, .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, .pfnUserCallback = ngfvk_debug_message_callback, .pUserData = NULL}; vkCreateDebugUtilsMessengerEXT(_vk.instance, &debug_callback_info, NULL, &_vk.debug_messenger); } else { NGFI_DIAG_INFO("vulkan validation is disabled"); } // Obtain a list of available physical devices. 
uint32_t nphysdev = ngfvk::global::max_phys_dev; VkPhysicalDevice physdevs[ngfvk::global::max_phys_dev]; VkResult vk_err = vkEnumeratePhysicalDevices(_vk.instance, &nphysdev, physdevs); if (vk_err != VK_SUCCESS) { NGFI_DIAG_ERROR("Failed to enumerate Vulkan physical devices, VK error %d.", vk_err); return NGF_ERROR_INVALID_OPERATION; } // Sanity-check the requested device handle. const uint32_t device_idx = (uint32_t)init_info->device; if (device_idx >= ngfvk::global::num_phys_devices) { return NGF_ERROR_INVALID_OPERATION; } // Pick a suitable physical device based on user's preference. uint32_t vk_device_index = ngfvk::global::invalid_idx; ngfvk_device_info* ngfdevinfo = &ngfvk::global::phys_device_infos[device_idx]; VkPhysicalDeviceProperties phys_dev_properties; for (uint32_t i = 0; i < nphysdev && vk_device_index == ngfvk::global::invalid_idx; ++i) { vkGetPhysicalDeviceProperties(physdevs[i], &phys_dev_properties); if (phys_dev_properties.deviceID == ngfdevinfo->device_id && phys_dev_properties.vendorID == ngfdevinfo->vendor_id) { vk_device_index = i; } } if (vk_device_index == ngfvk::global::invalid_idx) { NGFI_DIAG_ERROR("Failed to find a suitable physical device."); return NGF_ERROR_INVALID_OPERATION; } _vk.phys_dev = physdevs[vk_device_index]; // Obtain a list of queue family properties from the device. uint32_t num_queue_families = 0U; vkGetPhysicalDeviceQueueFamilyProperties(_vk.phys_dev, &num_queue_families, NULL); VkQueueFamilyProperties* queue_families = ngfi::tmp_arena().alloc(num_queue_families); assert(queue_families); vkGetPhysicalDeviceQueueFamilyProperties(_vk.phys_dev, &num_queue_families, queue_families); // Pick suitable queue families for graphics and present, ensuring graphics also supports compute. 
uint32_t gfx_family_idx = ngfvk::global::invalid_idx; uint32_t present_family_idx = ngfvk::global::invalid_idx; for (uint32_t q = 0; queue_families && q < num_queue_families; ++q) { const VkQueueFlags flags = queue_families[q].queueFlags; const bool is_gfx = (flags & VK_QUEUE_GRAPHICS_BIT) != 0; const bool is_present = ngfvk_query_presentation_support(_vk.phys_dev, q); const bool is_compute = (flags & VK_QUEUE_COMPUTE_BIT) != 0; if (gfx_family_idx == ngfvk::global::invalid_idx && is_gfx && is_compute) { gfx_family_idx = q; } if (present_family_idx == ngfvk::global::invalid_idx && is_present) { present_family_idx = q; } } queue_families = NULL; if (gfx_family_idx == ngfvk::global::invalid_idx || present_family_idx == ngfvk::global::invalid_idx) { NGFI_DIAG_ERROR("Could not find a suitable queue family for graphics and/or presentation."); return NGF_ERROR_INVALID_OPERATION; } _vk.gfx_family_idx = gfx_family_idx; _vk.present_family_idx = present_family_idx; // Create logical device. const float queue_prio = 1.0f; const bool same_gfx_and_present = _vk.gfx_family_idx == _vk.present_family_idx; const uint32_t num_queue_infos = (same_gfx_and_present ? 1u : 2u); VkDeviceQueueCreateInfo queue_infos[] = { {.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = NULL, .flags = 0, .queueFamilyIndex = _vk.present_family_idx, .queueCount = 1, .pQueuePriorities = &queue_prio}, {.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = NULL, .flags = 0, .queueFamilyIndex = _vk.gfx_family_idx, .queueCount = 1, .pQueuePriorities = &queue_prio}}; if (vkGetPhysicalDeviceFeatures2KHR) { vkGetPhysicalDeviceFeatures2KHR(_vk.phys_dev, &ngfdevinfo->phys_dev_features2); } const VkDeviceCreateInfo dev_info = { .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = ngfdevinfo->phys_dev_features2.pNext, .flags = 0, .queueCreateInfoCount = num_queue_infos, .pQueueCreateInfos = &queue_infos[same_gfx_and_present ? 
1u : 0u], .enabledLayerCount = 0, .ppEnabledLayerNames = NULL, .enabledExtensionCount = static_cast(ngfdevinfo->enabled_ext_names.size()), .ppEnabledExtensionNames = ngfdevinfo->enabled_ext_names.data(), .pEnabledFeatures = &ngfdevinfo->required_features}; vk_err = vkCreateDevice(_vk.phys_dev, &dev_info, NULL, &_vk.device); if (vk_err != VK_SUCCESS) { NGFI_DIAG_ERROR("Failed to create a Vulkan device, VK error %d.", vk_err); return NGF_ERROR_INVALID_OPERATION; } // Load device-level entry points. vkl_init_device(_vk.device, ngfdevinfo->sync2_features.synchronization2); // Set up VMA. VmaVulkanFunctions vma_vk_fns = { .vkGetInstanceProcAddr = vkGetInstanceProcAddr, .vkGetDeviceProcAddr = vkGetDeviceProcAddr, }; VmaAllocatorCreateInfo vma_info = { .flags = ngfvk::global::phys_devices[device_idx].capabilities.supports_inline_raytracing ? VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT : 0u, .physicalDevice = _vk.phys_dev, .device = _vk.device, .preferredLargeHeapBlockSize = 0u, .pAllocationCallbacks = NULL, .pDeviceMemoryCallbacks = NULL, .pHeapSizeLimit = NULL, .pVulkanFunctions = &vma_vk_fns, .instance = _vk.instance, .vulkanApiVersion = 0}; vk_err = vmaCreateAllocator(&vma_info, &_vk.allocator); // Obtain queue handles. vkGetDeviceQueue(_vk.device, _vk.gfx_family_idx, 0, &_vk.gfx_queue); vkGetDeviceQueue(_vk.device, _vk.present_family_idx, 0, &_vk.present_queue); // Populate device capabilities. ngfvk::global::phys_device_caps = ngfvk::global::phys_devices[init_info->device].capabilities; // Create dummy objects to pre-bind in fresh descriptor sets. 
const ngf_image_info dummy_img_info = { .type = NGF_IMAGE_TYPE_IMAGE_2D, .extent = {1u, 1u, 1u}, .nmips = 1u, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_R8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_STORAGE}; const ngf_image_info dummy_cube_info = { .type = NGF_IMAGE_TYPE_CUBE, .extent = {1u, 1u, 1u}, .nmips = 1u, .nlayers = 1u, .format = NGF_IMAGE_FORMAT_R8, .sample_count = NGF_SAMPLE_COUNT_1, .usage_hint = NGF_IMAGE_USAGE_SAMPLE_FROM | NGF_IMAGE_USAGE_STORAGE}; const ngf_buffer_info dummy_buf_info = { .size = 1u, .storage_type = NGF_BUFFER_STORAGE_DEVICE_LOCAL, .buffer_usage = NGF_BUFFER_USAGE_STORAGE_BUFFER | NGF_BUFFER_USAGE_UNIFORM_BUFFER | NGF_BUFFER_USAGE_TEXEL_BUFFER}; ngf_sampler_info dummy_samp_info; memset(&dummy_samp_info, 0, sizeof(dummy_samp_info)); ngf_create_image(&dummy_img_info, &_vk.dummy_res.img); ngf_create_image(&dummy_cube_info, &_vk.dummy_res.cube); ngf_create_buffer(&dummy_buf_info, &_vk.dummy_res.buf); ngf_create_sampler(&dummy_samp_info, &_vk.dummy_res.samp); const ngf_texel_buffer_view_info tbuf_info = {.buffer = _vk.dummy_res.buf, .offset = 0u, .size = 1u, .texel_format = NGF_IMAGE_FORMAT_R8}; ngf_create_texel_buffer_view(&tbuf_info, &_vk.dummy_res.tbuf); _vk.dummy_res.buf_info.buffer = (VkBuffer)_vk.dummy_res.buf->alloc.obj_handle; _vk.dummy_res.buf_info.offset = 0u; _vk.dummy_res.buf_info.range = 1u; _vk.dummy_res.img_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; _vk.dummy_res.img_info.imageView = _vk.dummy_res.img->vkview; _vk.dummy_res.img_info.sampler = VK_NULL_HANDLE; _vk.dummy_res.cube_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; _vk.dummy_res.cube_info.imageView = _vk.dummy_res.cube->vkview; _vk.dummy_res.cube_info.sampler = VK_NULL_HANDLE; _vk.dummy_res.img_arr_info = _vk.dummy_res.img_info; _vk.dummy_res.img_arr_info.imageView = _vk.dummy_res.img->vkview_arrayed; _vk.dummy_res.cube_arr_info = _vk.dummy_res.cube_info; _vk.dummy_res.cube_arr_info.imageView = 
_vk.dummy_res.cube->vkview_arrayed; _vk.dummy_res.samp_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; _vk.dummy_res.samp_info.imageView = VK_NULL_HANDLE; _vk.dummy_res.samp_info.sampler = _vk.dummy_res.samp->vksampler; _vk.dummy_res.imgsamp_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; _vk.dummy_res.imgsamp_info.imageView = _vk.dummy_res.img->vkview; _vk.dummy_res.imgsamp_info.sampler = _vk.dummy_res.samp->vksampler; _vk.dummy_res.imgsamp_arr_info = _vk.dummy_res.imgsamp_info; _vk.dummy_res.imgsamp_arr_info.imageView = _vk.dummy_res.img->vkview_arrayed; _vk.dummy_res.dummy_accel_struct = VK_NULL_HANDLE; _vk.dummy_res.image_transitioned = false; pthread_mutex_init(&_vk.dummy_res.img_mu, NULL); // Done! return NGF_ERROR_OK; } extern "C" void ngf_shutdown(void) NGF_NOEXCEPT { NGFI_DIAG_INFO("Shutting down nicegraf."); if (CURRENT_CONTEXT != NULL) { NGFI_DIAG_ERROR("Context not destroyed before shutdown.") } NGFI_FREE(_vk.dummy_res.tbuf); NGFI_FREE(_vk.dummy_res.img); NGFI_FREE(_vk.dummy_res.cube); NGFI_FREE(_vk.dummy_res.buf); NGFI_FREE(_vk.dummy_res.samp); if (_vk.allocator != VK_NULL_HANDLE) { vmaDestroyAllocator(_vk.allocator); } if (_vk.device != VK_NULL_HANDLE) { vkDestroyDevice(_vk.device, NULL); } if (_vk.debug_messenger) { vkDestroyDebugUtilsMessengerEXT(_vk.instance, _vk.debug_messenger, NULL); } if (_vk.instance != VK_NULL_HANDLE) { vkDestroyInstance(_vk.instance, NULL); } _vk.instance = VK_NULL_HANDLE; #if defined(__linux__) if (_vk.xcb_connection) { xcb_disconnect(_vk.xcb_connection); _vk.xcb_visualid = 0; _vk.xcb_connection = NULL; } #endif } extern "C" ngf_error ngf_create_context(const ngf_context_info* info, ngf_context* result) NGF_NOEXCEPT { assert(info); assert(result); auto maybe_ctx = ngf_context_t::make(*info); if (!maybe_ctx.has_error()) result[0] = maybe_ctx.value().release(); return maybe_ctx.has_error() ? 
maybe_ctx.error() : NGF_ERROR_OK; } extern "C" const ngf_device_capabilities* ngf_get_device_capabilities(void) NGF_NOEXCEPT { return &ngfvk::global::phys_device_caps; } extern "C" ngf_error ngf_resize_context(ngf_context ctx, uint32_t new_width, uint32_t new_height) NGF_NOEXCEPT { assert(ctx); if (!ctx || !ctx->default_render_target || !ctx->swapchain) { return NGF_ERROR_INVALID_OPERATION; } ctx->swapchain_info.width = NGFI_MAX(1, new_width); ctx->swapchain_info.height = NGFI_MAX(1, new_height); ctx->default_render_target->width = ctx->swapchain_info.width; ctx->default_render_target->height = ctx->swapchain_info.height; // swapchain needs to be explicitly destroyed before // creating a new one with the same surface. ctx->swapchain = ngfi::unique_ptr {}; auto maybe_swapchain = ngfvk_swapchain::make(ctx->swapchain_info, ctx->default_render_target.get(), ctx->surface); if (!maybe_swapchain.has_error()) { ctx->swapchain = ngfi::move(maybe_swapchain.value()); return NGF_ERROR_OK; } else { return maybe_swapchain.error(); } } extern "C" void ngf_destroy_context(ngf_context ctx) NGF_NOEXCEPT { if (ctx != nullptr) { ngfi::free(ctx); } } extern "C" ngf_error ngf_set_context(ngf_context ctx) NGF_NOEXCEPT { CURRENT_CONTEXT = ctx; return NGF_ERROR_OK; } extern "C" ngf_context ngf_get_context() NGF_NOEXCEPT { return CURRENT_CONTEXT; } extern "C" ngf_error ngf_create_cmd_buffer(const ngf_cmd_buffer_info*, ngf_cmd_buffer* result) NGF_NOEXCEPT { assert(result); auto cmd_buf = ngf_cmd_buffer_t::make(); if (!cmd_buf.has_error()) { result[0] = cmd_buf.value().release(); } return cmd_buf.has_error() ? 
cmd_buf.error() : NGF_ERROR_OK; } extern "C" ngf_error ngf_cmd_begin_render_pass_simple( ngf_cmd_buffer cmd_buf, ngf_render_target rt, float clear_color_r, float clear_color_g, float clear_color_b, float clear_color_a, float clear_depth, uint32_t clear_stencil, ngf_render_encoder* enc) NGF_NOEXCEPT { ngfi::tmp_arena().reset(); auto load_ops = ngfi::tmp_alloc(rt->nattachments); auto store_ops = ngfi::tmp_alloc(rt->nattachments); auto clears = ngfi::tmp_alloc(rt->nattachments); for (size_t i = 0u; i < rt->nattachments; ++i) { load_ops[i] = NGF_LOAD_OP_CLEAR; if (rt->attachment_descs[i].type == NGF_ATTACHMENT_COLOR) { clears[i].clear_color[0] = clear_color_r; clears[i].clear_color[1] = clear_color_g; clears[i].clear_color[2] = clear_color_b; clears[i].clear_color[3] = clear_color_a; } else if ( rt->attachment_descs[i].type == NGF_ATTACHMENT_DEPTH || rt->attachment_descs[i].type == NGF_ATTACHMENT_DEPTH_STENCIL) { clears[i].clear_depth_stencil.clear_depth = clear_depth; clears[i].clear_depth_stencil.clear_stencil = clear_stencil; } else { assert(false); } const bool needs_resolve = rt->attachment_descs[i].type == NGF_ATTACHMENT_COLOR && rt->have_resolve_attachments && rt->attachment_descs[i].sample_count > NGF_SAMPLE_COUNT_1; store_ops[i] = needs_resolve ? 
NGF_STORE_OP_RESOLVE : NGF_STORE_OP_STORE; } const ngf_render_pass_info pass_info = { .render_target = rt, .load_ops = load_ops, .store_ops = store_ops, .clears = clears, }; return ngf_cmd_begin_render_pass(cmd_buf, &pass_info, enc); } extern "C" ngf_error ngf_cmd_begin_render_pass( ngf_cmd_buffer cmd_buf, const ngf_render_pass_info* pass_info, ngf_render_encoder* enc) NGF_NOEXCEPT { if (pass_info->render_target->is_default && ngfvk_maybe_acquire_swapchain_image() != NGF_ERROR_OK) { return NGF_ERROR_INVALID_OPERATION; } ngf_error err = NGF_ERROR_OK; ngfvk_encoder_start(cmd_buf); if (err != NGF_ERROR_OK) return err; err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse); if (err != NGF_ERROR_OK) { return err; } ngfi::tmp_arena().reset(); cmd_buf->active_rt = pass_info->render_target; cmd_buf->renderpass_active = true; cmd_buf->pending_render_pass_info.render_target = pass_info->render_target; auto cloned_load_ops = ngfi::frame_alloc(pass_info->render_target->nattachments); cmd_buf->pending_render_pass_info.load_ops = cloned_load_ops; if (cmd_buf->pending_render_pass_info.load_ops == NULL) { return NGF_ERROR_OUT_OF_MEM; } memcpy( cloned_load_ops, pass_info->load_ops, sizeof(ngf_attachment_load_op) * pass_info->render_target->nattachments); auto cloned_store_ops = ngfi::frame_alloc(pass_info->render_target->nattachments); cmd_buf->pending_render_pass_info.store_ops = cloned_store_ops; if (cmd_buf->pending_render_pass_info.store_ops == NULL) { return NGF_ERROR_OUT_OF_MEM; } memcpy( cloned_store_ops, pass_info->store_ops, sizeof(ngf_attachment_store_op) * pass_info->render_target->nattachments); uint32_t nclears = 0u; auto cloned_clears = ngfi::frame_alloc(pass_info->render_target->nattachments); if (cloned_clears == NULL) { return NGF_ERROR_OUT_OF_MEM; } for (uint32_t i = 0u; i < pass_info->render_target->nattachments; ++i) { if (cmd_buf->pending_render_pass_info.load_ops[i] == NGF_LOAD_OP_CLEAR) { nclears = NGFI_MAX(nclears, i + 1); cloned_clears[i] 
= pass_info->clears[i]; } } if (nclears > 0u) { cmd_buf->pending_render_pass_info.clears = cloned_clears; } else { cmd_buf->pending_render_pass_info.clears = NULL; } cmd_buf->pending_clear_value_count = (uint16_t)nclears; ngfvk_sync_req_batch sync_req_batch; ngfvk_sync_req_batch_init(pass_info->render_target->nattachments, &sync_req_batch); for (size_t i = 0u; i < pass_info->render_target->nattachments; ++i) { const ngf_attachment_type attachment_type = pass_info->render_target->attachment_descs[i].type; const ngf_sample_count attachment_sample_count = pass_info->render_target->attachment_descs[i].sample_count; switch (attachment_type) { case NGF_ATTACHMENT_COLOR: { ngfvk_sync_req sync_req; sync_req.barrier_masks.access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; sync_req.barrier_masks.stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; sync_req.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; ngf_image color_image = cmd_buf->active_rt->is_default ? (attachment_sample_count == NGF_SAMPLE_COUNT_1 ? CURRENT_CONTEXT->swapchain ->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx] .get() : CURRENT_CONTEXT->swapchain ->multisample_imgs[CURRENT_CONTEXT->swapchain->image_idx] .get()) : pass_info->render_target->attachment_images[i]; ngfvk_sync_res res = ngfvk_sync_res_from_img(color_image); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req); break; } case NGF_ATTACHMENT_DEPTH: case NGF_ATTACHMENT_DEPTH_STENCIL: { ngfvk_sync_req sync_req; sync_req.barrier_masks.access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; sync_req.barrier_masks.stage_mask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; sync_req.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; ngf_image depth_stencil_image = cmd_buf->active_rt->is_default ? 
CURRENT_CONTEXT->swapchain->depth_img : pass_info->render_target->attachment_images[i]; ngfvk_sync_res res = ngfvk_sync_res_from_img(depth_stencil_image); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req); break; } default: assert(0); } } ngfvk_sync_req_batch_process(&sync_req_batch, cmd_buf); return NGF_ERROR_OK; } extern "C" ngf_error ngf_cmd_begin_xfer_pass( ngf_cmd_buffer cmd_buf, const ngf_xfer_pass_info* pass_info, ngf_xfer_encoder* enc) NGF_NOEXCEPT { (void)pass_info; ngf_error err = ngfvk_encoder_start(cmd_buf); if (err != NGF_ERROR_OK) return err; err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse); if (err != NGF_ERROR_OK) { return err; } cmd_buf->xfer_pass_active = true; return NGF_ERROR_OK; } extern "C" ngf_error ngf_cmd_begin_compute_pass( ngf_cmd_buffer cmd_buf, const ngf_compute_pass_info* pass_info, ngf_compute_encoder* enc) NGF_NOEXCEPT { (void)pass_info; ngf_error err = ngfvk_encoder_start(cmd_buf); if (err != NGF_ERROR_OK) return err; err = ngfvk_initialize_generic_encoder(cmd_buf, &enc->pvt_data_donotuse); if (err != NGF_ERROR_OK) { return err; } cmd_buf->compute_pass_active = true; return NGF_ERROR_OK; } extern "C" ngf_error ngf_cmd_end_render_pass(ngf_render_encoder enc) NGF_NOEXCEPT { ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc); // Commit all the pending barriers. ngfvk_sync_commit_pending_barriers(&buf->pending_barriers, buf->vk_cmd_buffer); // Begin the real render pass. const ngf_render_pass_info* pass_info = &buf->pending_render_pass_info; const VkRenderPass render_pass = ngfvk_lookup_renderpass( pass_info->render_target, ngfvk_renderpass_ops_key( pass_info->render_target, pass_info->load_ops, pass_info->store_ops)); const ngfvk_swapchain* swapchain = CURRENT_CONTEXT->swapchain.get(); const ngf_render_target target = pass_info->render_target; const VkFramebuffer fb = target->is_default ? 
swapchain->framebufs[swapchain->image_idx] : target->frame_buffer; const VkExtent2D render_extent = { target->is_default ? CURRENT_CONTEXT->swapchain_info.width : target->width, target->is_default ? CURRENT_CONTEXT->swapchain_info.height : target->height}; const uint32_t clear_value_count = buf->pending_clear_value_count; auto vk_clears = clear_value_count > 0 ? ngfi::tmp_alloc(clear_value_count) : nullptr; if (clear_value_count > 0) { for (size_t i = 0; i < clear_value_count; ++i) { VkClearValue* vk_clear_val = &vk_clears[i]; const ngf_clear* clear = &pass_info->clears[i]; if (target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH16 && target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH32 && target->attachment_descs[i].format != NGF_IMAGE_FORMAT_DEPTH24_STENCIL8) { VkClearColorValue* clear_color_var = &vk_clear_val->color; clear_color_var->float32[0] = clear->clear_color[0]; clear_color_var->float32[1] = clear->clear_color[1]; clear_color_var->float32[2] = clear->clear_color[2]; clear_color_var->float32[3] = clear->clear_color[3]; } else { VkClearDepthStencilValue* clear_depth_stencil_val = &vk_clear_val->depthStencil; clear_depth_stencil_val->depth = clear->clear_depth_stencil.clear_depth; clear_depth_stencil_val->stencil = clear->clear_depth_stencil.clear_stencil; } } } const VkRenderPassBeginInfo begin_info = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, .pNext = NULL, .renderPass = render_pass, .framebuffer = fb, .renderArea = {.offset = {0u, 0u}, .extent = render_extent}, .clearValueCount = clear_value_count, .pClearValues = vk_clears}; vkCmdBeginRenderPass(buf->vk_cmd_buffer, &begin_info, VK_SUBPASS_CONTENTS_INLINE); // Clean up after the begin operation. ngfi::tmp_arena().reset(); // Encode each pending render command. ngfvk_cmd_buf_record_render_cmds(buf, buf->in_pass_cmd_chnks); // Reset pending render command storage. ngfvk_cmd_buf_reset_render_cmds(buf); // Finish renderpass. 
  vkCmdEndRenderPass(buf->vk_cmd_buffer);
  buf->renderpass_active = false;
  buf->active_rt         = NULL;
  return ngfvk_encoder_end(buf, &enc.pvt_data_donotuse);
}

// Ends a transfer pass; no vulkan-level work, just clears the pass flag.
extern "C" ngf_error ngf_cmd_end_xfer_pass(ngf_xfer_encoder enc) NGF_NOEXCEPT {
  ngf_cmd_buffer buf    = NGFVK_ENC2CMDBUF(enc);
  buf->xfer_pass_active = false;
  return ngfvk_encoder_end(buf, &enc.pvt_data_donotuse);
}

// Ends a compute pass; mirrors ngf_cmd_end_xfer_pass.
extern "C" ngf_error ngf_cmd_end_compute_pass(ngf_compute_encoder enc) NGF_NOEXCEPT {
  ngf_cmd_buffer cmd_buf       = NGFVK_ENC2CMDBUF(enc);
  cmd_buf->compute_pass_active = false;
  return ngfvk_encoder_end(cmd_buf, &enc.pvt_data_donotuse);
}

// Puts a command buffer into the "ready" state for the given frame: resets all
// per-recording state and obtains a fresh VkCommandBuffer from the frame's pool.
extern "C" ngf_error
ngf_start_cmd_buffer(ngf_cmd_buffer cmd_buf, ngf_frame_token token) NGF_NOEXCEPT {
  assert(cmd_buf);
  NGFI_TRANSITION_CMD_BUF(cmd_buf, ngfi::CMD_BUFFER_STATE_READY);
  cmd_buf->parent_frame        = token;  // remembered to reject cross-frame submits.
  cmd_buf->desc_pools_list     = nullptr;
  cmd_buf->active_rt           = nullptr;
  cmd_buf->active_gfx_pipe     = nullptr;
  cmd_buf->active_compute_pipe = nullptr;
  cmd_buf->compute_pass_active = false;
  cmd_buf->renderpass_active   = false;
  cmd_buf->npending_bind_ops   = 0u;
  cmd_buf->virt_bind_ops_ranges.clear();
  cmd_buf->in_pass_cmd_chnks.clear();
  cmd_buf->pending_barriers.barriers.clear();
  cmd_buf->local_res_states.clear();
  ngfvk_cleanup_pending_binds(cmd_buf);
  return ngfvk_cmd_buffer_allocate_for_frame(
      token,
      &cmd_buf->vk_cmd_pool,
      &cmd_buf->vk_cmd_buffer);
}

// Destroys a command buffer immediately unless it was submitted and is still
// pending execution, in which case destruction is deferred to submission cleanup.
extern "C" void ngf_destroy_cmd_buffer(ngf_cmd_buffer buffer) NGF_NOEXCEPT {
  if (buffer && buffer->state != ngfi::CMD_BUFFER_STATE_PENDING) {
    NGFI_FREE(buffer);
  } else if (buffer) {
    buffer->destroy_on_submit = true;
  }
}

// Queues the given command buffers for submission with the current frame. The
// actual VkQueueSubmit happens later (ngfvk_submit_pending_cmd_buffers).
extern "C" ngf_error ngf_submit_cmd_buffers(uint32_t nbuffers, ngf_cmd_buffer* cmd_bufs) NGF_NOEXCEPT {
  assert(cmd_bufs);
  uint32_t               frame_id       = CURRENT_CONTEXT->frame_id;
  ngfvk_frame_resources* frame_res_data = &CURRENT_CONTEXT->frame_res[frame_id];
  for (uint32_t i = 0u; i < nbuffers; ++i) {
    ngf_cmd_buffer cmd_buf = cmd_bufs[i];
    // Reject command buffers recorded against a different frame.
    if (cmd_buf->parent_frame != CURRENT_CONTEXT->current_frame_token) {
      NGFI_DIAG_ERROR("submitting a command buffer for the wrong frame");
      return NGF_ERROR_INVALID_OPERATION;
    }
    NGFI_TRANSITION_CMD_BUF(cmd_bufs[i], ngfi::CMD_BUFFER_STATE_PENDING);
    // Retire this cmd buffer's descriptor pools with the current frame.
    if (cmd_buf->desc_pools_list) { frame_res_data->retire.append(cmd_buf->desc_pools_list); }
    vkEndCommandBuffer(cmd_buf->vk_cmd_buffer);
    frame_res_data->submitted_cmd_bufs.push_back(cmd_buf);
  }
  return NGF_ERROR_OK;
}

// Starts a new frame: advances the in-flight frame slot, optionally starts a
// renderdoc capture, resets per-frame arenas, retires completed resources, and
// hands back a token identifying this frame.
extern "C" ngf_error ngf_begin_frame(ngf_frame_token* token) NGF_NOEXCEPT {
  ngf_error err = NGF_ERROR_OK;

  // increment frame id.
  const uint32_t fi = (CURRENT_CONTEXT->frame_id + 1u) % CURRENT_CONTEXT->max_inflight_frames;
  CURRENT_CONTEXT->frame_id = fi;

  // setup frame capture
  if (_renderdoc.api && _renderdoc.capture_next) {
    _renderdoc.capture_next = false;
    _renderdoc.is_capturing = true;
    _renderdoc.api->StartFrameCapture(
        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),
        (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);
  }

  // reset stack allocators.
  ngfi::tmp_arena().reset();
  ngfi::frame_arena().reset();

  // Retire resources.
  ngfvk_frame_resources* next_frame_res = &CURRENT_CONTEXT->frame_res[fi];
  ngfvk_retire_resources(next_frame_res);
  next_frame_res->res_frame_arena.reset();
  // Invalidate the cached swapchain image index; an image is (re)acquired lazily.
  if (CURRENT_CONTEXT->swapchain) {
    CURRENT_CONTEXT->swapchain->image_idx = ngfvk::global::invalid_idx;
  }
  // The token packs a context fingerprint, the max in-flight frame count, and
  // the current frame slot.
  CURRENT_CONTEXT->current_frame_token = ngfi_encode_frame_token(
      (uint16_t)((uintptr_t)CURRENT_CONTEXT & 0xffff),
      (uint8_t)CURRENT_CONTEXT->max_inflight_frames,
      (uint8_t)CURRENT_CONTEXT->frame_id);
  *token = CURRENT_CONTEXT->current_frame_token;
  return err;
}

// Returns the image that shall be presented at the end of the current frame,
// acquiring one from the swapchain if that hasn't happened yet this frame.
extern "C" ngf_error
ngf_get_current_swapchain_image(ngf_frame_token token, ngf_image* result) NGF_NOEXCEPT {
  assert(CURRENT_CONTEXT);
  assert(result);
  if (token != CURRENT_CONTEXT->current_frame_token) {
    NGFI_DIAG_ERROR("unexpected frame token");
    return NGF_ERROR_INVALID_OPERATION;
  }
  if (!CURRENT_CONTEXT->swapchain || CURRENT_CONTEXT->swapchain->vk_swapchain == VK_NULL_HANDLE) {
    NGFI_DIAG_ERROR(
        "requesting a swapchain image handle from a context that does not have a swapchain");
    return NGF_ERROR_INVALID_OPERATION;
  }
  ngfvk_maybe_acquire_swapchain_image();
  *result = CURRENT_CONTEXT->swapchain->wrapper_imgs[CURRENT_CONTEXT->swapchain->image_idx].get();
  return NGF_ERROR_OK;
}

// Ends the current frame: submits all pending command buffers and presents the
// swapchain image (if there is a swapchain).
extern "C" ngf_error ngf_end_frame(ngf_frame_token token) NGF_NOEXCEPT {
  if (token != CURRENT_CONTEXT->current_frame_token) {
    NGFI_DIAG_ERROR("ending a frame with an unexpected frame token");
    return NGF_ERROR_INVALID_OPERATION;
  }
  ngf_error err = NGF_ERROR_OK;

  // Obtain the current frame resource structure.
  const uint32_t         fi        = CURRENT_CONTEXT->frame_id;
  ngfvk_frame_resources* frame_res = &CURRENT_CONTEXT->frame_res[fi];
  frame_res->nwait_fences          = 0u;

  // Submit pending commands & present.
  VkSemaphore image_semaphore = VK_NULL_HANDLE;
  const bool  needs_present =
      CURRENT_CONTEXT->swapchain && CURRENT_CONTEXT->swapchain->vk_swapchain != VK_NULL_HANDLE;
  if (needs_present) {
    // The submission must wait on the swapchain image acquisition semaphore.
    image_semaphore = CURRENT_CONTEXT->swapchain->acquire_sems[fi];
  }
  ngf_error submit_result = ngfvk_submit_pending_cmd_buffers(
      frame_res,
      image_semaphore,
      frame_res->fences[frame_res->nwait_fences++]);

  // Present if necessary.
  if (submit_result == NGF_ERROR_OK && needs_present) {
    const VkPresentInfoKHR present_info = {
        .sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .pNext              = NULL,
        .waitSemaphoreCount = 1u,
        .pWaitSemaphores =
            &CURRENT_CONTEXT->swapchain->submit_sems[CURRENT_CONTEXT->swapchain->image_idx],
        .swapchainCount = 1,
        .pSwapchains    = &CURRENT_CONTEXT->swapchain->vk_swapchain,
        .pImageIndices  = &CURRENT_CONTEXT->swapchain->image_idx,
        .pResults       = NULL};
    const VkResult present_result = vkQueuePresentKHR(_vk.present_queue, &present_info);
    if (present_result != VK_SUCCESS) err = NGF_ERROR_INVALID_OPERATION;
  }

  // end frame capture
  if (_renderdoc.api && _renderdoc.is_capturing) {
    _renderdoc.api->EndFrameCapture(
        RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance),
        (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle);
    _renderdoc.is_capturing = false;
    _renderdoc.capture_next = false;
  }
  return err;
}

// Creates a shader stage; forwards to ngf_shader_stage_t::make.
extern "C" ngf_error
ngf_create_shader_stage(const ngf_shader_stage_info* info, ngf_shader_stage* result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_stage = ngf_shader_stage_t::make(*info);
  if (!maybe_stage.has_error()) result[0] = maybe_stage.value().release();
  return maybe_stage.has_error() ? maybe_stage.error() : NGF_ERROR_OK;
}

extern "C" void ngf_destroy_shader_stage(ngf_shader_stage stage) NGF_NOEXCEPT {
  if (stage) { NGFI_FREE(stage); }
}

// Creates a graphics pipeline; the backend uses one generic pipeline type for
// both graphics and compute.
extern "C" ngf_error ngf_create_graphics_pipeline(
    const ngf_graphics_pipeline_info* info,
    ngf_graphics_pipeline*            result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_pipeline = ngfvk_generic_pipeline::make(*info);
  if (!maybe_pipeline.has_error())
    result[0] = (ngf_graphics_pipeline)maybe_pipeline.value().release();
  return maybe_pipeline.has_error() ? maybe_pipeline.error() : NGF_ERROR_OK;
}

extern "C" void ngf_destroy_graphics_pipeline(ngf_graphics_pipeline p) NGF_NOEXCEPT {
  if (p) {
    auto gp = (ngfvk_generic_pipeline*)p;
    NGFI_FREE(gp);
  }
}

// Creates a compute pipeline; mirrors ngf_create_graphics_pipeline.
extern "C" ngf_error ngf_create_compute_pipeline(
    const ngf_compute_pipeline_info* info,
    ngf_compute_pipeline*            result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_pipeline = ngfvk_generic_pipeline::make(*info);
  if (!maybe_pipeline.has_error())
    result[0] = (ngf_compute_pipeline)maybe_pipeline.value().release();
  return maybe_pipeline.has_error() ? maybe_pipeline.error() : NGF_ERROR_OK;
}

extern "C" void ngf_destroy_compute_pipeline(ngf_compute_pipeline p) NGF_NOEXCEPT {
  if (p) {
    auto gp = (ngfvk_generic_pipeline*)p;
    NGFI_FREE(gp);
  }
}

// Returns the context's default (swapchain-backed) render target, or NULL when
// there is no current context.
extern "C" ngf_render_target ngf_default_render_target() NGF_NOEXCEPT {
  if (CURRENT_CONTEXT) {
    return CURRENT_CONTEXT->default_render_target.get();
  } else {
    return NULL;
  }
}

// Returns the attachment descriptions of the default render target: one color
// attachment, plus a depth attachment when the swapchain has a depth format.
extern "C" const ngf_attachment_descriptions*
ngf_default_render_target_attachment_descs() NGF_NOEXCEPT {
  if (CURRENT_CONTEXT->default_render_target) {
    CURRENT_CONTEXT->default_attachment_descriptions_list.ndescs =
        CURRENT_CONTEXT->swapchain_info.depth_format != NGF_IMAGE_FORMAT_UNDEFINED ?
        2u : 1u;
    CURRENT_CONTEXT->default_attachment_descriptions_list.descs =
        CURRENT_CONTEXT->default_render_target->attachment_descs.data();
    return &CURRENT_CONTEXT->default_attachment_descriptions_list;
  } else {
    return NULL;
  }
}

// Creates a render target; forwards to ngf_render_target_t::make.
extern "C" ngf_error ngf_create_render_target(
    const ngf_render_target_info* info,
    ngf_render_target*            result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_rt = ngf_render_target_t::make(*info);
  if (!maybe_rt.has_error()) result[0] = maybe_rt.value().release();
  return maybe_rt.has_error() ? maybe_rt.error() : NGF_ERROR_OK;
}

// Destroys a user-created render target; the default render target is owned by
// the context and is refused here.
extern "C" void ngf_destroy_render_target(ngf_render_target target) NGF_NOEXCEPT {
  if (target) {
    if (target->is_default) {
      NGFI_DIAG_ERROR("default RT can only be destroyed by owning context\n");
      return;
    }
    NGFI_FREE(target);
  }
}

// Records a compute dispatch: resolves sync requirements for all pending bind
// ops, emits the required barriers, flushes descriptor writes, then records
// vkCmdDispatch.
extern "C" void ngf_cmd_dispatch(
    ngf_compute_encoder enc,
    uint32_t            x_threadgroups,
    uint32_t            y_threadgroups,
    uint32_t            z_threadgroups) NGF_NOEXCEPT {
  ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc);
  ngfi::tmp_arena().reset();

  // Prepare a batch of sync requests by scanning all pending bind operations.
  ngfvk_sync_req_batch sync_req_batch;
  ngfvk_sync_req_batch_init(cmd_buf->npending_bind_ops, &sync_req_batch);
  for (const ngf_resource_bind_op& bind_op_ref : cmd_buf->pending_bind_ops) {
    const ngf_resource_bind_op* bind_op  = &bind_op_ref;
    ngfvk_sync_req              sync_req = ngfvk_sync_req_for_bind_op(
        bind_op,
        (ngfvk_generic_pipeline*)(cmd_buf->active_compute_pipe));
    // A zero stage mask means this bind op doesn't participate in hazard tracking.
    if (sync_req.barrier_masks.stage_mask == 0u) { continue; }
    const ngfvk_sync_res res = ngfvk_sync_res_from_bind_op(bind_op);
    if (res.type == NGFVK_SYNC_RES_COUNT) { continue; }
    ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &res, &sync_req);
  }

  // Emit the necessary barriers prior to dispatch.
  ngfvk_sync_req_batch_commit(&sync_req_batch, cmd_buf);

  // Allocate and write descriptor sets.
  ngfvk_execute_pending_binds(cmd_buf);
  vkCmdDispatch(cmd_buf->vk_cmd_buffer, x_threadgroups, y_threadgroups, z_threadgroups);
}

// Records a draw. Render commands are deferred, so this is the point where sync
// requirements for the vertex/index buffers and all virtually-bound resources
// are gathered and turned into barriers, before the draw itself is queued.
extern "C" void ngf_cmd_draw(
    ngf_render_encoder enc,
    bool               indexed,
    uint32_t           first_element,
    uint32_t           nelements,
    uint32_t           ninstances) NGF_NOEXCEPT {
  ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc);

  // Upper bound on requests: one per tracked bind op, plus vertex + index buffers.
  uint32_t nmax_pending_sync_reqs = 2u;
  for (const ngfvk_virt_bind_range& r : cmd_buf->virt_bind_ops_ranges) {
    nmax_pending_sync_reqs += r.count;
  }
  ngfvk_sync_req_batch sync_req_batch;
  ngfvk_sync_req_batch_init(nmax_pending_sync_reqs, &sync_req_batch);
  const ngfvk_sync_req attr_buf_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};
  if (cmd_buf->active_attr_buf) {
    const ngfvk_sync_res attr_buf_res = ngfvk_sync_res_from_buf(cmd_buf->active_attr_buf);
    ngfvk_sync_req_batch_add_with_lookup(
        &sync_req_batch, cmd_buf, &attr_buf_res, &attr_buf_sync_req);
  }
  if (indexed && cmd_buf->active_idx_buf) {
    const ngfvk_sync_req idx_buf_sync_req = {
        .barrier_masks =
            {.access_mask = VK_ACCESS_INDEX_READ_BIT,
             .stage_mask  = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},
        .layout = VK_IMAGE_LAYOUT_UNDEFINED};
    const ngfvk_sync_res idx_buf_res = ngfvk_sync_res_from_buf(cmd_buf->active_idx_buf);
    ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &idx_buf_res, &idx_buf_sync_req);
  }
  cmd_buf->active_attr_buf = NULL;
  cmd_buf->active_idx_buf  = NULL;
  // Gather sync requirements for every resource bound since the last draw.
  for (const ngfvk_virt_bind_range& r : cmd_buf->virt_bind_ops_ranges) {
    for (uint32_t j = 0u; j < r.count; ++j) {
      const ngfvk_render_cmd* render_cmd = &r.start[j];
      assert(render_cmd->type == NGFVK_RENDER_CMD_BIND_RESOURCE);
      const ngfvk_sync_req sync_req = ngfvk_sync_req_for_bind_op(
          &render_cmd->data.bind_resource,
          (ngfvk_generic_pipeline*)(cmd_buf->active_gfx_pipe));
      if (sync_req.barrier_masks.stage_mask == 0u) { continue; }
      const ngfvk_sync_res sync_res =
          ngfvk_sync_res_from_bind_op(&render_cmd->data.bind_resource);
      ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &sync_res, &sync_req);
    }
  }
  cmd_buf->virt_bind_ops_ranges.clear();
  ngfvk_sync_req_batch_process(&sync_req_batch, cmd_buf);
  // Queue the actual draw for later recording.
  const ngfvk_render_cmd cmd = {
      .data =
          {.draw =
               {.first_element = first_element,
                .nelements     = nelements,
                .ninstances    = ninstances,
                .indexed       = indexed}},
      .type = NGFVK_RENDER_CMD_DRAW};
  ngfvk_cmd_buf_add_render_cmd(cmd_buf, &cmd, true);
}

// Queues a pipeline bind; the pipeline is remembered for hazard tracking at
// draw time.
extern "C" void
ngf_cmd_bind_gfx_pipeline(ngf_render_encoder enc, ngf_graphics_pipeline pipeline) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.pipeline = pipeline},
      .type = NGFVK_RENDER_CMD_BIND_PIPELINE};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
  buf->active_gfx_pipe = pipeline;
}

// Queues resource bind ops and tracks contiguous runs of the queued commands in
// virt_bind_ops_ranges so ngf_cmd_draw can iterate them for hazard tracking.
extern "C" void ngf_cmd_bind_resources(
    ngf_render_encoder          enc,
    const ngf_resource_bind_op* bind_operations,
    uint32_t                    nbind_operations) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  if (nbind_operations <= 0u) { return; }
  ngfvk_virt_bind_range   curr_range = {.start = nullptr, .count = 0u};
  const ngfvk_render_cmd* prev_cmd   = nullptr;
  for (uint32_t i = 0u; i < nbind_operations; ++i) {
    const ngfvk_render_cmd cmd = {
        .data = {.bind_resource = bind_operations[i]},
        .type = NGFVK_RENDER_CMD_BIND_RESOURCE};
    const ngfvk_render_cmd* cmd_ptr = buf->in_pass_cmd_chnks.append(
        cmd,
        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
    // Check if the bound resource is marked as read-only.
    // Do not add such resources to the cmd buffer's virt_bind_ops_ranges.
    // This will preclude hazard tracking from occurring for said resources.
    if (ngfi_skip_hazard_tracking_for_bind_op(bind_operations[i])) { continue; }
    // Check if this command is contiguous with the previous one (same chunk)
    if (prev_cmd != nullptr && cmd_ptr != prev_cmd + 1) {
      // New chunk started, flush current range
      if (curr_range.start != nullptr) {
        buf->virt_bind_ops_ranges.append(
            curr_range,
            CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
      }
      curr_range.start = cmd_ptr;
      curr_range.count = 0u;  // 0 is intentional, we increment count at the end of loop.
    } else if (curr_range.start == nullptr) {
      // First command
      curr_range.start = cmd_ptr;
    }
    ++curr_range.count;
    prev_cmd = cmd_ptr;
  }
  // Flush the trailing range, if any.
  if (curr_range.start != nullptr) {
    buf->virt_bind_ops_ranges.append(
        curr_range,
        CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id].res_frame_arena);
  }
}

// Compute-encoder resource binds execute eagerly via the shared helper.
extern "C" void ngf_cmd_bind_compute_resources(
    ngf_compute_encoder         enc,
    const ngf_resource_bind_op* bind_operations,
    uint32_t                    nbind_operations) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  ngfvk_cmd_bind_resources(buf, bind_operations, nbind_operations);
}

// Binds a compute pipeline, flushing any binds pending against the previous one.
extern "C" void
ngf_cmd_bind_compute_pipeline(ngf_compute_encoder enc, ngf_compute_pipeline pipeline) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  if (buf->active_compute_pipe && buf->npending_bind_ops > 0u) { ngfvk_execute_pending_binds(buf); }
  buf->active_compute_pipe = pipeline;
  vkCmdBindPipeline(
      buf->vk_cmd_buffer,
      VK_PIPELINE_BIND_POINT_COMPUTE,
      ((ngfvk_generic_pipeline*)pipeline)->vk_pipeline);
}

// The remaining state-setting commands all just queue deferred render commands,
// recorded for real in ngf_cmd_end_render_pass.
extern "C" void ngf_cmd_viewport(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {.data = {.rect = *r}, .type = NGFVK_RENDER_CMD_SET_VIEWPORT};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void ngf_cmd_scissor(ngf_render_encoder enc, const ngf_irect2d* r) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {.data = {.rect = *r}, .type = NGFVK_RENDER_CMD_SET_SCISSOR};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void
ngf_cmd_stencil_reference(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_REFERENCE};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void
ngf_cmd_stencil_compare_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_COMPARE_MASK};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void
ngf_cmd_stencil_write_mask(ngf_render_encoder enc, uint32_t front, uint32_t back) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.stencil_values = {.front = front, .back = back}},
      .type = NGFVK_RENDER_CMD_SET_STENCIL_WRITE_MASK};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void ngf_cmd_set_depth_bias(
    ngf_render_encoder enc,
    float              const_scale,
    float              slope_scale,
    float              clamp) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data =
          {.depth_bias =
               {.const_factor = const_scale, .slope_factor = slope_scale, .clamp = clamp}},
      .type = NGFVK_RENDER_CMD_SET_DEPTH_BIAS};
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

// Queues a vertex attribute buffer bind; the buffer is remembered so that
// ngf_cmd_draw can include it in hazard tracking.
extern "C" void ngf_cmd_bind_attrib_buffer(
    ngf_render_encoder enc,
    ngf_buffer         abuf,
    uint32_t           binding,
    size_t             offset) NGF_NOEXCEPT {
  ngf_cmd_buffer         buf = NGFVK_ENC2CMDBUF(enc);
  const ngfvk_render_cmd cmd = {
      .data = {.bind_attrib_buffer = {.buffer = abuf, .binding = binding, .offset = offset}},
      .type = NGFVK_RENDER_CMD_BIND_ATTRIB_BUFFER};
  buf->active_attr_buf = abuf;
  ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true);
}

extern "C" void ngf_cmd_bind_index_buffer(
    ngf_render_encoder
enc, ngf_buffer ibuf, size_t offset, ngf_type index_type) NGF_NOEXCEPT { ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc); const ngfvk_render_cmd cmd = { .data = {.bind_index_buffer = {.buffer = ibuf, .offset = offset, .type = index_type}}, .type = NGFVK_RENDER_CMD_BIND_INDEX_BUFFER}; buf->active_idx_buf = ibuf; ngfvk_cmd_buf_add_render_cmd(buf, &cmd, true); } extern "C" void ngf_cmd_copy_buffer( ngf_xfer_encoder enc, ngf_buffer src, ngf_buffer dst, size_t size, size_t src_offset, size_t dst_offset) NGF_NOEXCEPT { ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc); assert(buf); ngfvk_sync_req_batch sync_req_batch; ngfi::tmp_arena().reset(); ngfvk_sync_req_batch_init(2, &sync_req_batch); const ngfvk_sync_req src_sync_req = { .barrier_masks = {.access_mask = VK_ACCESS_TRANSFER_READ_BIT, .stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT}, .layout = VK_IMAGE_LAYOUT_UNDEFINED}; const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_buf(src); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &src_sync_res, &src_sync_req); const ngfvk_sync_req dst_sync_req = { .barrier_masks = {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT, .stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT}, .layout = VK_IMAGE_LAYOUT_UNDEFINED}; const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_buf(dst); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &dst_sync_res, &dst_sync_req); ngfvk_sync_req_batch_commit(&sync_req_batch, buf); const VkBufferCopy copy_region = {.srcOffset = src_offset, .dstOffset = dst_offset, .size = size}; vkCmdCopyBuffer( buf->vk_cmd_buffer, (VkBuffer)src->alloc.obj_handle, (VkBuffer)dst->alloc.obj_handle, 1u, ©_region); } extern "C" void ngf_cmd_write_image( ngf_xfer_encoder enc, ngf_buffer src, ngf_image dst, const ngf_image_write* writes, uint32_t nwrites) NGF_NOEXCEPT { ngf_cmd_buffer cmd_buf = NGFVK_ENC2CMDBUF(enc); assert(cmd_buf); assert(nwrites == 0u || writes); if (nwrites == 0u) return; ngfvk_sync_req_batch sync_req_batch; ngfi::tmp_arena().reset(); 
ngfvk_sync_req_batch_init(2, &sync_req_batch); const ngfvk_sync_req src_sync_req = { .barrier_masks = {.access_mask = VK_ACCESS_TRANSFER_READ_BIT, .stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT}, .layout = VK_IMAGE_LAYOUT_UNDEFINED}; const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_buf(src); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &src_sync_res, &src_sync_req); const ngfvk_sync_req dst_sync_req = { .barrier_masks = {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT, .stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT}, .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}; const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_img(dst); ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, cmd_buf, &dst_sync_res, &dst_sync_req); ngfvk_sync_req_batch_commit(&sync_req_batch, cmd_buf); ngfi::tmp_arena().reset(); auto vk_writes = ngfi::tmp_alloc(nwrites); if (vk_writes) { for (size_t i = 0u; i < nwrites; ++i) { const ngf_image_write* ngf_write = &writes[i]; VkBufferImageCopy* vk_write = &vk_writes[i]; memset(vk_write, 0, sizeof(VkBufferImageCopy)); vk_write->bufferOffset = ngf_write->src_offset; vk_write->imageOffset.x = ngf_write->dst_offset.x; vk_write->imageOffset.y = ngf_write->dst_offset.y; vk_write->imageOffset.z = ngf_write->dst_offset.z; vk_write->imageExtent.width = ngf_write->extent.width; vk_write->imageExtent.height = ngf_write->extent.height; vk_write->imageExtent.depth = ngf_write->extent.depth; vk_write->imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vk_write->imageSubresource.mipLevel = ngf_write->dst_level; vk_write->imageSubresource.baseArrayLayer = ngf_write->dst_base_layer; vk_write->imageSubresource.layerCount = ngf_write->nlayers; } vkCmdCopyBufferToImage( cmd_buf->vk_cmd_buffer, (VkBuffer)src->alloc.obj_handle, (VkImage)dst->alloc.obj_handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, nwrites, vk_writes); } else { NGFI_DIAG_ERROR("Image write failed"); } } extern "C" void ngf_cmd_copy_image_to_buffer( ngf_xfer_encoder enc, const 
          ngf_image_ref src,
    ngf_offset3d  src_offset,
    ngf_extent3d  src_extent,
    uint32_t      nlayers,
    ngf_buffer    dst,
    size_t        dst_offset) NGF_NOEXCEPT {
  ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(enc);
  assert(buf);
  // Request transfer-read access to the source image (in TRANSFER_SRC layout)
  // and transfer-write access to the destination buffer.
  ngfvk_sync_req_batch sync_req_batch;
  ngfi::tmp_arena().reset();
  ngfvk_sync_req_batch_init(2, &sync_req_batch);
  const ngfvk_sync_req src_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_READ_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL};
  const ngfvk_sync_res src_sync_res = ngfvk_sync_res_from_img(src.image);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &src_sync_res, &src_sync_req);
  const ngfvk_sync_req dst_sync_req = {
      .barrier_masks =
          {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT,
           .stage_mask  = VK_PIPELINE_STAGE_TRANSFER_BIT},
      .layout = VK_IMAGE_LAYOUT_UNDEFINED};  // buffers have no image layout.
  const ngfvk_sync_res dst_sync_res = ngfvk_sync_res_from_buf(dst);
  ngfvk_sync_req_batch_add_with_lookup(&sync_req_batch, buf, &dst_sync_res, &dst_sync_req);
  ngfvk_sync_req_batch_commit(&sync_req_batch, buf);
  // Cubemaps address faces as 6 consecutive array layers per cube layer.
  const uint32_t src_layer = src.image->type == NGF_IMAGE_TYPE_CUBE ?
6u * src.layer + src.cubemap_face : src.layer; const VkBufferImageCopy copy_op = { .bufferOffset = dst_offset, .bufferRowLength = 0u, .bufferImageHeight = 0u, .imageSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = src.mip_level, .baseArrayLayer = src_layer, .layerCount = nlayers}, .imageOffset = {.x = src_offset.x, .y = src_offset.y, .z = src_offset.z}, .imageExtent = {.width = src_extent.width, .height = src_extent.height, .depth = src_extent.depth}}; vkCmdCopyImageToBuffer( buf->vk_cmd_buffer, (VkImage)src.image->alloc.obj_handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, (VkBuffer)dst->alloc.obj_handle, 1u, ©_op); } extern "C" ngf_error ngf_cmd_generate_mipmaps(ngf_xfer_encoder xfenc, ngf_image img) NGF_NOEXCEPT { if (!(img->usage_flags & NGF_IMAGE_USAGE_MIPMAP_GENERATION)) { NGFI_DIAG_ERROR("mipmap generation was requested for an image that was created without " "the NGF_IMAGE_USAGE_MIPMAP_GENERATION usage flag."); return NGF_ERROR_INVALID_OPERATION; } ngf_cmd_buffer buf = NGFVK_ENC2CMDBUF(xfenc); assert(buf); // TODO: ensure the pixel format is valid for mip generation. // TODO: hazard-track images on mip + level granularity. ngfvk_sync_req sync_req = { .barrier_masks = {.access_mask = VK_ACCESS_TRANSFER_WRITE_BIT, .stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT}, .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}; ngfvk_sync_res img_res = ngfvk_sync_res_from_img(img); ngfvk_handle_single_sync_req(buf, &img_res, &sync_req); uint32_t src_w = img->extent.width, src_h = img->extent.height, src_d = img->extent.depth, dst_w = 0, dst_h = 0, dst_d = 0; const uint32_t nlayers = img->nlayers; for (uint32_t src_level = 0u; src_level < img->nlevels; ++src_level) { const uint32_t dst_level = src_level + 1u; dst_w = src_w > 1u ? (src_w >> 1u) : 1u; dst_h = src_h > 1u ? (src_h >> 1u) : 1u; dst_d = src_d > 1u ? 
              (src_d >> 1u) : 1u;
    // Make level src_level readable for the blit: its contents were produced
    // either by the initial transition (level 0) or by the previous blit.
    const VkImageMemoryBarrier pre_blit_barrier = {
        .sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .pNext               = NULL,
        .srcAccessMask       = VK_ACCESS_TRANSFER_WRITE_BIT,
        .dstAccessMask       = VK_ACCESS_TRANSFER_READ_BIT,
        .oldLayout           = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        .newLayout           = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image               = (VkImage)img->alloc.obj_handle,
        .subresourceRange    = {
               .aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .baseMipLevel   = src_level,
               .levelCount     = 1u,
               .baseArrayLayer = 0u,
               .layerCount     = nlayers}};
    vkCmdPipelineBarrier(
        buf->vk_cmd_buffer,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        0u,
        0u,
        NULL,
        0u,
        NULL,
        1u,
        &pre_blit_barrier);
    // Blit down into the next level. Skipped for the last level, which only
    // needs the layout transition above.
    if (src_level < img->nlevels - 1) {
      const VkImageBlit blit_region = {
          .srcSubresource =
              {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .mipLevel       = src_level,
               .baseArrayLayer = 0u,
               .layerCount     = nlayers},
          .srcOffsets = {{0, 0, 0}, {(int32_t)src_w, (int32_t)src_h, (int32_t)src_d}},
          .dstSubresource =
              {.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT,
               .mipLevel       = dst_level,
               .baseArrayLayer = 0u,
               .layerCount     = nlayers},
          .dstOffsets = {{0, 0, 0}, {(int32_t)dst_w, (int32_t)dst_h, (int32_t)dst_d}}};
      vkCmdBlitImage(
          buf->vk_cmd_buffer,
          (VkImage)img->alloc.obj_handle,
          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
          (VkImage)img->alloc.obj_handle,
          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
          1,
          &blit_region,
          VK_FILTER_LINEAR);
      src_w = dst_w;
      src_h = dst_h;
      src_d = dst_d;
    }
  }
  // Record the image's final state for hazard tracking: all levels are now in
  // TRANSFER_SRC layout with transfer reads outstanding.
  ngfvk_sync_res       r             = ngfvk_sync_res_from_img(img);
  ngfvk_sync_res_data* sync_res_data = NULL;
  ngfvk_cmd_buf_lookup_sync_res(buf, &r, &sync_res_data);
  sync_res_data->sync_state.active_readers_masks.stage_mask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
  sync_res_data->sync_state.active_readers_masks.access_mask |= VK_ACCESS_TRANSFER_READ_BIT;
  sync_res_data->sync_state.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
  return NGF_ERROR_OK;
}

extern "C" void
// Opens a named debug group on the command buffer (visible in tools like renderdoc).
ngf_cmd_begin_debug_group(ngf_cmd_buffer cmd_buffer, const char* name) NGF_NOEXCEPT {
  ngfvk_debug_label_begin(cmd_buffer->vk_cmd_buffer, name);
}

// Closes the innermost open debug group.
extern "C" void ngf_cmd_end_current_debug_group(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT {
  ngfvk_debug_label_end(cmd_buffer->vk_cmd_buffer);
}

// Creates a texel buffer view; forwards to ngf_texel_buffer_view_t::make.
extern "C" ngf_error ngf_create_texel_buffer_view(
    const ngf_texel_buffer_view_info* info,
    ngf_texel_buffer_view*            result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_buf_view = ngf_texel_buffer_view_t::make(*info);
  if (!maybe_buf_view.has_error()) result[0] = maybe_buf_view.value().release();
  return maybe_buf_view.has_error() ? maybe_buf_view.error() : NGF_ERROR_OK;
}

// Deferred destruction: the view is retired with the current frame so that it
// is not destroyed while still referenced by in-flight command buffers.
extern "C" void ngf_destroy_texel_buffer_view(ngf_texel_buffer_view buf_view) NGF_NOEXCEPT {
  if (buf_view) {
    const uint32_t fi = CURRENT_CONTEXT->frame_id;
    CURRENT_CONTEXT->frame_res[fi].retire.append(buf_view);
  }
}

extern "C" ngf_error ngf_create_buffer(const ngf_buffer_info* info, ngf_buffer* result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_buf = ngf_buffer_t::make(*info);
  if (!maybe_buf.has_error()) { result[0] = maybe_buf.value().release(); }
  return maybe_buf.has_error() ? maybe_buf.error() : NGF_ERROR_OK;
}

// Deferred destruction, same scheme as ngf_destroy_texel_buffer_view.
extern "C" void ngf_destroy_buffer(ngf_buffer buffer) NGF_NOEXCEPT {
  if (buffer) {
    const uint32_t fi = CURRENT_CONTEXT->frame_id;
    CURRENT_CONTEXT->frame_res[fi].retire.append(buffer);
  }
}

// Buffers are persistently mapped by the allocator; "mapping" just records the
// offset and returns a pointer into the existing mapping. The size parameter is
// unused on this backend.
extern "C" void* ngf_buffer_map_range(ngf_buffer buf, size_t offset, size_t) NGF_NOEXCEPT {
  buf->mapped_offset = offset;
  return (uint8_t*)buf->alloc.mapped_data + buf->mapped_offset;
}

// Flushes a sub-range of the mapping; `offset` is relative to the offset that
// was recorded by the most recent ngf_buffer_map_range call.
extern "C" void ngf_buffer_flush_range(ngf_buffer buf, size_t offset, size_t size) NGF_NOEXCEPT {
  vmaFlushAllocation(_vk.allocator, buf->alloc.vma_alloc, buf->mapped_offset + offset, size);
}

extern "C" void ngf_buffer_unmap(ngf_buffer) NGF_NOEXCEPT {
  // vk buffers are persistently mapped.
} extern "C" ngf_error ngf_create_image_view(const ngf_image_view_info* info, ngf_image_view* result) NGF_NOEXCEPT { assert(info); assert(result); auto maybe_view = ngf_image_view_t::make(*info); if (!maybe_view.has_error()) result[0] = maybe_view.value().release(); return maybe_view.has_error() ? maybe_view.error() : NGF_ERROR_OK; } extern "C" void ngf_destroy_image_view(ngf_image_view view) NGF_NOEXCEPT { if (view) { const uint32_t fi = CURRENT_CONTEXT->frame_id; CURRENT_CONTEXT->frame_res[fi].retire.append(view); } } extern "C" ngf_error ngf_create_image(const ngf_image_info* info, ngf_image* result) NGF_NOEXCEPT { assert(info); assert(result); auto maybe_image = ngf_image_t::make(*info); if (!maybe_image.has_error()) result[0] = maybe_image.value().release(); return maybe_image.has_error() ? maybe_image.error() : NGF_ERROR_OK; } extern "C" void ngf_destroy_image(ngf_image img) NGF_NOEXCEPT { if (img != NULL) { const uint32_t fi = CURRENT_CONTEXT->frame_id; CURRENT_CONTEXT->frame_res[fi].retire.append(img); } } ngfi::maybe_ngfptr ngf_sampler_t::make(const ngf_sampler_info& info) NGF_NOEXCEPT { auto sampler = ngfi::unique_ptr::make(); if (!sampler) return NGF_ERROR_OUT_OF_MEM; const VkSamplerCreateInfo vk_sampler_info = { .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, .pNext = NULL, .flags = 0u, .magFilter = get_vk_filter(info.mag_filter), .minFilter = get_vk_filter(info.min_filter), .mipmapMode = get_vk_mipmode(info.mip_filter), .addressModeU = get_vk_address_mode(info.wrap_u), .addressModeV = get_vk_address_mode(info.wrap_v), .addressModeW = get_vk_address_mode(info.wrap_w), .mipLodBias = info.lod_bias, .anisotropyEnable = info.enable_anisotropy ? 
                             VK_TRUE
                           : VK_FALSE,
      .maxAnisotropy = info.max_anisotropy,
      // Comparison is enabled for any compare op other than NEVER.
      .compareEnable           = info.compare_op != NGF_COMPARE_OP_NEVER,
      .compareOp               = get_vk_compare_op(info.compare_op),
      .minLod                  = info.lod_min,
      .maxLod                  = info.lod_max,
      .borderColor             = VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
      .unnormalizedCoordinates = VK_FALSE};
  const VkResult vk_sampler_create_result =
      vkCreateSampler(_vk.device, &vk_sampler_info, NULL, &sampler->vksampler);
  if (vk_sampler_create_result != VK_SUCCESS) return NGF_ERROR_OBJECT_CREATION_FAILED;
  return sampler;
}

// Creates a sampler; forwards to ngf_sampler_t::make.
extern "C" ngf_error
ngf_create_sampler(const ngf_sampler_info* info, ngf_sampler* result) NGF_NOEXCEPT {
  assert(info);
  assert(result);
  auto maybe_sampler = ngf_sampler_t::make(*info);
  if (!maybe_sampler.has_error()) result[0] = maybe_sampler.value().release();
  return maybe_sampler.has_error() ? maybe_sampler.error() : NGF_ERROR_OK;
}

ngf_sampler_t::~ngf_sampler_t() NGF_NOEXCEPT {
  vkDestroySampler(_vk.device, vksampler, nullptr);
}

// Deferred destruction with the current frame's retired-resource list.
extern "C" void ngf_destroy_sampler(ngf_sampler sampler) NGF_NOEXCEPT {
  if (sampler) {
    const uint32_t fi = CURRENT_CONTEXT->frame_id;
    CURRENT_CONTEXT->frame_res[fi].retire.append(sampler);
  }
}

// Submits any still-pending command buffers and blocks until the device is idle.
extern "C" void ngf_finish(void) NGF_NOEXCEPT {
  if (CURRENT_CONTEXT->current_frame_token != ~0u) {
    ngfvk_frame_resources* frame_res = &CURRENT_CONTEXT->frame_res[CURRENT_CONTEXT->frame_id];
    ngfvk_submit_pending_cmd_buffers(frame_res, VK_NULL_HANDLE, VK_NULL_HANDLE);
  }
  vkDeviceWaitIdle(_vk.device);
}

// Pushes via the context's default layout; values persist across compatible pipeline binds.
static ngf_error ngfvk_set_bytes_impl( ngf_cmd_buffer cmd_buf, const void* data, size_t size_bytes) { if (!data || size_bytes == 0u) return NGF_ERROR_OK; if (size_bytes > NGF_MAX_ENCODER_INLINE_BYTES || (size_bytes & 0x3u) != 0u) { NGFI_DIAG_ERROR( "push-constant size %zu must be <= %u and a multiple of 4", size_bytes, NGF_MAX_ENCODER_INLINE_BYTES); return NGF_ERROR_INVALID_SIZE; } vkCmdPushConstants( cmd_buf->vk_cmd_buffer, CURRENT_CONTEXT->vk_default_push_layout, VK_SHADER_STAGE_ALL, 0u, static_cast(size_bytes), data); return NGF_ERROR_OK; } extern "C" ngf_error ngf_set_bytes( ngf_render_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT { return ngfvk_set_bytes_impl(NGFVK_ENC2CMDBUF(enc), data, size_bytes); } extern "C" ngf_error ngf_set_compute_bytes( ngf_compute_encoder enc, const void* data, size_t size_bytes) NGF_NOEXCEPT { return ngfvk_set_bytes_impl(NGFVK_ENC2CMDBUF(enc), data, size_bytes); } extern "C" void ngf_mark_read_only(ngf_image* imgs, uint32_t nimgs, ngf_buffer* bufs, uint32_t nbufs) NGF_NOEXCEPT { for (size_t i = 0u; i < nimgs; ++i) { imgs[i]->sync_state.skip_hazard_tracking = true; } for (size_t i = 0u; i < nbufs; ++i) { bufs[i]->sync_state.skip_hazard_tracking = true; } } extern "C" void ngf_renderdoc_capture_next_frame() NGF_NOEXCEPT { if (_renderdoc.api) _renderdoc.capture_next = true; } extern "C" void ngf_renderdoc_capture_begin() NGF_NOEXCEPT { if (_renderdoc.api && !_renderdoc.api->IsFrameCapturing()) { _renderdoc.api->StartFrameCapture( RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance), (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle); } } extern "C" void ngf_renderdoc_capture_end() NGF_NOEXCEPT { if (_renderdoc.api && _renderdoc.api->IsFrameCapturing()) { _renderdoc.api->EndFrameCapture( RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(_vk.instance), (RENDERDOC_WindowHandle)CURRENT_CONTEXT->swapchain_info.native_handle); } } extern "C" uintptr_t ngf_get_vk_device_handle() NGF_NOEXCEPT { return 
(uintptr_t)_vk.device; } extern "C" uintptr_t ngf_get_vk_instance_handle() NGF_NOEXCEPT { return (uintptr_t)_vk.instance; } extern "C" uintptr_t ngf_get_vk_image_handle(ngf_image image) NGF_NOEXCEPT { return image->alloc.obj_handle; } extern "C" uintptr_t ngf_get_vk_buffer_handle(ngf_buffer buffer) NGF_NOEXCEPT { return buffer->alloc.obj_handle; } extern "C" uintptr_t ngf_get_vk_cmd_buffer_handle(ngf_cmd_buffer cmd_buffer) NGF_NOEXCEPT { return (uintptr_t)(cmd_buffer->vk_cmd_buffer); } extern "C" uintptr_t ngf_get_vk_sampler_handle(ngf_sampler sampler) NGF_NOEXCEPT { return (uintptr_t)(sampler->vksampler); } extern "C" uint32_t ngf_get_vk_image_format_index(ngf_image_format format) NGF_NOEXCEPT { return (uint32_t)get_vk_image_format(format); } #pragma endregion #if defined(NGFVK_TEST_MODE) #include "../tests/vk-backend-tests.cpp" #endif ================================================ FILE: source/ngf-vk/vk_10.c ================================================ #include "ngf-common/silence.h" #include "vk_10.h" #include "ngf-common/macros.h" #define TO_STRING(str) #str #define STRINGIFY(str) TO_STRING(str) #if defined(_WIN32) || defined(_WIN64) #define VK_LOADER_LIB "vulkan-1.dll" #define VK_HIDE_SYMBOL #else #define VK_HIDE_SYMBOL __attribute__((visibility("hidden"))) #if defined(__APPLE__) #define VK_LOADER_LIB "libMoltenVK.dylib" #else #define VK_LOADER_LIB "libvulkan.so.1" #endif #endif VK_HIDE_SYMBOL PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; VK_HIDE_SYMBOL PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; VK_HIDE_SYMBOL PFN_vkCreateInstance vkCreateInstance; VK_HIDE_SYMBOL PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; VK_HIDE_SYMBOL PFN_vkCreateDevice vkCreateDevice; VK_HIDE_SYMBOL PFN_vkDestroyInstance vkDestroyInstance; VK_HIDE_SYMBOL PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; VK_HIDE_SYMBOL PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; VK_HIDE_SYMBOL 
PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
VK_HIDE_SYMBOL PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;
// Platform-specific presentation-support / surface-creation entry points
// (names and types come from the per-platform macros in vk_10.h).
#if !defined(__APPLE__)
VK_HIDE_SYMBOL VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN;
#endif
VK_HIDE_SYMBOL VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN;
VK_HIDE_SYMBOL PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
VK_HIDE_SYMBOL PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;
VK_HIDE_SYMBOL PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;
VK_HIDE_SYMBOL PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;
// Device-level entry points, resolved in vkl_init_device().
VK_HIDE_SYMBOL PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;
VK_HIDE_SYMBOL PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;
VK_HIDE_SYMBOL PFN_vkAllocateMemory vkAllocateMemory;
VK_HIDE_SYMBOL PFN_vkBeginCommandBuffer vkBeginCommandBuffer;
VK_HIDE_SYMBOL PFN_vkBindBufferMemory vkBindBufferMemory;
VK_HIDE_SYMBOL PFN_vkBindImageMemory vkBindImageMemory;
VK_HIDE_SYMBOL PFN_vkCmdBeginQuery vkCmdBeginQuery;
VK_HIDE_SYMBOL PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;
VK_HIDE_SYMBOL PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;
VK_HIDE_SYMBOL PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;
VK_HIDE_SYMBOL PFN_vkCmdBindPipeline vkCmdBindPipeline;
VK_HIDE_SYMBOL PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;
VK_HIDE_SYMBOL PFN_vkCmdBlitImage vkCmdBlitImage;
VK_HIDE_SYMBOL PFN_vkCmdClearAttachments vkCmdClearAttachments;
VK_HIDE_SYMBOL PFN_vkCmdClearColorImage vkCmdClearColorImage;
VK_HIDE_SYMBOL PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;
VK_HIDE_SYMBOL PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
VK_HIDE_SYMBOL PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;
VK_HIDE_SYMBOL PFN_vkCmdCopyImage vkCmdCopyImage;
VK_HIDE_SYMBOL PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;
VK_HIDE_SYMBOL PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;
VK_HIDE_SYMBOL PFN_vkCmdDispatch vkCmdDispatch;
VK_HIDE_SYMBOL PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;
VK_HIDE_SYMBOL PFN_vkCmdDraw vkCmdDraw;
VK_HIDE_SYMBOL PFN_vkCmdDrawIndexed vkCmdDrawIndexed;
VK_HIDE_SYMBOL PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;
VK_HIDE_SYMBOL PFN_vkCmdDrawIndirect vkCmdDrawIndirect;
VK_HIDE_SYMBOL PFN_vkCmdEndQuery vkCmdEndQuery;
VK_HIDE_SYMBOL PFN_vkCmdEndRenderPass vkCmdEndRenderPass;
VK_HIDE_SYMBOL PFN_vkCmdExecuteCommands vkCmdExecuteCommands;
VK_HIDE_SYMBOL PFN_vkCmdFillBuffer vkCmdFillBuffer;
VK_HIDE_SYMBOL PFN_vkCmdNextSubpass vkCmdNextSubpass;
VK_HIDE_SYMBOL PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
// Only resolved when synchronization2 is supported (see vkl_init_device).
VK_HIDE_SYMBOL PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;
VK_HIDE_SYMBOL PFN_vkCmdPushConstants vkCmdPushConstants;
VK_HIDE_SYMBOL PFN_vkCmdResetEvent vkCmdResetEvent;
VK_HIDE_SYMBOL PFN_vkCmdResetQueryPool vkCmdResetQueryPool;
VK_HIDE_SYMBOL PFN_vkCmdResolveImage vkCmdResolveImage;
VK_HIDE_SYMBOL PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
VK_HIDE_SYMBOL PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
VK_HIDE_SYMBOL PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
VK_HIDE_SYMBOL PFN_vkCmdSetEvent vkCmdSetEvent;
VK_HIDE_SYMBOL PFN_vkCmdSetLineWidth vkCmdSetLineWidth;
VK_HIDE_SYMBOL PFN_vkCmdSetScissor vkCmdSetScissor;
VK_HIDE_SYMBOL PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
VK_HIDE_SYMBOL PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
VK_HIDE_SYMBOL PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
VK_HIDE_SYMBOL PFN_vkCmdSetViewport vkCmdSetViewport;
VK_HIDE_SYMBOL PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;
VK_HIDE_SYMBOL PFN_vkCmdWaitEvents vkCmdWaitEvents;
VK_HIDE_SYMBOL PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;
VK_HIDE_SYMBOL PFN_vkCreateBuffer vkCreateBuffer;
VK_HIDE_SYMBOL PFN_vkCreateBufferView vkCreateBufferView;
VK_HIDE_SYMBOL PFN_vkCreateCommandPool vkCreateCommandPool;
VK_HIDE_SYMBOL PFN_vkCreateComputePipelines vkCreateComputePipelines;
VK_HIDE_SYMBOL PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
VK_HIDE_SYMBOL PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
VK_HIDE_SYMBOL PFN_vkCreateEvent vkCreateEvent;
VK_HIDE_SYMBOL PFN_vkCreateFence vkCreateFence;
VK_HIDE_SYMBOL PFN_vkCreateFramebuffer vkCreateFramebuffer;
VK_HIDE_SYMBOL PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
VK_HIDE_SYMBOL PFN_vkCreateImage vkCreateImage;
VK_HIDE_SYMBOL PFN_vkCreateImageView vkCreateImageView;
VK_HIDE_SYMBOL PFN_vkCreatePipelineCache vkCreatePipelineCache;
VK_HIDE_SYMBOL PFN_vkCreatePipelineLayout vkCreatePipelineLayout;
VK_HIDE_SYMBOL PFN_vkCreateQueryPool vkCreateQueryPool;
VK_HIDE_SYMBOL PFN_vkCreateRenderPass vkCreateRenderPass;
VK_HIDE_SYMBOL PFN_vkCreateSampler vkCreateSampler;
VK_HIDE_SYMBOL PFN_vkCreateSemaphore vkCreateSemaphore;
VK_HIDE_SYMBOL PFN_vkCreateShaderModule vkCreateShaderModule;
VK_HIDE_SYMBOL PFN_vkDestroyBuffer vkDestroyBuffer;
VK_HIDE_SYMBOL PFN_vkDestroyBufferView vkDestroyBufferView;
VK_HIDE_SYMBOL PFN_vkDestroyCommandPool vkDestroyCommandPool;
VK_HIDE_SYMBOL PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
VK_HIDE_SYMBOL PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
VK_HIDE_SYMBOL PFN_vkDestroyDevice vkDestroyDevice;
VK_HIDE_SYMBOL PFN_vkDestroyEvent vkDestroyEvent;
VK_HIDE_SYMBOL PFN_vkDestroyFence vkDestroyFence;
VK_HIDE_SYMBOL PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
VK_HIDE_SYMBOL PFN_vkDestroyImage vkDestroyImage;
VK_HIDE_SYMBOL PFN_vkDestroyImageView vkDestroyImageView;
VK_HIDE_SYMBOL PFN_vkDestroyPipeline vkDestroyPipeline;
VK_HIDE_SYMBOL PFN_vkDestroyPipelineCache vkDestroyPipelineCache;
VK_HIDE_SYMBOL PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;
VK_HIDE_SYMBOL PFN_vkDestroyQueryPool vkDestroyQueryPool;
VK_HIDE_SYMBOL PFN_vkDestroyRenderPass vkDestroyRenderPass;
VK_HIDE_SYMBOL PFN_vkDestroySampler vkDestroySampler;
VK_HIDE_SYMBOL PFN_vkDestroySemaphore vkDestroySemaphore;
VK_HIDE_SYMBOL PFN_vkDestroyShaderModule vkDestroyShaderModule;
VK_HIDE_SYMBOL PFN_vkDeviceWaitIdle vkDeviceWaitIdle;
VK_HIDE_SYMBOL PFN_vkEndCommandBuffer vkEndCommandBuffer;
VK_HIDE_SYMBOL PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;
VK_HIDE_SYMBOL PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
VK_HIDE_SYMBOL PFN_vkFreeCommandBuffers vkFreeCommandBuffers;
VK_HIDE_SYMBOL PFN_vkFreeDescriptorSets vkFreeDescriptorSets;
VK_HIDE_SYMBOL PFN_vkFreeMemory vkFreeMemory;
VK_HIDE_SYMBOL PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
VK_HIDE_SYMBOL PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;
VK_HIDE_SYMBOL PFN_vkGetDeviceQueue vkGetDeviceQueue;
VK_HIDE_SYMBOL PFN_vkGetEventStatus vkGetEventStatus;
VK_HIDE_SYMBOL PFN_vkGetFenceStatus vkGetFenceStatus;
VK_HIDE_SYMBOL PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
VK_HIDE_SYMBOL PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;
VK_HIDE_SYMBOL PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;
VK_HIDE_SYMBOL PFN_vkGetPipelineCacheData vkGetPipelineCacheData;
VK_HIDE_SYMBOL PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
VK_HIDE_SYMBOL PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;
VK_HIDE_SYMBOL PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
VK_HIDE_SYMBOL PFN_vkMapMemory vkMapMemory;
VK_HIDE_SYMBOL PFN_vkMergePipelineCaches vkMergePipelineCaches;
VK_HIDE_SYMBOL PFN_vkQueueBindSparse vkQueueBindSparse;
VK_HIDE_SYMBOL PFN_vkQueueSubmit vkQueueSubmit;
VK_HIDE_SYMBOL PFN_vkQueueWaitIdle vkQueueWaitIdle;
VK_HIDE_SYMBOL PFN_vkResetCommandBuffer vkResetCommandBuffer;
VK_HIDE_SYMBOL PFN_vkResetCommandPool vkResetCommandPool;
VK_HIDE_SYMBOL PFN_vkResetDescriptorPool vkResetDescriptorPool;
VK_HIDE_SYMBOL PFN_vkResetEvent vkResetEvent;
VK_HIDE_SYMBOL PFN_vkResetFences vkResetFences;
VK_HIDE_SYMBOL PFN_vkSetEvent vkSetEvent;
VK_HIDE_SYMBOL PFN_vkUnmapMemory vkUnmapMemory;
VK_HIDE_SYMBOL PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
VK_HIDE_SYMBOL PFN_vkWaitForFences vkWaitForFences;
VK_HIDE_SYMBOL PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
VK_HIDE_SYMBOL PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
VK_HIDE_SYMBOL PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
VK_HIDE_SYMBOL PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
VK_HIDE_SYMBOL PFN_vkQueuePresentKHR vkQueuePresentKHR;
VK_HIDE_SYMBOL PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;
VK_HIDE_SYMBOL PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;

// Loads the Vulkan loader library and resolves the few entry points that can
// be queried before a VkInstance exists. Returns false if the library itself
// could not be loaded.
// NOTE(review): LoadLibraryA/GetProcAddress are used unconditionally here —
// presumably remapped to dlopen/dlsym on non-Windows platforms by
// ngf-common/macros.h (which also supplies ngfi_module_handle); confirm.
// NOTE(review): the result of GetProcAddress is not NULL-checked; a broken
// loader library would fault on the first vkGetInstanceProcAddr call below.
bool vkl_init_loader(void) {
  ngfi_module_handle vkdll = LoadLibraryA(VK_LOADER_LIB);
  if (!vkdll) { return false; }
  vkGetInstanceProcAddr =
      (PFN_vkGetInstanceProcAddr)GetProcAddress(vkdll, "vkGetInstanceProcAddr");
  vkCreateInstance =
      (PFN_vkCreateInstance)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkCreateInstance");
  vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)
      vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceLayerProperties");
  vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)
      vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceVersion");
  vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)
      vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceExtensionProperties");
  return true;
}

#if !defined(__APPLE__)
extern VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN;
#endif
extern VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN;

// Resolves instance-level entry points (physical-device queries, surface and
// debug-utils functions) through vkGetInstanceProcAddr for the given instance.
// Extension functions simply resolve to NULL if the extension is absent.
void vkl_init_instance(VkInstance inst) {
  vkCreateDevice = (PFN_vkCreateDevice)vkGetInstanceProcAddr(inst, "vkCreateDevice");
  vkDestroyInstance = (PFN_vkDestroyInstance)vkGetInstanceProcAddr(inst, "vkDestroyInstance");
  vkEnumerateDeviceExtensionProperties =
      (PFN_vkEnumerateDeviceExtensionProperties)vkGetInstanceProcAddr(inst,
          "vkEnumerateDeviceExtensionProperties");
  vkEnumerateDeviceLayerProperties =
      (PFN_vkEnumerateDeviceLayerProperties)vkGetInstanceProcAddr(inst,
          "vkEnumerateDeviceLayerProperties");
  vkEnumeratePhysicalDevices =
      (PFN_vkEnumeratePhysicalDevices)vkGetInstanceProcAddr(inst,
          "vkEnumeratePhysicalDevices");
  vkGetDeviceProcAddr =
      (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(inst, "vkGetDeviceProcAddr");
  vkGetPhysicalDeviceFeatures =
      (PFN_vkGetPhysicalDeviceFeatures)vkGetInstanceProcAddr(inst,
          "vkGetPhysicalDeviceFeatures");
  vkGetPhysicalDeviceFormatProperties =
      (PFN_vkGetPhysicalDeviceFormatProperties)vkGetInstanceProcAddr(inst,
          "vkGetPhysicalDeviceFormatProperties");
  vkGetPhysicalDeviceImageFormatProperties =
      (PFN_vkGetPhysicalDeviceImageFormatProperties)vkGetInstanceProcAddr(inst,
          "vkGetPhysicalDeviceImageFormatProperties");
  vkGetPhysicalDeviceMemoryProperties =
      (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetInstanceProcAddr(inst,
          "vkGetPhysicalDeviceMemoryProperties");
  vkGetPhysicalDeviceProperties =
      (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(inst,
          "vkGetPhysicalDeviceProperties");
  vkGetPhysicalDeviceQueueFamilyProperties =
      (PFN_vkGetPhysicalDeviceQueueFamilyProperties)vkGetInstanceProcAddr(inst,
"vkGetPhysicalDeviceQueueFamilyProperties"); vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceSparseImageFormatProperties"); #if !defined(__APPLE__) VK_GET_DEVICE_PRES_FN = (VK_GET_DEVICE_PRES_FN_TYPE)vkGetInstanceProcAddr(inst, STRINGIFY(VK_GET_DEVICE_PRES_FN)); #endif VK_CREATE_SURFACE_FN = (VK_CREATE_SURFACE_FN_TYPE)vkGetInstanceProcAddr(inst, STRINGIFY(VK_CREATE_SURFACE_FN)); vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)vkGetInstanceProcAddr(inst, "vkDestroySurfaceKHR"); vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)vkGetInstanceProcAddr( inst, "vkGetPhysicalDeviceSurfaceSupportKHR"); vkCreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, "vkCreateDebugUtilsMessengerEXT"); vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceSurfacePresentModesKHR"); vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceSurfaceFormatsKHR"); vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceFeatures2KHR"); vkDestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr( inst, "vkDestroyDebugUtilsMessengerEXT"); vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdBeginDebugUtilsLabelEXT"); vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdEndDebugUtilsLabelEXT"); } void vkl_init_device(VkDevice dev, bool sync2_supported) { vkAllocateCommandBuffers = 
(PFN_vkAllocateCommandBuffers)vkGetDeviceProcAddr(dev, "vkAllocateCommandBuffers"); vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)vkGetDeviceProcAddr(dev, "vkAllocateDescriptorSets"); vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(dev, "vkAllocateMemory"); vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)vkGetDeviceProcAddr(dev, "vkBeginCommandBuffer"); vkBindBufferMemory = (PFN_vkBindBufferMemory)vkGetDeviceProcAddr(dev, "vkBindBufferMemory"); vkBindImageMemory = (PFN_vkBindImageMemory)vkGetDeviceProcAddr(dev, "vkBindImageMemory"); vkCmdBeginQuery = (PFN_vkCmdBeginQuery)vkGetDeviceProcAddr(dev, "vkCmdBeginQuery"); vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)vkGetDeviceProcAddr(dev, "vkCmdBeginRenderPass"); vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)vkGetDeviceProcAddr(dev, "vkCmdBindDescriptorSets"); vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)vkGetDeviceProcAddr(dev, "vkCmdBindIndexBuffer"); vkCmdBindPipeline = (PFN_vkCmdBindPipeline)vkGetDeviceProcAddr(dev, "vkCmdBindPipeline"); vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)vkGetDeviceProcAddr(dev, "vkCmdBindVertexBuffers"); vkCmdBlitImage = (PFN_vkCmdBlitImage)vkGetDeviceProcAddr(dev, "vkCmdBlitImage"); vkCmdClearAttachments = (PFN_vkCmdClearAttachments)vkGetDeviceProcAddr(dev, "vkCmdClearAttachments"); vkCmdClearColorImage = (PFN_vkCmdClearColorImage)vkGetDeviceProcAddr(dev, "vkCmdClearColorImage"); vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)vkGetDeviceProcAddr(dev, "vkCmdClearDepthStencilImage"); vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkGetDeviceProcAddr(dev, "vkCmdCopyBuffer"); vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)vkGetDeviceProcAddr(dev, "vkCmdCopyBufferToImage"); vkCmdCopyImage = (PFN_vkCmdCopyImage)vkGetDeviceProcAddr(dev, "vkCmdCopyImage"); vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)vkGetDeviceProcAddr(dev, "vkCmdCopyImageToBuffer"); vkCmdCopyQueryPoolResults = 
(PFN_vkCmdCopyQueryPoolResults)vkGetDeviceProcAddr(dev, "vkCmdCopyQueryPoolResults"); vkCmdDispatch = (PFN_vkCmdDispatch)vkGetDeviceProcAddr(dev, "vkCmdDispatch"); vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)vkGetDeviceProcAddr(dev, "vkCmdDispatchIndirect"); vkCmdDraw = (PFN_vkCmdDraw)vkGetDeviceProcAddr(dev, "vkCmdDraw"); vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)vkGetDeviceProcAddr(dev, "vkCmdDrawIndexed"); vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)vkGetDeviceProcAddr(dev, "vkCmdDrawIndexedIndirect"); vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)vkGetDeviceProcAddr(dev, "vkCmdDrawIndirect"); vkCmdEndQuery = (PFN_vkCmdEndQuery)vkGetDeviceProcAddr(dev, "vkCmdEndQuery"); vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)vkGetDeviceProcAddr(dev, "vkCmdEndRenderPass"); vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)vkGetDeviceProcAddr(dev, "vkCmdExecuteCommands"); vkCmdFillBuffer = (PFN_vkCmdFillBuffer)vkGetDeviceProcAddr(dev, "vkCmdFillBuffer"); vkCmdNextSubpass = (PFN_vkCmdNextSubpass)vkGetDeviceProcAddr(dev, "vkCmdNextSubpass"); vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)vkGetDeviceProcAddr(dev, "vkCmdPipelineBarrier"); vkCmdPushConstants = (PFN_vkCmdPushConstants)vkGetDeviceProcAddr(dev, "vkCmdPushConstants"); vkCmdResetEvent = (PFN_vkCmdResetEvent)vkGetDeviceProcAddr(dev, "vkCmdResetEvent"); vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)vkGetDeviceProcAddr(dev, "vkCmdResetQueryPool"); vkCmdResolveImage = (PFN_vkCmdResolveImage)vkGetDeviceProcAddr(dev, "vkCmdResolveImage"); vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)vkGetDeviceProcAddr(dev, "vkCmdSetBlendConstants"); vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)vkGetDeviceProcAddr(dev, "vkCmdSetDepthBias"); vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)vkGetDeviceProcAddr(dev, "vkCmdSetDepthBounds"); vkCmdSetEvent = (PFN_vkCmdSetEvent)vkGetDeviceProcAddr(dev, "vkCmdSetEvent"); vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)vkGetDeviceProcAddr(dev, "vkCmdSetLineWidth"); vkCmdSetScissor = 
(PFN_vkCmdSetScissor)vkGetDeviceProcAddr(dev, "vkCmdSetScissor"); vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)vkGetDeviceProcAddr(dev, "vkCmdSetStencilCompareMask"); vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)vkGetDeviceProcAddr(dev, "vkCmdSetStencilReference"); vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)vkGetDeviceProcAddr(dev, "vkCmdSetStencilWriteMask"); vkCmdSetViewport = (PFN_vkCmdSetViewport)vkGetDeviceProcAddr(dev, "vkCmdSetViewport"); vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)vkGetDeviceProcAddr(dev, "vkCmdUpdateBuffer"); vkCmdWaitEvents = (PFN_vkCmdWaitEvents)vkGetDeviceProcAddr(dev, "vkCmdWaitEvents"); vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)vkGetDeviceProcAddr(dev, "vkCmdWriteTimestamp"); vkCreateBuffer = (PFN_vkCreateBuffer)vkGetDeviceProcAddr(dev, "vkCreateBuffer"); vkCreateBufferView = (PFN_vkCreateBufferView)vkGetDeviceProcAddr(dev, "vkCreateBufferView"); vkCreateCommandPool = (PFN_vkCreateCommandPool)vkGetDeviceProcAddr(dev, "vkCreateCommandPool"); vkCreateComputePipelines = (PFN_vkCreateComputePipelines)vkGetDeviceProcAddr(dev, "vkCreateComputePipelines"); vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)vkGetDeviceProcAddr(dev, "vkCreateDescriptorPool"); vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)vkGetDeviceProcAddr(dev, "vkCreateDescriptorSetLayout"); vkCreateEvent = (PFN_vkCreateEvent)vkGetDeviceProcAddr(dev, "vkCreateEvent"); vkCreateFence = (PFN_vkCreateFence)vkGetDeviceProcAddr(dev, "vkCreateFence"); vkCreateFramebuffer = (PFN_vkCreateFramebuffer)vkGetDeviceProcAddr(dev, "vkCreateFramebuffer"); vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)vkGetDeviceProcAddr(dev, "vkCreateGraphicsPipelines"); vkCreateImage = (PFN_vkCreateImage)vkGetDeviceProcAddr(dev, "vkCreateImage"); vkCreateImageView = (PFN_vkCreateImageView)vkGetDeviceProcAddr(dev, "vkCreateImageView"); vkCreatePipelineCache = (PFN_vkCreatePipelineCache)vkGetDeviceProcAddr(dev, 
"vkCreatePipelineCache"); vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)vkGetDeviceProcAddr(dev, "vkCreatePipelineLayout"); vkCreateQueryPool = (PFN_vkCreateQueryPool)vkGetDeviceProcAddr(dev, "vkCreateQueryPool"); vkCreateRenderPass = (PFN_vkCreateRenderPass)vkGetDeviceProcAddr(dev, "vkCreateRenderPass"); vkCreateSampler = (PFN_vkCreateSampler)vkGetDeviceProcAddr(dev, "vkCreateSampler"); vkCreateSemaphore = (PFN_vkCreateSemaphore)vkGetDeviceProcAddr(dev, "vkCreateSemaphore"); vkCreateShaderModule = (PFN_vkCreateShaderModule)vkGetDeviceProcAddr(dev, "vkCreateShaderModule"); vkDestroyBuffer = (PFN_vkDestroyBuffer)vkGetDeviceProcAddr(dev, "vkDestroyBuffer"); vkDestroyBufferView = (PFN_vkDestroyBufferView)vkGetDeviceProcAddr(dev, "vkDestroyBufferView"); vkDestroyCommandPool = (PFN_vkDestroyCommandPool)vkGetDeviceProcAddr(dev, "vkDestroyCommandPool"); vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)vkGetDeviceProcAddr(dev, "vkDestroyDescriptorPool"); vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)vkGetDeviceProcAddr(dev, "vkDestroyDescriptorSetLayout"); vkDestroyDevice = (PFN_vkDestroyDevice)vkGetDeviceProcAddr(dev, "vkDestroyDevice"); vkDestroyEvent = (PFN_vkDestroyEvent)vkGetDeviceProcAddr(dev, "vkDestroyEvent"); vkDestroyFence = (PFN_vkDestroyFence)vkGetDeviceProcAddr(dev, "vkDestroyFence"); vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)vkGetDeviceProcAddr(dev, "vkDestroyFramebuffer"); vkDestroyImage = (PFN_vkDestroyImage)vkGetDeviceProcAddr(dev, "vkDestroyImage"); vkDestroyImageView = (PFN_vkDestroyImageView)vkGetDeviceProcAddr(dev, "vkDestroyImageView"); vkDestroyPipeline = (PFN_vkDestroyPipeline)vkGetDeviceProcAddr(dev, "vkDestroyPipeline"); vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)vkGetDeviceProcAddr(dev, "vkDestroyPipelineCache"); vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)vkGetDeviceProcAddr(dev, "vkDestroyPipelineLayout"); vkDestroyQueryPool = (PFN_vkDestroyQueryPool)vkGetDeviceProcAddr(dev, 
"vkDestroyQueryPool"); vkDestroyRenderPass = (PFN_vkDestroyRenderPass)vkGetDeviceProcAddr(dev, "vkDestroyRenderPass"); vkDestroySampler = (PFN_vkDestroySampler)vkGetDeviceProcAddr(dev, "vkDestroySampler"); vkDestroySemaphore = (PFN_vkDestroySemaphore)vkGetDeviceProcAddr(dev, "vkDestroySemaphore"); vkDestroyShaderModule = (PFN_vkDestroyShaderModule)vkGetDeviceProcAddr(dev, "vkDestroyShaderModule"); vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)vkGetDeviceProcAddr(dev, "vkDeviceWaitIdle"); vkEndCommandBuffer = (PFN_vkEndCommandBuffer)vkGetDeviceProcAddr(dev, "vkEndCommandBuffer"); vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkGetDeviceProcAddr(dev, "vkFlushMappedMemoryRanges"); vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)vkGetDeviceProcAddr(dev, "vkFreeCommandBuffers"); vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)vkGetDeviceProcAddr(dev, "vkFreeDescriptorSets"); vkFreeMemory = (PFN_vkFreeMemory)vkGetDeviceProcAddr(dev, "vkFreeMemory"); vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetDeviceProcAddr(dev, "vkGetBufferMemoryRequirements"); vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)vkGetDeviceProcAddr(dev, "vkGetDeviceMemoryCommitment"); vkGetDeviceQueue = (PFN_vkGetDeviceQueue)vkGetDeviceProcAddr(dev, "vkGetDeviceQueue"); vkGetEventStatus = (PFN_vkGetEventStatus)vkGetDeviceProcAddr(dev, "vkGetEventStatus"); vkGetFenceStatus = (PFN_vkGetFenceStatus)vkGetDeviceProcAddr(dev, "vkGetFenceStatus"); vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetDeviceProcAddr(dev, "vkGetImageMemoryRequirements"); vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)vkGetDeviceProcAddr( dev, "vkGetImageSparseMemoryRequirements"); vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)vkGetDeviceProcAddr(dev, "vkGetImageSubresourceLayout"); vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)vkGetDeviceProcAddr(dev, "vkGetPipelineCacheData"); vkGetQueryPoolResults = 
(PFN_vkGetQueryPoolResults)vkGetDeviceProcAddr(dev, "vkGetQueryPoolResults"); vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)vkGetDeviceProcAddr(dev, "vkGetRenderAreaGranularity"); vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkGetDeviceProcAddr( dev, "vkInvalidateMappedMemoryRanges"); vkMapMemory = (PFN_vkMapMemory)vkGetDeviceProcAddr(dev, "vkMapMemory"); vkMergePipelineCaches = (PFN_vkMergePipelineCaches)vkGetDeviceProcAddr(dev, "vkMergePipelineCaches"); vkQueueBindSparse = (PFN_vkQueueBindSparse)vkGetDeviceProcAddr(dev, "vkQueueBindSparse"); vkQueueSubmit = (PFN_vkQueueSubmit)vkGetDeviceProcAddr(dev, "vkQueueSubmit"); vkQueueWaitIdle = (PFN_vkQueueWaitIdle)vkGetDeviceProcAddr(dev, "vkQueueWaitIdle"); vkResetCommandBuffer = (PFN_vkResetCommandBuffer)vkGetDeviceProcAddr(dev, "vkResetCommandBuffer"); vkResetCommandPool = (PFN_vkResetCommandPool)vkGetDeviceProcAddr(dev, "vkResetCommandPool"); vkResetDescriptorPool = (PFN_vkResetDescriptorPool)vkGetDeviceProcAddr(dev, "vkResetDescriptorPool"); vkResetEvent = (PFN_vkResetEvent)vkGetDeviceProcAddr(dev, "vkResetEvent"); vkResetFences = (PFN_vkResetFences)vkGetDeviceProcAddr(dev, "vkResetFences"); vkSetEvent = (PFN_vkSetEvent)vkGetDeviceProcAddr(dev, "vkSetEvent"); vkUnmapMemory = (PFN_vkUnmapMemory)vkGetDeviceProcAddr(dev, "vkUnmapMemory"); vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)vkGetDeviceProcAddr(dev, "vkUpdateDescriptorSets"); vkWaitForFences = (PFN_vkWaitForFences)vkGetDeviceProcAddr(dev, "vkWaitForFences"); vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)vkGetDeviceProcAddr(dev, "vkCreateSwapchainKHR"); vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)vkGetDeviceProcAddr(dev, "vkDestroySwapchainKHR"); vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)vkGetDeviceProcAddr(dev, "vkGetSwapchainImagesKHR"); vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)vkGetDeviceProcAddr(dev, "vkAcquireNextImageKHR"); vkQueuePresentKHR = 
(PFN_vkQueuePresentKHR)vkGetDeviceProcAddr(dev, "vkQueuePresentKHR"); if (sync2_supported) { vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)vkGetDeviceProcAddr(dev, "vkCmdPipelineBarrier2KHR"); } } ================================================ FILE: source/ngf-vk/vk_10.h ================================================ #pragma once #if defined(_WIN32)||defined(_WIN64) #define VK_GET_DEVICE_PRES_FN vkGetPhysicalDeviceWin32PresentationSupportKHR #define VK_GET_DEVICE_PRES_FN_TYPE PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR #define VK_SURFACE_EXT "VK_KHR_win32_surface" #define VK_CREATE_SURFACE_FN vkCreateWin32SurfaceKHR #define VK_CREATE_SURFACE_FN_TYPE PFN_vkCreateWin32SurfaceKHR #define VK_USE_PLATFORM_WIN32_KHR #define WIN32_LEAN_AND_MEAN #include #elif defined(__ANDROID__) #define VK_GET_DEVICE_PRES_FN vkGetPhysicalDeviceAndroidPresentationSupportKHR #define VK_GET_DEVICE_PRES_FN_TYPE PFN_vkGetPhysicalDeviceAndroidPresentationSupportKHR #define VK_SURFACE_EXT "VK_KHR_android_surface" #define VK_CREATE_SURFACE_FN vkCreateAndroidSurfaceKHR #define VK_CREATE_SURFACE_FN_TYPE PFN_vkCreateAndroidSurfaceKHR #define VK_USE_PLATFORM_ANDROID_KHR #elif defined(__APPLE__) #include #define VK_SURFACE_EXT "VK_EXT_metal_surface" #define VK_CREATE_SURFACE_FN vkCreateMetalSurfaceEXT #define VK_CREATE_SURFACE_FN_TYPE PFN_vkCreateMetalSurfaceEXT #define VK_USE_PLATFORM_METAL_EXT #else #include #include #include #define VK_GET_DEVICE_PRES_FN vkGetPhysicalDeviceXcbPresentationSupportKHR #define VK_GET_DEVICE_PRES_FN_TYPE PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR #define VK_SURFACE_EXT "VK_KHR_xcb_surface" #define VK_CREATE_SURFACE_FN vkCreateXcbSurfaceKHR #define VK_CREATE_SURFACE_FN_TYPE PFN_vkCreateXcbSurfaceKHR #define VK_USE_PLATFORM_XCB_KHR #endif #define VK_NO_PROTOTYPES #include #include #ifdef __cplusplus extern "C" { #endif extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; extern PFN_vkGetInstanceProcAddr 
vkGetInstanceProcAddr; extern PFN_vkCreateInstance vkCreateInstance; extern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; extern PFN_vkCreateDevice vkCreateDevice; extern PFN_vkDestroyInstance vkDestroyInstance; extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; #if !defined(__APPLE__) extern VK_GET_DEVICE_PRES_FN_TYPE VK_GET_DEVICE_PRES_FN; #endif extern VK_CREATE_SURFACE_FN_TYPE VK_CREATE_SURFACE_FN; extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; extern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; extern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; extern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; extern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; extern PFN_vkAllocateCommandBuffers 
vkAllocateCommandBuffers; extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; extern PFN_vkAllocateMemory vkAllocateMemory; extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; extern PFN_vkBindBufferMemory vkBindBufferMemory; extern PFN_vkBindImageMemory vkBindImageMemory; extern PFN_vkCmdBeginQuery vkCmdBeginQuery; extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; extern PFN_vkCmdBindPipeline vkCmdBindPipeline; extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; extern PFN_vkCmdBlitImage vkCmdBlitImage; extern PFN_vkCmdClearAttachments vkCmdClearAttachments; extern PFN_vkCmdClearColorImage vkCmdClearColorImage; extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; extern PFN_vkCmdCopyImage vkCmdCopyImage; extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; extern PFN_vkCmdDispatch vkCmdDispatch; extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; extern PFN_vkCmdDraw vkCmdDraw; extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed; extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect; extern PFN_vkCmdEndQuery vkCmdEndQuery; extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass; extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; extern PFN_vkCmdFillBuffer vkCmdFillBuffer; extern PFN_vkCmdNextSubpass vkCmdNextSubpass; extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; extern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; extern PFN_vkCmdPushConstants vkCmdPushConstants; extern PFN_vkCmdResetEvent vkCmdResetEvent; extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; extern PFN_vkCmdResolveImage vkCmdResolveImage; extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; extern PFN_vkCmdSetDepthBias 
vkCmdSetDepthBias; extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; extern PFN_vkCmdSetEvent vkCmdSetEvent; extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth; extern PFN_vkCmdSetScissor vkCmdSetScissor; extern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference; extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; extern PFN_vkCmdSetViewport vkCmdSetViewport; extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; extern PFN_vkCmdWaitEvents vkCmdWaitEvents; extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; extern PFN_vkCreateBuffer vkCreateBuffer; extern PFN_vkCreateBufferView vkCreateBufferView; extern PFN_vkCreateCommandPool vkCreateCommandPool; extern PFN_vkCreateComputePipelines vkCreateComputePipelines; extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; extern PFN_vkCreateEvent vkCreateEvent; extern PFN_vkCreateFence vkCreateFence; extern PFN_vkCreateFramebuffer vkCreateFramebuffer; extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; extern PFN_vkCreateImage vkCreateImage; extern PFN_vkCreateImageView vkCreateImageView; extern PFN_vkCreatePipelineCache vkCreatePipelineCache; extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; extern PFN_vkCreateQueryPool vkCreateQueryPool; extern PFN_vkCreateRenderPass vkCreateRenderPass; extern PFN_vkCreateSampler vkCreateSampler; extern PFN_vkCreateSemaphore vkCreateSemaphore; extern PFN_vkCreateShaderModule vkCreateShaderModule; extern PFN_vkDestroyBuffer vkDestroyBuffer; extern PFN_vkDestroyBufferView vkDestroyBufferView; extern PFN_vkDestroyCommandPool vkDestroyCommandPool; extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; extern PFN_vkDestroyDevice vkDestroyDevice; extern PFN_vkDestroyEvent vkDestroyEvent; extern PFN_vkDestroyFence vkDestroyFence; extern PFN_vkDestroyFramebuffer 
vkDestroyFramebuffer; extern PFN_vkDestroyImage vkDestroyImage; extern PFN_vkDestroyImageView vkDestroyImageView; extern PFN_vkDestroyPipeline vkDestroyPipeline; extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; extern PFN_vkDestroyQueryPool vkDestroyQueryPool; extern PFN_vkDestroyRenderPass vkDestroyRenderPass; extern PFN_vkDestroySampler vkDestroySampler; extern PFN_vkDestroySemaphore vkDestroySemaphore; extern PFN_vkDestroyShaderModule vkDestroyShaderModule; extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; extern PFN_vkEndCommandBuffer vkEndCommandBuffer; extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; extern PFN_vkFreeMemory vkFreeMemory; extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; extern PFN_vkGetDeviceQueue vkGetDeviceQueue; extern PFN_vkGetEventStatus vkGetEventStatus; extern PFN_vkGetFenceStatus vkGetFenceStatus; extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; extern PFN_vkMapMemory vkMapMemory; extern PFN_vkMergePipelineCaches vkMergePipelineCaches; extern PFN_vkQueueBindSparse vkQueueBindSparse; extern PFN_vkQueueSubmit vkQueueSubmit; extern PFN_vkQueueWaitIdle vkQueueWaitIdle; extern PFN_vkResetCommandBuffer vkResetCommandBuffer; extern PFN_vkResetCommandPool 
vkResetCommandPool; extern PFN_vkResetDescriptorPool vkResetDescriptorPool; extern PFN_vkResetEvent vkResetEvent; extern PFN_vkResetFences vkResetFences; extern PFN_vkSetEvent vkSetEvent; extern PFN_vkUnmapMemory vkUnmapMemory; extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; extern PFN_vkWaitForFences vkWaitForFences; extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; extern PFN_vkQueuePresentKHR vkQueuePresentKHR; extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; bool vkl_init_loader(void); void vkl_init_instance(VkInstance instance); void vkl_init_device(VkDevice device, bool sync2_supported); #ifdef __cplusplus } #endif ================================================ FILE: tests/arena-alloc-tests.cpp ================================================ /** * Copyright (c) 2025 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ // Disable warning about setjmp/longjmp interaction with C++ object destruction. // This is expected when using nicetest with C++ objects - memory may leak on // assertion failure, but this is acceptable for testing. #if defined(_MSC_VER) #pragma warning(disable : 4611) #endif #define NT_BREAK_ON_ASSERT_FAIL #include "ngf-common/arena.h" #include "ngf-common/macros.h" // Wrap nicetest.h in extern "C" to match linkage with test-suite-runner.c extern "C" { #include "nicetest.h" } #include #include #include // Helper to check pointer alignment static bool is_aligned(void* ptr, size_t alignment) { return (reinterpret_cast(ptr) & (alignment - 1)) == 0; } NT_TESTSUITE { /* Basic tests */ NT_TESTCASE("arena: create and destroy") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); NT_ASSERT(arena.total_allocated() > 0); NT_ASSERT(arena.total_used() == 0); } NT_TESTCASE("arena: default constructed is invalid") { ngfi::arena arena; NT_ASSERT(!arena.is_valid()); NT_ASSERT(arena.total_allocated() == 0); NT_ASSERT(arena.total_used() == 0); } NT_TESTCASE("arena: single allocation") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); void* ptr = arena.alloc(64); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT)); NT_ASSERT(arena.total_used() >= 64); // Write to the memory to verify it's usable std::memset(ptr, 0xAB, 64); } NT_TESTCASE("arena: multiple sequential allocations") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); void* ptrs[10]; for (int i = 0; i < 10; ++i) { ptrs[i] = arena.alloc(32); NT_ASSERT(ptrs[i] != nullptr); NT_ASSERT(is_aligned(ptrs[i], NGFI_MAX_ALIGNMENT)); // Write unique pattern 
std::memset(ptrs[i], i + 1, 32); } // Verify patterns for (int i = 0; i < 10; ++i) { uint8_t* bytes = static_cast(ptrs[i]); for (int j = 0; j < 32; ++j) { NT_ASSERT(bytes[j] == static_cast(i + 1)); } } } NT_TESTCASE("arena: allocations are distinct") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); void* ptr1 = arena.alloc(100); void* ptr2 = arena.alloc(100); void* ptr3 = arena.alloc(100); NT_ASSERT(ptr1 != nullptr); NT_ASSERT(ptr2 != nullptr); NT_ASSERT(ptr3 != nullptr); NT_ASSERT(ptr1 != ptr2); NT_ASSERT(ptr2 != ptr3); NT_ASSERT(ptr1 != ptr3); } /* Capacity tests */ NT_TESTCASE("arena: fill initial capacity") { const size_t capacity = 256; ngfi::arena arena = ngfi::arena::create(capacity); NT_ASSERT(arena.is_valid()); // Allocate small chunks until we exceed initial capacity size_t total_alloc = 0; while (total_alloc < capacity * 2) { void* ptr = arena.alloc(16); NT_ASSERT(ptr != nullptr); total_alloc += 16; } } NT_TESTCASE("arena: trigger block growth") { const size_t initial_capacity = 64; ngfi::arena arena = ngfi::arena::create(initial_capacity); NT_ASSERT(arena.is_valid()); size_t initial_allocated = arena.total_allocated(); // Allocate more than initial capacity void* ptr1 = arena.alloc(initial_capacity); NT_ASSERT(ptr1 != nullptr); void* ptr2 = arena.alloc(initial_capacity); NT_ASSERT(ptr2 != nullptr); // Should have grown NT_ASSERT(arena.total_allocated() > initial_allocated); } NT_TESTCASE("arena: large allocation exceeding initial capacity") { const size_t initial_capacity = 64; ngfi::arena arena = ngfi::arena::create(initial_capacity); NT_ASSERT(arena.is_valid()); // Allocate more than initial capacity in one go void* ptr = arena.alloc(initial_capacity * 4); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT)); } NT_TESTCASE("arena: many small allocations") { ngfi::arena arena = ngfi::arena::create(128); NT_ASSERT(arena.is_valid()); // Many small allocations for (int i = 0; i < 1000; ++i) { void* ptr = 
arena.alloc(1); NT_ASSERT(ptr != nullptr); } } /* Reset tests */ NT_TESTCASE("arena: reset and reallocate") { ngfi::arena arena = ngfi::arena::create(256); NT_ASSERT(arena.is_valid()); void* ptr1 = arena.alloc(100); NT_ASSERT(ptr1 != nullptr); NT_ASSERT(arena.total_used() >= 100); arena.reset(); NT_ASSERT(arena.total_used() == 0); void* ptr2 = arena.alloc(100); NT_ASSERT(ptr2 != nullptr); NT_ASSERT(arena.total_used() >= 100); } NT_TESTCASE("arena: multiple reset cycles") { ngfi::arena arena = ngfi::arena::create(128); NT_ASSERT(arena.is_valid()); for (int cycle = 0; cycle < 10; ++cycle) { for (int i = 0; i < 20; ++i) { void* ptr = arena.alloc(16); NT_ASSERT(ptr != nullptr); } arena.reset(); NT_ASSERT(arena.total_used() == 0); } } NT_TESTCASE("arena: reset releases overflow blocks") { const size_t initial_capacity = 64; ngfi::arena arena = ngfi::arena::create(initial_capacity); NT_ASSERT(arena.is_valid()); size_t initial_allocated = arena.total_allocated(); // Force overflow blocks for (int i = 0; i < 10; ++i) { arena.alloc(initial_capacity); } NT_ASSERT(arena.total_allocated() > initial_allocated); arena.reset(); NT_ASSERT(arena.total_used() == 0); NT_ASSERT(arena.total_allocated() == initial_allocated); } /* Alignment tests */ NT_TESTCASE("arena: default alignment") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); for (int i = 0; i < 100; ++i) { void* ptr = arena.alloc(1 + (i % 32)); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT)); } } NT_TESTCASE("arena: custom alignments") { ngfi::arena arena = ngfi::arena::create(4096); NT_ASSERT(arena.is_valid()); size_t alignments[] = {1, 2, 4, 8, 16, 32, 64}; for (size_t align : alignments) { void* ptr = arena.alloc_aligned(32, align); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, align)); } } NT_TESTCASE("arena: alignment near block boundary") { const size_t initial_capacity = 128; ngfi::arena arena = ngfi::arena::create(initial_capacity); 
NT_ASSERT(arena.is_valid()); // Fill most of the block arena.alloc(initial_capacity - 20); // Allocate with large alignment - should go to new block void* ptr = arena.alloc_aligned(16, 64); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, 64)); } /* Edge cases */ NT_TESTCASE("arena: zero-size allocation returns nullptr") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); void* ptr = arena.alloc(0); NT_ASSERT(ptr == nullptr); } NT_TESTCASE("arena: alloc on invalid arena returns nullptr") { ngfi::arena arena; NT_ASSERT(!arena.is_valid()); void* ptr = arena.alloc(64); NT_ASSERT(ptr == nullptr); } NT_TESTCASE("arena: reset on invalid arena is safe") { ngfi::arena arena; NT_ASSERT(!arena.is_valid()); arena.reset(); // Should not crash } /* Move semantics tests */ NT_TESTCASE("arena: move constructor") { ngfi::arena arena1 = ngfi::arena::create(256); NT_ASSERT(arena1.is_valid()); void* ptr = arena1.alloc(64); NT_ASSERT(ptr != nullptr); size_t used = arena1.total_used(); size_t allocated = arena1.total_allocated(); ngfi::arena arena2(static_cast(arena1)); // arena2 should have taken ownership NT_ASSERT(arena2.is_valid()); NT_ASSERT(arena2.total_used() == used); NT_ASSERT(arena2.total_allocated() == allocated); // arena1 should be invalid NT_ASSERT(!arena1.is_valid()); NT_ASSERT(arena1.total_used() == 0); NT_ASSERT(arena1.total_allocated() == 0); // Can still allocate from arena2 void* ptr2 = arena2.alloc(64); NT_ASSERT(ptr2 != nullptr); } /* Fuzz tests */ NT_TESTCASE("arena: fuzz random allocation sizes") { std::srand(static_cast(std::time(nullptr))); ngfi::arena arena = ngfi::arena::create(256); NT_ASSERT(arena.is_valid()); for (int i = 0; i < 1000; ++i) { size_t size = 1 + (std::rand() % 256); void* ptr = arena.alloc(size); NT_ASSERT(ptr != nullptr); NT_ASSERT(is_aligned(ptr, NGFI_MAX_ALIGNMENT)); } } NT_TESTCASE("arena: fuzz random reset patterns") { std::srand(static_cast(std::time(nullptr))); ngfi::arena arena = 
ngfi::arena::create(128); NT_ASSERT(arena.is_valid()); for (int i = 0; i < 500; ++i) { size_t size = 1 + (std::rand() % 64); void* ptr = arena.alloc(size); NT_ASSERT(ptr != nullptr); // Randomly reset if (std::rand() % 10 == 0) { arena.reset(); NT_ASSERT(arena.total_used() == 0); } } } /* Statistics tests */ NT_TESTCASE("arena: total_allocated tracking") { ngfi::arena arena = ngfi::arena::create(256); NT_ASSERT(arena.is_valid()); size_t initial = arena.total_allocated(); NT_ASSERT(initial > 0); // Force growth arena.alloc(512); NT_ASSERT(arena.total_allocated() > initial); } NT_TESTCASE("arena: total_used tracking") { ngfi::arena arena = ngfi::arena::create(1024); NT_ASSERT(arena.is_valid()); NT_ASSERT(arena.total_used() == 0); arena.alloc(64); NT_ASSERT(arena.total_used() >= 64); size_t used_before = arena.total_used(); arena.alloc(128); NT_ASSERT(arena.total_used() >= used_before + 128); arena.reset(); NT_ASSERT(arena.total_used() == 0); } } ================================================ FILE: tests/common-tests.cpp ================================================ #include "ngf-common/arena.h" #include "ngf-common/array.h" #include "ngf-common/chunked-list.h" #include "ngf-common/cmdbuf-state.h" #include "ngf-common/frame-token.h" #include "ngf-common/hashtable.h" #include "ngf-common/unique-ptr.h" #include "ngf-common/value-or-error.h" #include "utest.h" // Use system allocator for tests to avoid NGF allocation callback setup. template using test_array = ngfi::array; UTEST_STATE(); int main(int argc, const char* const argv[]) { // Initialize NGF allocation callbacks (initializes the mutex). 
ngfi_set_allocation_callbacks(NULL); return utest_main(argc, argv); } UTEST(array, default_construction) { test_array arr; ASSERT_EQ(0u, arr.size()); ASSERT_EQ(0u, arr.capacity()); ASSERT_TRUE(arr.empty()); ASSERT_EQ(nullptr, arr.data()); } UTEST(array, size_construction) { test_array arr(10); ASSERT_EQ(10u, arr.size()); ASSERT_EQ(10u, arr.capacity()); ASSERT_FALSE(arr.empty()); ASSERT_NE(nullptr, arr.data()); } UTEST(array, push_back) { test_array arr; arr.push_back(1); arr.push_back(2); arr.push_back(3); ASSERT_EQ(3u, arr.size()); ASSERT_EQ(1, arr[0]); ASSERT_EQ(2, arr[1]); ASSERT_EQ(3, arr[2]); } UTEST(array, emplace_back) { test_array arr; arr.emplace_back(42); arr.emplace_back(100); ASSERT_EQ(2u, arr.size()); ASSERT_EQ(42, arr[0]); ASSERT_EQ(100, arr[1]); } UTEST(array, pop_back) { test_array arr; arr.push_back(1); arr.push_back(2); arr.push_back(3); arr.pop_back(); ASSERT_EQ(2u, arr.size()); ASSERT_EQ(1, arr[0]); ASSERT_EQ(2, arr[1]); } UTEST(array, pop_back_empty) { test_array arr; arr.pop_back(); // Should not crash. ASSERT_EQ(0u, arr.size()); } UTEST(array, clear) { test_array arr; arr.push_back(1); arr.push_back(2); arr.clear(); ASSERT_EQ(0u, arr.size()); ASSERT_TRUE(arr.empty()); ASSERT_GT(arr.capacity(), 0u); // Capacity should remain. 
} UTEST(array, front_and_back) { test_array arr; arr.push_back(10); arr.push_back(20); arr.push_back(30); ASSERT_EQ(10, arr.front()); ASSERT_EQ(30, arr.back()); } UTEST(array, resize_grow) { test_array arr; arr.resize(5); ASSERT_EQ(5u, arr.size()); ASSERT_GE(arr.capacity(), 5u); } UTEST(array, resize_shrink) { test_array arr; arr.push_back(1); arr.push_back(2); arr.push_back(3); arr.resize(1); ASSERT_EQ(1u, arr.size()); ASSERT_EQ(1, arr[0]); } UTEST(array, reserve) { test_array arr; arr.reserve(100); ASSERT_EQ(0u, arr.size()); ASSERT_GE(arr.capacity(), 100u); } UTEST(array, reserve_smaller_noop) { test_array arr; arr.reserve(100); size_t cap = arr.capacity(); arr.reserve(50); ASSERT_EQ(cap, arr.capacity()); // Should not shrink. } UTEST(array, iterators) { test_array arr; arr.push_back(1); arr.push_back(2); arr.push_back(3); int sum = 0; for (auto it = arr.begin(); it != arr.end(); ++it) { sum += *it; } ASSERT_EQ(6, sum); } UTEST(array, range_for) { test_array arr; arr.push_back(10); arr.push_back(20); arr.push_back(30); int sum = 0; for (int val : arr) { sum += val; } ASSERT_EQ(60, sum); } UTEST(array, move_construction) { test_array arr1; arr1.push_back(1); arr1.push_back(2); test_array arr2(ngfi::move(arr1)); ASSERT_EQ(2u, arr2.size()); ASSERT_EQ(1, arr2[0]); ASSERT_EQ(2, arr2[1]); ASSERT_EQ(0u, arr1.size()); ASSERT_EQ(nullptr, arr1.data()); } UTEST(array, move_assignment) { test_array arr1; arr1.push_back(1); arr1.push_back(2); test_array arr2; arr2.push_back(100); arr2 = ngfi::move(arr1); ASSERT_EQ(2u, arr2.size()); ASSERT_EQ(1, arr2[0]); ASSERT_EQ(2, arr2[1]); ASSERT_EQ(0u, arr1.size()); ASSERT_EQ(nullptr, arr1.data()); } UTEST(array, growth_on_push) { test_array arr; for (int i = 0; i < 100; ++i) { arr.push_back(i); } ASSERT_EQ(100u, arr.size()); for (int i = 0; i < 100; ++i) { ASSERT_EQ(i, arr[(size_t)i]); } } // Helper struct for value_or_error tests. 
// Helper value type for value_or_error tests. Move operations zero out the
// moved-from instance so tests can detect that a move actually occurred.
struct test_value {
  int x;
  int y;
  test_value(int x_, int y_) : x(x_), y(y_) {}
  test_value(test_value&& other) : x(other.x), y(other.y) {
    other.x = 0;
    other.y = 0;
  }
  test_value& operator=(test_value&& other) {
    x       = other.x;
    y       = other.y;
    other.x = 0;
    other.y = 0;
    return *this;
  }
};

UTEST(value_or_error, construct_with_value) {
  // NOTE(review): value_or_ngferr template arguments below were lost in
  // extraction and have been reconstructed from the stored value types;
  // confirm against the original file.
  ngfi::value_or_ngferr<int> result{42};
  ASSERT_FALSE(result.has_error());
  ASSERT_EQ(NGF_ERROR_OK, result.error());
  ASSERT_EQ(42, result.value());
}

UTEST(value_or_error, construct_with_error) {
  ngfi::value_or_ngferr<int> result{NGF_ERROR_OUT_OF_MEM};
  ASSERT_TRUE(result.has_error());
  ASSERT_EQ(NGF_ERROR_OUT_OF_MEM, result.error());
}

UTEST(value_or_error, construct_with_struct_value) {
  ngfi::value_or_ngferr<test_value> result{test_value{10, 20}};
  ASSERT_FALSE(result.has_error());
  ASSERT_EQ(10, result.value().x);
  ASSERT_EQ(20, result.value().y);
}

UTEST(value_or_error, modify_value) {
  ngfi::value_or_ngferr<int> result{100};
  result.value() = 200;
  ASSERT_EQ(200, result.value());
}

UTEST(value_or_error, move_construction_with_value) {
  ngfi::value_or_ngferr<test_value> result1{test_value{5, 10}};
  ngfi::value_or_ngferr<test_value> result2{ngfi::move(result1)};
  ASSERT_FALSE(result2.has_error());
  ASSERT_EQ(5, result2.value().x);
  ASSERT_EQ(10, result2.value().y);
  // After move, result1 should have error (missing_value_error).
ASSERT_TRUE(result1.has_error()); } UTEST(value_or_error, move_construction_with_error) { ngfi::value_or_ngferr result1{NGF_ERROR_OBJECT_CREATION_FAILED}; ngfi::value_or_ngferr result2{ngfi::move(result1)}; ASSERT_TRUE(result2.has_error()); ASSERT_EQ(NGF_ERROR_OBJECT_CREATION_FAILED, result2.error()); } UTEST(value_or_error, move_assignment_value_to_value) { ngfi::value_or_ngferr result1{test_value{1, 2}}; ngfi::value_or_ngferr result2{test_value{3, 4}}; result2 = ngfi::move(result1); ASSERT_FALSE(result2.has_error()); ASSERT_EQ(1, result2.value().x); ASSERT_EQ(2, result2.value().y); ASSERT_TRUE(result1.has_error()); } UTEST(value_or_error, move_assignment_error_to_value) { ngfi::value_or_ngferr result1{NGF_ERROR_OUT_OF_MEM}; ngfi::value_or_ngferr result2{42}; result2 = ngfi::move(result1); ASSERT_TRUE(result2.has_error()); ASSERT_EQ(NGF_ERROR_OUT_OF_MEM, result2.error()); } UTEST(value_or_error, move_assignment_value_to_error) { ngfi::value_or_ngferr result1{42}; ngfi::value_or_ngferr result2{NGF_ERROR_OUT_OF_MEM}; result2 = ngfi::move(result1); ASSERT_FALSE(result2.has_error()); ASSERT_EQ(42, result2.value()); } UTEST(value_or_error, const_value_access) { const ngfi::value_or_ngferr result{99}; ASSERT_EQ(99, result.value()); } // Helper struct for unique_ptr tests. 
// Helper type for unique_ptr tests: counts live instances so tests can verify
// that constructions and destructions are correctly paired.
struct tracked_object {
  static int instance_count;
  int        value;
  tracked_object(int v = 0) : value(v) { ++instance_count; }
  ~tracked_object() { --instance_count; }
};
int tracked_object::instance_count = 0;

UTEST(unique_ptr, default_construction) {
  // NOTE(review): unique_ptr template arguments below were lost in extraction
  // and reconstructed as <tracked_object> from the surrounding assertions;
  // confirm against the original file.
  ngfi::unique_ptr<tracked_object> ptr;
  ASSERT_FALSE(ptr);
  ASSERT_EQ(nullptr, ptr.get());
}

UTEST(unique_ptr, construct_from_pointer) {
  tracked_object::instance_count = 0;
  {
    // The instance-count assertion below implies NGFI_ALLOC constructs the
    // object (count becomes 1) - presumably placement-new inside the macro;
    // TODO(review): confirm against ngf-common/macros.h.
    auto* raw = NGFI_ALLOC(tracked_object);
    ngfi::unique_ptr<tracked_object> ptr{raw};
    ASSERT_TRUE(ptr);
    ASSERT_EQ(raw, ptr.get());
    ASSERT_EQ(1, tracked_object::instance_count);
  }
  ASSERT_EQ(0, tracked_object::instance_count);
}

UTEST(unique_ptr, make) {
  tracked_object::instance_count = 0;
  {
    auto ptr = ngfi::unique_ptr<tracked_object>::make(42);
    ASSERT_TRUE(ptr);
    ASSERT_EQ(42, ptr->value);
    ASSERT_EQ(1, tracked_object::instance_count);
  }
  ASSERT_EQ(0, tracked_object::instance_count);
}

UTEST(unique_ptr, arrow_operator) {
  auto ptr = ngfi::unique_ptr<tracked_object>::make(100);
  ASSERT_EQ(100, ptr->value);
  ptr->value = 200;
  ASSERT_EQ(200, ptr->value);
}

UTEST(unique_ptr, release) {
  tracked_object::instance_count = 0;
  tracked_object* raw            = nullptr;
  {
    auto ptr = ngfi::unique_ptr<tracked_object>::make(5);
    raw      = ptr.release();
    ASSERT_FALSE(ptr);
    ASSERT_EQ(nullptr, ptr.get());
    ASSERT_EQ(1, tracked_object::instance_count);
  }
  // Object should still exist after unique_ptr destruction.
ASSERT_EQ(1, tracked_object::instance_count); ASSERT_EQ(5, raw->value); NGFI_FREE(raw); ASSERT_EQ(0, tracked_object::instance_count); } UTEST(unique_ptr, move_construction) { tracked_object::instance_count = 0; { auto ptr1 = ngfi::unique_ptr::make(10); auto* raw = ptr1.get(); ngfi::unique_ptr ptr2{ngfi::move(ptr1)}; ASSERT_FALSE(ptr1); ASSERT_TRUE(ptr2); ASSERT_EQ(raw, ptr2.get()); ASSERT_EQ(10, ptr2->value); ASSERT_EQ(1, tracked_object::instance_count); } ASSERT_EQ(0, tracked_object::instance_count); } UTEST(unique_ptr, move_assignment) { tracked_object::instance_count = 0; { auto ptr1 = ngfi::unique_ptr::make(1); auto ptr2 = ngfi::unique_ptr::make(2); ASSERT_EQ(2, tracked_object::instance_count); auto* raw1 = ptr1.get(); ptr2 = ngfi::move(ptr1); ASSERT_FALSE(ptr1); ASSERT_TRUE(ptr2); ASSERT_EQ(raw1, ptr2.get()); ASSERT_EQ(1, ptr2->value); // Old ptr2 object should be destroyed. ASSERT_EQ(1, tracked_object::instance_count); } ASSERT_EQ(0, tracked_object::instance_count); } UTEST(unique_ptr, move_assignment_to_empty) { tracked_object::instance_count = 0; { auto ptr1 = ngfi::unique_ptr::make(7); ngfi::unique_ptr ptr2; ptr2 = ngfi::move(ptr1); ASSERT_FALSE(ptr1); ASSERT_TRUE(ptr2); ASSERT_EQ(7, ptr2->value); ASSERT_EQ(1, tracked_object::instance_count); } ASSERT_EQ(0, tracked_object::instance_count); } UTEST(unique_ptr, const_get) { auto ptr = ngfi::unique_ptr::make(50); const auto& const_ptr = ptr; ASSERT_EQ(ptr.get(), const_ptr.get()); ASSERT_EQ(50, const_ptr.get()->value); } UTEST(unique_ptr, bool_conversion) { ngfi::unique_ptr empty; auto filled = ngfi::unique_ptr::make(); ASSERT_FALSE(empty); ASSERT_TRUE(filled); if (empty) { ASSERT_TRUE(false); // Should not reach here. } if (filled) { ASSERT_TRUE(true); // Should reach here. } else { ASSERT_TRUE(false); // Should not reach here. 
} } UTEST(hashtable, default_construction) { ngfi::hashtable ht; ASSERT_EQ(0u, ht.size()); ASSERT_EQ(0u, ht.capacity()); ASSERT_TRUE(ht.empty()); } UTEST(hashtable, construction_with_capacity) { ngfi::hashtable ht(200); ASSERT_EQ(0u, ht.size()); ASSERT_TRUE(ht.empty()); // Capacity is only allocated on first insert. } UTEST(hashtable, insert_and_get) { ngfi::hashtable ht; int* val = ht.insert(42, 100); ASSERT_NE(nullptr, val); ASSERT_EQ(100, *val); ASSERT_EQ(1u, ht.size()); ASSERT_FALSE(ht.empty()); int* retrieved = ht.get(42); ASSERT_NE(nullptr, retrieved); ASSERT_EQ(100, *retrieved); } UTEST(hashtable, get_nonexistent) { ngfi::hashtable ht; ht.insert(1, 10); int* val = ht.get(999); ASSERT_EQ(nullptr, val); } UTEST(hashtable, get_empty_table) { ngfi::hashtable ht; int* val = ht.get(42); ASSERT_EQ(nullptr, val); } UTEST(hashtable, insert_update_existing) { ngfi::hashtable ht; ht.insert(5, 50); ASSERT_EQ(1u, ht.size()); int* val = ht.insert(5, 500); ASSERT_NE(nullptr, val); ASSERT_EQ(500, *val); ASSERT_EQ(1u, ht.size()); // Size should not increase. } UTEST(hashtable, multiple_inserts) { ngfi::hashtable ht; for (uint64_t i = 0; i < 50; ++i) { ht.insert(i, static_cast(i * 10)); } ASSERT_EQ(50u, ht.size()); for (uint64_t i = 0; i < 50; ++i) { int* val = ht.get(i); ASSERT_NE(nullptr, val); ASSERT_EQ(static_cast(i * 10), *val); } } UTEST(hashtable, get_or_insert_new) { ngfi::hashtable ht; bool is_new = false; int* val = ht.get_or_insert(10, 100, is_new); ASSERT_NE(nullptr, val); ASSERT_EQ(100, *val); ASSERT_TRUE(is_new); ASSERT_EQ(1u, ht.size()); } UTEST(hashtable, get_or_insert_existing) { ngfi::hashtable ht; ht.insert(10, 100); bool is_new = true; int* val = ht.get_or_insert(10, 999, is_new); ASSERT_NE(nullptr, val); ASSERT_EQ(100, *val); // Should return existing value, not default. 
ASSERT_FALSE(is_new);
ASSERT_EQ(1u, ht.size());
}

UTEST(hashtable, clear) {
  // NOTE(review): hashtable template arguments in this range were lost in
  // extraction and reconstructed as <uint64_t, int> from key/value usage;
  // confirm against the original file.
  ngfi::hashtable<uint64_t, int> ht;
  ht.insert(1, 10);
  ht.insert(2, 20);
  ht.insert(3, 30);
  ASSERT_EQ(3u, ht.size());
  ht.clear();
  ASSERT_EQ(0u, ht.size());
  ASSERT_TRUE(ht.empty());
  ASSERT_GT(ht.capacity(), 0u);  // Capacity should remain.
  // Verify entries are gone.
  ASSERT_EQ(nullptr, ht.get(1));
  ASSERT_EQ(nullptr, ht.get(2));
  ASSERT_EQ(nullptr, ht.get(3));
}

UTEST(hashtable, prehashed_operations) {
  ngfi::hashtable<uint64_t, int> ht;
  auto kh = ngfi::hashtable<uint64_t, int>::compute_hash(42);
  int* val = ht.insert_prehashed(kh, 100);
  ASSERT_NE(nullptr, val);
  ASSERT_EQ(100, *val);
  int* retrieved = ht.get_prehashed(kh);
  ASSERT_NE(nullptr, retrieved);
  ASSERT_EQ(100, *retrieved);
}

UTEST(hashtable, move_construction) {
  ngfi::hashtable<uint64_t, int> ht1;
  ht1.insert(1, 10);
  ht1.insert(2, 20);
  ngfi::hashtable<uint64_t, int> ht2(ngfi::move(ht1));
  ASSERT_EQ(0u, ht1.size());
  ASSERT_EQ(2u, ht2.size());
  ASSERT_EQ(10, *ht2.get(1));
  ASSERT_EQ(20, *ht2.get(2));
}

UTEST(hashtable, move_assignment) {
  ngfi::hashtable<uint64_t, int> ht1;
  ht1.insert(1, 10);
  ht1.insert(2, 20);
  ngfi::hashtable<uint64_t, int> ht2;
  ht2.insert(100, 1000);
  ht2 = ngfi::move(ht1);
  ASSERT_EQ(0u, ht1.size());
  ASSERT_EQ(2u, ht2.size());
  ASSERT_EQ(10, *ht2.get(1));
  ASSERT_EQ(20, *ht2.get(2));
  ASSERT_EQ(nullptr, ht2.get(100));  // Old entry should be gone.
}

UTEST(hashtable, iteration) {
  ngfi::hashtable<uint64_t, int> ht;
  ht.insert(1, 10);
  ht.insert(2, 20);
  ht.insert(3, 30);
  int sum_keys   = 0;
  int sum_values = 0;
  int count      = 0;
  for (auto it = ht.begin(); it != ht.end(); ++it) {
    sum_keys += static_cast<int>(it->key);
    sum_values += it->value;
    ++count;
  }
  ASSERT_EQ(3, count);
  ASSERT_EQ(6, sum_keys);     // 1 + 2 + 3
  ASSERT_EQ(60, sum_values);  // 10 + 20 + 30
}

UTEST(hashtable, iteration_empty) {
  ngfi::hashtable<uint64_t, int> ht;
  int count = 0;
  for (auto it = ht.begin(); it != ht.end(); ++it) {
    ++count;
  }
  ASSERT_EQ(0, count);
}

UTEST(hashtable, rehash_on_load) {
  ngfi::hashtable<uint64_t, int> ht(10);  // Small initial capacity.
size_t initial_cap = 0; for (uint64_t i = 0; i < 100; ++i) { ht.insert(i, static_cast(i)); if (i == 0) { initial_cap = ht.capacity(); } } ASSERT_EQ(100u, ht.size()); ASSERT_GT(ht.capacity(), initial_cap); // Should have grown. // Verify all entries are still accessible. for (uint64_t i = 0; i < 100; ++i) { int* val = ht.get(i); ASSERT_NE(nullptr, val); ASSERT_EQ(static_cast(i), *val); } } UTEST(hashtable, const_get) { ngfi::hashtable ht; ht.insert(42, 100); const auto& const_ht = ht; const int* val = const_ht.get(42); ASSERT_NE(nullptr, val); ASSERT_EQ(100, *val); } UTEST(hashtable, struct_value) { struct point { int x; int y; }; ngfi::hashtable ht; ht.insert(1, point{10, 20}); ht.insert(2, point{30, 40}); point* p1 = ht.get(1); ASSERT_NE(nullptr, p1); ASSERT_EQ(10, p1->x); ASSERT_EQ(20, p1->y); point* p2 = ht.get(2); ASSERT_NE(nullptr, p2); ASSERT_EQ(30, p2->x); ASSERT_EQ(40, p2->y); } // Mock command buffer for testing state transitions. struct mock_cmd_buffer { ngfi::cmd_buffer_state state; bool renderpass_active; bool compute_pass_active; bool xfer_pass_active; void reset() { state = ngfi::CMD_BUFFER_STATE_NEW; renderpass_active = false; compute_pass_active = false; xfer_pass_active = false; } }; UTEST(cmdbuf_state, new_to_ready) { mock_cmd_buffer buf; buf.reset(); bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); } UTEST(cmdbuf_state, ready_to_recording) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); } UTEST(cmdbuf_state, recording_to_ready_to_submit) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_RECORDING; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT); ASSERT_TRUE(result); 
ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT, buf.state); } UTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_renderpass) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_RECORDING; buf.renderpass_active = true; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); // State unchanged. } UTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_compute_pass) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_RECORDING; buf.compute_pass_active = true; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); } UTEST(cmdbuf_state, recording_to_ready_to_submit_fails_with_active_xfer_pass) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_RECORDING; buf.xfer_pass_active = true; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); } UTEST(cmdbuf_state, ready_to_submit_to_pending) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state); } UTEST(cmdbuf_state, ready_to_pending) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state); } UTEST(cmdbuf_state, pending_to_submitted) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_PENDING; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_SUBMITTED, buf.state); } 
UTEST(cmdbuf_state, submitted_to_ready) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_SUBMITTED; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); } UTEST(cmdbuf_state, ready_to_ready) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); } UTEST(cmdbuf_state, ready_to_submit_to_recording) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING); ASSERT_TRUE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); } UTEST(cmdbuf_state, cannot_transition_to_new) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_NEW); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); // State unchanged. 
} UTEST(cmdbuf_state, new_to_recording_fails) { mock_cmd_buffer buf; buf.reset(); bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_NEW, buf.state); } UTEST(cmdbuf_state, new_to_pending_fails) { mock_cmd_buffer buf; buf.reset(); bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_NEW, buf.state); } UTEST(cmdbuf_state, recording_to_pending_fails) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_RECORDING; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); } UTEST(cmdbuf_state, ready_to_submitted_fails) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_READY; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); } UTEST(cmdbuf_state, pending_to_ready_fails) { mock_cmd_buffer buf; buf.reset(); buf.state = ngfi::CMD_BUFFER_STATE_PENDING; bool result = ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY); ASSERT_FALSE(result); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state); } UTEST(cmdbuf_state, full_lifecycle) { mock_cmd_buffer buf; buf.reset(); // NEW -> READY ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY)); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); // READY -> RECORDING ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_RECORDING)); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_RECORDING, buf.state); // RECORDING -> READY_TO_SUBMIT ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT)); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY_TO_SUBMIT, buf.state); // READY_TO_SUBMIT -> PENDING ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_PENDING)); 
ASSERT_EQ(ngfi::CMD_BUFFER_STATE_PENDING, buf.state); // PENDING -> SUBMITTED ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_SUBMITTED)); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_SUBMITTED, buf.state); // SUBMITTED -> READY (reuse) ASSERT_TRUE(ngfi::transition_cmd_buf(&buf, ngfi::CMD_BUFFER_STATE_READY)); ASSERT_EQ(ngfi::CMD_BUFFER_STATE_READY, buf.state); } UTEST(chunked_list, append_single_element) { ngfi::arena a(1024); ngfi::chunked_list list; int* ptr = list.append(42, a); ASSERT_NE(nullptr, ptr); ASSERT_EQ(42, *ptr); } UTEST(chunked_list, append_multiple_elements) { ngfi::arena a(1024); ngfi::chunked_list list; for (int i = 0; i < 5; ++i) { int* ptr = list.append(i * 10, a); ASSERT_NE(nullptr, ptr); ASSERT_EQ(i * 10, *ptr); } } UTEST(chunked_list, iterate_elements) { ngfi::arena a(1024); ngfi::chunked_list list; list.append(1, a); list.append(2, a); list.append(3, a); int sum = 0; int count = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { sum += *it; ++count; } ASSERT_EQ(3, count); ASSERT_EQ(6, sum); } UTEST(chunked_list, iterate_empty) { ngfi::chunked_list list; int count = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { ++count; } ASSERT_EQ(0, count); } UTEST(chunked_list, clear) { ngfi::arena a(1024); ngfi::chunked_list list; list.append(1, a); list.append(2, a); list.append(3, a); list.clear(); int count = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { ++count; } ASSERT_EQ(0, count); } UTEST(chunked_list, append_after_clear) { ngfi::arena a(1024); ngfi::chunked_list list; list.append(1, a); list.append(2, a); list.clear(); int* ptr = list.append(100, a); ASSERT_NE(nullptr, ptr); ASSERT_EQ(100, *ptr); int count = 0; int value = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { value = *it; ++count; } ASSERT_EQ(1, count); ASSERT_EQ(100, value); } UTEST(chunked_list, spans_multiple_chunks) { ngfi::arena a(4096); // Use small chunk capacity to force multiple chunks. 
ngfi::chunked_list list; // Insert more elements than one chunk can hold. for (int i = 0; i < 10; ++i) { int* ptr = list.append(i, a); ASSERT_NE(nullptr, ptr); ASSERT_EQ(i, *ptr); } // Verify all elements are accessible via iteration. int count = 0; int sum = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { sum += *it; ++count; } ASSERT_EQ(10, count); ASSERT_EQ(45, sum); // 0+1+2+...+9 = 45 } UTEST(chunked_list, iteration_order_preserved) { ngfi::arena a(4096); ngfi::chunked_list list; // Very small chunks. int expected[] = {10, 20, 30, 40, 50, 60, 70}; for (int val : expected) { list.append(val, a); } int idx = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { ASSERT_EQ(expected[idx], *it); ++idx; } ASSERT_EQ(7, idx); } UTEST(chunked_list, struct_elements) { struct point { int x; int y; }; ngfi::arena a(1024); ngfi::chunked_list list; point* p1 = list.append(point{1, 2}, a); point* p2 = list.append(point{3, 4}, a); point* p3 = list.append(point{5, 6}, a); ASSERT_NE(nullptr, p1); ASSERT_NE(nullptr, p2); ASSERT_NE(nullptr, p3); ASSERT_EQ(1, p1->x); ASSERT_EQ(2, p1->y); ASSERT_EQ(3, p2->x); ASSERT_EQ(4, p2->y); ASSERT_EQ(5, p3->x); ASSERT_EQ(6, p3->y); } UTEST(chunked_list, const_iteration) { ngfi::arena a(1024); ngfi::chunked_list list; list.append(10, a); list.append(20, a); list.append(30, a); const auto& const_list = list; int sum = 0; for (auto it = const_list.begin(); !(it == const_list.end()); ++it) { sum += *it; } ASSERT_EQ(60, sum); } UTEST(chunked_list, exact_chunk_boundary) { ngfi::arena a(4096); ngfi::chunked_list list; // Chunk capacity of 5. // Insert exactly 5 elements (fills one chunk exactly). for (int i = 0; i < 5; ++i) { list.append(i, a); } int count = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { ++count; } ASSERT_EQ(5, count); // Insert one more to trigger new chunk. 
list.append(5, a); count = 0; for (auto it = list.begin(); !(it == list.end()); ++it) { ++count; } ASSERT_EQ(6, count); } UTEST(arena, default_construction) { ngfi::arena a; ASSERT_EQ(0u, a.total_allocated()); ASSERT_EQ(0u, a.total_used()); } UTEST(arena, construction_with_capacity) { ngfi::arena a(1024); // No allocation until first alloc call. ASSERT_EQ(0u, a.total_allocated()); ASSERT_EQ(0u, a.total_used()); } UTEST(arena, alloc_basic) { ngfi::arena a(1024); void* ptr = a.alloc(64); ASSERT_NE(nullptr, ptr); ASSERT_GT(a.total_allocated(), 0u); ASSERT_GT(a.total_used(), 0u); } UTEST(arena, alloc_typed_single) { ngfi::arena a(1024); int* ptr = a.alloc(); ASSERT_NE(nullptr, ptr); *ptr = 42; ASSERT_EQ(42, *ptr); } UTEST(arena, alloc_typed_array) { ngfi::arena a(1024); int* arr = a.alloc(10); ASSERT_NE(nullptr, arr); for (int i = 0; i < 10; ++i) { arr[i] = i * 10; } for (int i = 0; i < 10; ++i) { ASSERT_EQ(i * 10, arr[i]); } } UTEST(arena, alloc_struct) { struct test_struct { int x; float y; char z; }; ngfi::arena a(1024); test_struct* ptr = a.alloc(); ASSERT_NE(nullptr, ptr); ptr->x = 100; ptr->y = 3.14f; ptr->z = 'A'; ASSERT_EQ(100, ptr->x); ASSERT_EQ(3.14f, ptr->y); ASSERT_EQ('A', ptr->z); } UTEST(arena, multiple_allocations) { ngfi::arena a(1024); int* i1 = a.alloc(); int* i2 = a.alloc(); int* i3 = a.alloc(); ASSERT_NE(nullptr, i1); ASSERT_NE(nullptr, i2); ASSERT_NE(nullptr, i3); // Pointers should be different. ASSERT_NE(i1, i2); ASSERT_NE(i2, i3); ASSERT_NE(i1, i3); *i1 = 1; *i2 = 2; *i3 = 3; ASSERT_EQ(1, *i1); ASSERT_EQ(2, *i2); ASSERT_EQ(3, *i3); } UTEST(arena, reset) { ngfi::arena a(1024); a.alloc(); a.alloc(); a.alloc(); size_t used_before = a.total_used(); ASSERT_GT(used_before, 0u); a.reset(); ASSERT_EQ(0u, a.total_used()); // Can allocate again after reset. 
int* ptr = a.alloc(); ASSERT_NE(nullptr, ptr); } UTEST(arena, reset_reuses_memory) { ngfi::arena a(1024); int* ptr1 = a.alloc(); ASSERT_NE(nullptr, ptr1); size_t allocated_after_first = a.total_allocated(); a.reset(); int* ptr2 = a.alloc(); ASSERT_NE(nullptr, ptr2); // Should reuse the same block, so total_allocated stays the same. ASSERT_EQ(allocated_after_first, a.total_allocated()); } UTEST(arena, grows_when_needed) { ngfi::arena a(64); // Small block size. // Allocate more than one block can hold. void* ptrs[20]; for (int i = 0; i < 20; ++i) { ptrs[i] = a.alloc(32); ASSERT_NE(nullptr, ptrs[i]); } // Should have grown. ASSERT_GT(a.total_allocated(), 64u); } UTEST(arena, alignment_basic) { ngfi::arena a(1024); // Allocate with 16-byte alignment. void* ptr = a.alloc_aligned(32, 16); ASSERT_NE(nullptr, ptr); ASSERT_EQ(0u, reinterpret_cast(ptr) % 16); } UTEST(arena, alignment_various) { ngfi::arena a(4096); for (size_t align = 1; align <= 128; align *= 2) { void* ptr = a.alloc_aligned(16, align); ASSERT_NE(nullptr, ptr); ASSERT_EQ(0u, reinterpret_cast(ptr) % align); } } UTEST(arena, typed_alloc_alignment) { struct alignas(32) aligned_struct { char data[32]; }; ngfi::arena a(1024); aligned_struct* ptr = a.alloc(); ASSERT_NE(nullptr, ptr); ASSERT_EQ(0u, reinterpret_cast(ptr) % 32); } UTEST(arena, alloc_zero_size_returns_null) { ngfi::arena a(1024); void* ptr = a.alloc(0); ASSERT_EQ(nullptr, ptr); } UTEST(arena, alloc_without_capacity_returns_null) { ngfi::arena a; // No capacity set. 
void* ptr = a.alloc(64); ASSERT_EQ(nullptr, ptr); } UTEST(arena, set_block_size) { ngfi::arena a; a.set_block_size(512); void* ptr = a.alloc(64); ASSERT_NE(nullptr, ptr); ASSERT_GT(a.total_allocated(), 0u); } UTEST(arena, move_construction) { ngfi::arena a1(1024); int* ptr = a1.alloc(); *ptr = 42; size_t allocated = a1.total_allocated(); size_t used = a1.total_used(); ngfi::arena a2(ngfi::move(a1)); ASSERT_EQ(allocated, a2.total_allocated()); ASSERT_EQ(used, a2.total_used()); ASSERT_EQ(0u, a1.total_allocated()); ASSERT_EQ(0u, a1.total_used()); // Original pointer should still be valid. ASSERT_EQ(42, *ptr); } UTEST(arena, total_used_tracks_allocations) { ngfi::arena a(1024); size_t used0 = a.total_used(); a.alloc(); size_t used1 = a.total_used(); a.alloc(); size_t used2 = a.total_used(); ASSERT_EQ(0u, used0); ASSERT_GT(used1, used0); ASSERT_GT(used2, used1); } UTEST(arena, large_allocation) { ngfi::arena a(64); // Small default block size. // Request larger than default block size. void* ptr = a.alloc(256); ASSERT_NE(nullptr, ptr); ASSERT_GE(a.total_allocated(), 256u); } UTEST(arena, many_small_allocations) { ngfi::arena a(1024); for (int i = 0; i < 100; ++i) { char* ptr = a.alloc(); ASSERT_NE(nullptr, ptr); *ptr = static_cast(i); } ASSERT_GE(a.total_used(), 100u); } UTEST (frame_token, encode_decode) { const uint16_t test_ctx_id = 65534u; const uint8_t test_max_inflight_frames = 3u, test_frame_id = 255u; const uintptr_t test_token = ngfi_encode_frame_token(test_ctx_id, test_max_inflight_frames, test_frame_id); ASSERT_EQ(test_ctx_id, ngfi_frame_ctx_id(test_token)); ASSERT_EQ(test_max_inflight_frames, ngfi_frame_max_inflight_frames(test_token)); ASSERT_EQ(test_frame_id, ngfi_frame_id(test_token)); } ================================================ FILE: tests/vk-backend-tests.cpp ================================================ /** * Copyright (c) 2026 nicegraf contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this 
software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ // NOTE: this file is meant to be included at the bottom of the vulkan backend implementation in test mode! 
#include "utest.h"

// Returns a zero-initialized sync state with the image layout explicitly set
// to VK_IMAGE_LAYOUT_UNDEFINED, i.e. the state of a freshly created resource
// that has not yet been accessed by any pipeline stage.
static ngfvk_sync_state empty_sync_state() {
  ngfvk_sync_state result;
  memset(&result, 0, sizeof(result));
  result.layout = VK_IMAGE_LAYOUT_UNDEFINED;
  return result;
}

// Verifies that ngfvk_per_stage_access_mask packs the given stage/access
// combination into the expected per-stage access bitmask.
// Implemented as a macro (not a function) so that the utest ASSERT_* macros
// report failures from within the calling test body.
#define test_stg_access_mask(expected_result, stages, accesses) { \
  const ngfvk_sync_barrier_masks m      = {accesses, stages}; \
  const uint32_t                 result = ngfvk_per_stage_access_mask(&m); \
  ASSERT_EQ(expected_result, result); \
}

// Feeds one access (destination stage mask `dsm`, access mask `dam`, target
// layout) into ngfvk_sync_barrier against the running `sync_state`, and checks
// whether a barrier is produced and, if so, that its source/destination masks
// and layouts match expectations. A barrier is expected whenever a nonzero
// source stage mask is given or a layout transition is required. Note that
// each invocation mutates `sync_state`, so the order of calls within a test
// matters.
#define test_barrier(sync_state, dsm, dam, expected_src_stage_mask, expected_src_access_mask, expected_src_layout, expected_dst_layout) { \
  ngfvk_barrier_data bar; \
  const ngfvk_sync_req sync_req = {{dam, dsm}, expected_dst_layout}; \
  const bool barrier_necessary = \
      ngfvk_sync_barrier(sync_state, &sync_req, &bar); \
  const bool barrier_expected = \
      expected_src_stage_mask != 0 || (expected_src_layout != expected_dst_layout); \
  if (!barrier_expected) { \
    ASSERT_FALSE(barrier_necessary); \
  } else { \
    ASSERT_TRUE(barrier_necessary); \
    ASSERT_EQ(expected_src_stage_mask, bar.src_stage_mask); \
    ASSERT_EQ(expected_src_access_mask, bar.src_access_mask); \
    ASSERT_EQ(dsm, bar.dst_stage_mask); \
    ASSERT_EQ(dam, bar.dst_access_mask); \
    ASSERT_EQ(expected_src_layout, bar.src_layout); \
    ASSERT_EQ(expected_dst_layout, bar.dst_layout); \
  } \
}

// Merges `src_req` into `dst_req` via ngfvk_sync_req_merge and checks the
// reported success flag as well as the accumulated stage mask, access mask,
// and layout left in `dst_req` afterwards.
#define test_sync_req_merge(dst_req, src_req, success_expected, expected_stage_mask, expected_access_mask, expected_layout) { \
  const bool success = ngfvk_sync_req_merge(&dst_req, &src_req); \
  ASSERT_EQ(success_expected, success); \
  ASSERT_EQ(expected_stage_mask, dst_req.barrier_masks.stage_mask); \
  ASSERT_EQ(expected_access_mask, dst_req.barrier_masks.access_mask); \
  ASSERT_EQ(expected_layout, dst_req.layout); \
}

// Test-name suffixes below appear to encode the simulated access sequence:
// T = transfer, G = graphics, C = compute; r = read, w = write. For example,
// "TwGr" = transfer-write followed by graphics-read. (Convention inferred from
// the stage/access bits each test uses.)

// Transfer write then vertex-attribute read: the read needs an execution/memory
// barrier against the preceding write; no layout transitions (buffer-like).
UTEST(vk_sync, barrier_attrib_TwGr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Transfer write then index read: same as above, with the index-read access.
UTEST(vk_sync, barrier_index_TwGr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_INDEX_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Image: upload (layout transition to TRANSFER_DST) then shader sampling
// (transition to SHADER_READ_ONLY with a barrier against the transfer write).
UTEST(vk_sync, barrier_texture_TwGr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

// NOTE(review): the name says "index" (buffer), but the sequence uses image
// layouts like the texture tests -- possibly a copy/paste of barrier_texture_TwGr
// extended with a repeated read. Confirm the intended resource type.
// Third access repeats the same read; no new barrier should be needed.
UTEST(vk_sync, barrier_index_TwGrGr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

// Image: write, graphics read x2, then compute read x2. The first compute read
// still needs an execution barrier against the graphics stages (no layout
// change, zero access mask); the repeated reads need nothing.
UTEST(vk_sync, barrier_texture_TwGrGrCrCr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      0,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

// Image: transfer write, graphics read, attachment write (layout transition to
// COLOR_ATTACHMENT_OPTIMAL), then compute read (transition back to
// SHADER_READ_ONLY with a barrier against the attachment write).
UTEST(vk_sync, barrier_texture_TwGrGwCr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
       VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),
      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),
      (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
       VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),
      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),
      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

// Image: rendered to as an attachment, then read back via transfer
// (transition COLOR_ATTACHMENT_OPTIMAL -> TRANSFER_SRC_OPTIMAL).
UTEST(vk_sync, barrier_texture_GwTr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
       VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),
      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_READ_BIT,
      (VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
       VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
       VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT),
      (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT),
      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
}

// Buffer: back-to-back compute writes require a write-after-write barrier.
UTEST(vk_sync, barrier_buffer_CwCw) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Buffer: graphics read, then compute write (write-after-read barrier),
// then another compute write (write-after-write barrier).
UTEST(vk_sync, barrier_buffer_GrCwCw) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      VK_ACCESS_UNIFORM_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      VK_ACCESS_UNIFORM_READ_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Buffer: two concurrent reads accumulate without a barrier; the subsequent
// write must wait on BOTH readers (accumulated stage/access masks).
UTEST(vk_sync, barrier_buffer_GrCrCw) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT),
      (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT),
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Buffer: compute write, then a graphics read (read-after-write barrier);
// the repeated read needs no further barrier.
UTEST(vk_sync, barrier_buffer_CwGrGr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Buffer: compute write, graphics read, then compute read. Both reads are
// expected to barrier against the original write.
UTEST(vk_sync, barrier_buffer_CwGrCr) {
  ngfvk_sync_state sync_state = empty_sync_state();
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      0,
      0,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
  test_barrier(
      &sync_state,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// Two read-only requests with the same layout merge by OR-ing stage masks.
UTEST(vk_sync, req_merge_concurrent_reads) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req src_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL},
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}};
  test_sync_req_merge(
      dst_req,
      src_reqs[0],
      true,
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  test_sync_req_merge(
      dst_req,
      src_reqs[1],
      true,
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}

// Merging a single write request into an empty request succeeds.
UTEST(vk_sync, req_merge_write) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req sync_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_UNDEFINED},
  };
  test_sync_req_merge(
      dst_req,
      sync_reqs[0],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_UNDEFINED);
}

// A second conflicting write (different layout) fails to merge and leaves
// dst_req unchanged.
UTEST(vk_sync, req_merge_write_write) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req sync_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL},
      {.barrier_masks = {.access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},
       .layout        = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}};
  test_sync_req_merge(
      dst_req,
      sync_reqs[0],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
  test_sync_req_merge(
      dst_req,
      sync_reqs[1],
      false,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
}

// Write then read in the same stage and layout merges; access masks are OR-ed.
UTEST(vk_sync, req_merge_write_read) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req sync_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL},
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL}};
  test_sync_req_merge(
      dst_req,
      sync_reqs[0],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
  test_sync_req_merge(
      dst_req,
      sync_reqs[1],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
}

// Same as above, with read merged first and write second.
UTEST(vk_sync, req_merge_read_write) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req sync_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL},
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_WRITE_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL}};
  test_sync_req_merge(
      dst_req,
      sync_reqs[0],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
  test_sync_req_merge(
      dst_req,
      sync_reqs[1],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
}

// A second read with a different layout still merges successfully, but the
// previously-merged layout (GENERAL) wins.
UTEST(vk_sync, req_merge_layout_change) {
  ngfvk_sync_req dst_req = {{0, 0}, VK_IMAGE_LAYOUT_UNDEFINED};
  static const ngfvk_sync_req sync_reqs[] = {
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_GENERAL},
      {.barrier_masks = {.access_mask = VK_ACCESS_SHADER_READ_BIT,
                         .stage_mask  = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
       .layout        = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}};
  test_sync_req_merge(
      dst_req,
      sync_reqs[0],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
  test_sync_req_merge(
      dst_req,
      sync_reqs[1],
      true,
      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT,
      VK_IMAGE_LAYOUT_GENERAL);
}

// Exercises the per-stage access-mask packing: the expected value is eight
// 3-bit groups (one group per tracked stage), built by BITMASK3x8 with b0 the
// lowest group.
UTEST(vk_sync, stg_access_map) {
#define BITMASK3x8(b7, b6, b5, b4, b3, b2, b1, b0) (((b7) << 21) | ((b6) << 18) | ((b5) << 15) | ((b4) << 12) | ((b3) << 9) | ((b2) << 6) | ((b1) << 3) | (b0) )
  // clang-format off
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b001),
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b010),
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_INDEX_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b001, 0b000),
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b000),
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b101, 0b000),
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b101, 0b101, 0b000),
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b111, 0b101, 0b101, 0b000),
      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
          VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b011, 0b011, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b011, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b001, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b010, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b011, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT);
  test_stg_access_mask(
      BITMASK3x8(0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000, 0b000),
      VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      VK_ACCESS_SHADER_READ_BIT);
#undef BITMASK3x8
  // clang-format on
}

UTEST_MAIN()