Repository: RedSpah/xxhash_cpp Branch: master Commit: 2aad76a18f7a Files: 14 Total size: 1.0 MB Directory structure: gitextract_fzbi9j_9/ ├── .circleci/ │ └── config.yml ├── .gitattributes ├── .github/ │ └── ISSUE_TEMPLATE/ │ └── bug_report.md ├── .gitignore ├── CMakeLists.txt ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── include/ │ └── xxhash.hpp └── test/ ├── CMakeLists.txt ├── catch.hpp ├── test_main.cpp ├── xxh3.h └── xxhash.h ================================================ FILE CONTENTS ================================================ ================================================ FILE: .circleci/config.yml ================================================ # Use the latest 2.1 version of CircleCI pipeline process engine. # See: https://circleci.com/docs/2.0/configuration-reference version: 2.1 orbs: win: circleci/windows@4.1 # Define a job to be invoked later in a workflow. # See: https://circleci.com/docs/2.0/configuration-reference/#jobs jobs: build-test: executor: win/server-2019 steps: - checkout - run: choco install mingw -y - run: choco install cmake -y - run: mkdir build - run: cd build; $env:Path+=";$Env:ProgramFiles\CMake\bin"; cmake -DCMAKE_BUILD_TYPE=Release -DXXH_CPP_USE_AVX2=ON ..; cmake --build . --config Release; ctest --output-on-failure -C Release # Invoke jobs via workflows # See: https://circleci.com/docs/2.0/configuration-reference/#workflows workflows: say-hello-workflow: jobs: - build-test ================================================ FILE: .gitattributes ================================================ ############################################################################### # Set default behavior to automatically normalize line endings. ############################################################################### * text=auto ############################################################################### # Set default behavior for command prompt diff. 
# # This is need for earlier builds of msysgit that does not have it on by # default for csharp files. # Note: This is only used by command line ############################################################################### #*.cs diff=csharp ############################################################################### # Set the merge driver for project and solution files # # Merging from the command prompt will add diff markers to the files if there # are conflicts (Merging from VS is not affected by the settings below, in VS # the diff markers are never inserted). Diff markers may cause the following # file extensions to fail to load in VS. An alternative would be to treat # these files as binary and thus will always conflict and require user # intervention with every merge. To do so, just uncomment the entries below ############################################################################### #*.sln merge=binary #*.csproj merge=binary #*.vbproj merge=binary #*.vcxproj merge=binary #*.vcproj merge=binary #*.dbproj merge=binary #*.fsproj merge=binary #*.lsproj merge=binary #*.wixproj merge=binary #*.modelproj merge=binary #*.sqlproj merge=binary #*.wwaproj merge=binary ############################################################################### # behavior for image files # # image files are treated as binary by default. ############################################################################### #*.jpg binary #*.png binary #*.gif binary ############################################################################### # diff behavior for common document formats # # Convert binary document formats to text before diffing them. This feature # is only available from the command line. Turn it on by uncommenting the # entries below. 
############################################################################### #*.doc diff=astextplain #*.DOC diff=astextplain #*.docx diff=astextplain #*.DOCX diff=astextplain #*.dot diff=astextplain #*.DOT diff=astextplain #*.pdf diff=astextplain #*.PDF diff=astextplain #*.rtf diff=astextplain #*.RTF diff=astextplain ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] **Additional context** Add any other context about the problem here. ================================================ FILE: .gitignore ================================================ ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. 
# User-specific files *.suo *.user *.userosscache *.sln.docstates # User-specific files (MonoDevelop/Xamarin Studio) *.userprefs # Build results [Dd]ebug/ [Dd]ebugPublic/ [Rr]elease/ [Rr]eleases/ x64/ x86/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ build/ # Visual Studio 2015 cache/options directory .vs/ # Uncomment if you have tasks that create the project's static files in wwwroot #wwwroot/ # MSTest test Results [Tt]est[Rr]esult*/ [Bb]uild[Ll]og.* # NUNIT *.VisualState.xml TestResult.xml # Build Results of an ATL Project [Dd]ebugPS/ [Rr]eleasePS/ dlldata.c # DNX project.lock.json project.fragment.lock.json artifacts/ *_i.c *_p.c *_i.h *.ilk *.meta *.obj *.pch *.pdb *.pgc *.pgd *.rsp *.sbr *.tlb *.tli *.tlh *.tmp *.tmp_proj *.log *.vspscc *.vssscc .builds *.pidb *.svclog *.scc # Chutzpah Test files _Chutzpah* # Visual C++ cache files ipch/ *.aps *.ncb *.opendb *.opensdf *.sdf *.cachefile *.VC.db *.VC.VC.opendb # Visual Studio profiler *.psess *.vsp *.vspx *.sap # TFS 2012 Local Workspace $tf/ # Guidance Automation Toolkit *.gpState # ReSharper is a .NET coding add-in _ReSharper*/ *.[Rr]e[Ss]harper *.DotSettings.user # JustCode is a .NET coding add-in .JustCode # TeamCity is a build add-in _TeamCity* # DotCover is a Code Coverage Tool *.dotCover # NCrunch _NCrunch_* .*crunch*.local.xml nCrunchTemp_* # MightyMoose *.mm.* AutoTest.Net/ # Web workbench (sass) .sass-cache/ # Installshield output folder [Ee]xpress/ # DocProject is a documentation generator add-in DocProject/buildhelp/ DocProject/Help/*.HxT DocProject/Help/*.HxC DocProject/Help/*.hhc DocProject/Help/*.hhk DocProject/Help/*.hhp DocProject/Help/Html2 DocProject/Help/html # Click-Once directory publish/ # Publish Web Output *.[Pp]ublish.xml *.azurePubxml # TODO: Comment the next line if you want to checkin your web deploy settings # but database connection strings (with potential passwords) will be unencrypted #*.pubxml *.publishproj # Microsoft Azure Web App publish settings. 
Comment the next line if you want to # checkin your Azure Web App publish settings, but sensitive information contained # in these scripts will be unencrypted PublishScripts/ # NuGet Packages *.nupkg # The packages folder can be ignored because of Package Restore **/packages/* # except build/, which is used as an MSBuild target. !**/packages/build/ # Uncomment if necessary however generally it will be regenerated when needed #!**/packages/repositories.config # NuGet v3's project.json files produces more ignoreable files *.nuget.props *.nuget.targets # Microsoft Azure Build Output csx/ *.build.csdef # Microsoft Azure Emulator ecf/ rcf/ # Windows Store app package directories and files AppPackages/ BundleArtifacts/ Package.StoreAssociation.xml _pkginfo.txt # Visual Studio cache files # files ending in .cache can be ignored *.[Cc]ache # but keep track of directories ending in .cache !*.[Cc]ache/ # Others ClientBin/ ~$* *~ *.dbmdl *.dbproj.schemaview *.jfm *.pfx *.publishsettings node_modules/ orleans.codegen.cs # Since there are multiple workflows, uncomment next line to ignore bower_components # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) #bower_components/ # RIA/Silverlight projects Generated_Code/ # Backup & report files from converting an old project file # to a newer Visual Studio version. 
Backup files are not needed, # because we have git ;-) _UpgradeReport_Files/ Backup*/ UpgradeLog*.XML UpgradeLog*.htm # SQL Server files *.mdf *.ldf # Business Intelligence projects *.rdl.data *.bim.layout *.bim_*.settings # Microsoft Fakes FakesAssemblies/ # GhostDoc plugin setting file *.GhostDoc.xml # Node.js Tools for Visual Studio .ntvs_analysis.dat # Visual Studio 6 build log *.plg # Visual Studio 6 workspace options file *.opt # Visual Studio LightSwitch build output **/*.HTMLClient/GeneratedArtifacts **/*.DesktopClient/GeneratedArtifacts **/*.DesktopClient/ModelManifest.xml **/*.Server/GeneratedArtifacts **/*.Server/ModelManifest.xml _Pvt_Extensions # Paket dependency manager .paket/paket.exe paket-files/ # FAKE - F# Make .fake/ # JetBrains Rider .idea/ *.sln.iml # CodeRush .cr/ # Python Tools for Visual Studio (PTVS) __pycache__/ *.pyc # Test files *.o *.exe *.vcxproj *.vcxproj.filters *.cmake *.cmake.in *.sln /xxhash /xxhash_dev /build /xxhash_dev *.tcl CMakeCache.txt /test/CMakeFiles /CMakeFiles ================================================ FILE: CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.8) project(xxhash_cpp VERSION 0.8.1 LANGUAGES CXX DESCRIPTION "C++ port of the xxhash library." 
HOMEPAGE_URL "https://github.com/RedSpah/xxhash_cpp") include(GNUInstallDirs) add_library(${PROJECT_NAME} INTERFACE) target_compile_features(${PROJECT_NAME} INTERFACE cxx_std_17) target_include_directories(${PROJECT_NAME} INTERFACE $ $) if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64" AND APPLE) find_path(SSE2NEON_HEADER sse2neon.h PATHS ${CMAKE_SOURCE_DIR}/../sse2neon) target_include_directories(${PROJECT_NAME} INTERFACE ${SSE2NEON_HEADER}) endif() if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) include(CTest) endif() if(BUILD_TESTING) add_subdirectory(test) endif() install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}_Targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) include(CMakePackageConfigHelpers) write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion) file(WRITE ${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake.in "@PACKAGE_INIT@\n" "include(\"\${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake\")\n" "check_required_components(\"@PROJECT_NAME@\")\n" ) configure_package_config_file( "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake.in" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake NAMESPACE ${PROJECT_NAME}:: DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(DIRECTORY ${PROJECT_SOURCE_DIR}/include DESTINATION include) ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming 
environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at redspah@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq ================================================ FILE: LICENSE ================================================ BSD 2-Clause License Copyright (c) 2012-2020, Yann Collet Copyright (c) 2017-2020, Red Gavin All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: README.md ================================================ # xxhash_cpp Port of the xxHash library to C++17. [![CircleCI](https://dl.circleci.com/status-badge/img/gh/RedSpah/xxhash_cpp/tree/master.svg?style=shield)](https://dl.circleci.com/status-badge/redirect/gh/RedSpah/xxhash_cpp/tree/master) Compatibility ---- | Compiler | Min. Version | |----------------------|:-------------------:| | MSVC (Visual Studio) | 19.1 (VS 2017.3 P2) | | clang | 3.9 | | gcc | 7 | Example Usage ---- ```cpp // standalone hash std::array input {322, 2137, 42069, 65536}; xxh::hash_t<32> hash = xxh::xxhash<32>(input); // hash streaming std::array buffer; xxh::hash_state_t<64> hash_stream; while (fill_buffer(buffer)) { hash_stream.update(buffer); } xxh::hash_t<64> final_hash = hash_stream.digest(); ``` The template argument specifies whether the algorithm will use the 32 or 64 bit version. Other values are not allowed. 
Typedefs `hash32_t`, `hash64_t`, `hash_state32_t` and `hash_state64_t` are provided.

`xxh::xxhash` and `xxh::hash_state_t::update` provide several convenient overloads, all accepting optional `seed` and `endianness` arguments:
* C-style `const void*` + `size_t` pair
* `const std::vector<T>&`
* `const std::basic_string<T>&`
* A pair of templated iterators
* `const std::array<T, N>&`
* `const std::initializer_list<T>&`

Build Instructions
----
The library is provided as a single standalone header, for static linking only. No build instructions are necessary.

xxHash - Extremely fast hash algorithm
======================================

xxHash is an Extremely fast Hash algorithm, running at RAM speed limits. It successfully completes the [SMHasher](http://code.google.com/p/smhasher/wiki/SMHasher) test suite which evaluates collision, dispersion and randomness qualities of hash functions. Code is highly portable, and hashes are identical on all platforms (little / big endian).

Benchmarks
-------------------------

The benchmark uses SMHasher speed test, compiled with Visual 2010 on a Windows Seven 32-bits box. The reference system uses a Core 2 Duo @3GHz

| Name | Speed | Quality | Author |
|---------------|----------|:-------:|------------------|
| [xxHash] | 5.4 GB/s | 10 | Y.C. |
| MurmurHash 3a | 2.7 GB/s | 10 | Austin Appleby |
| SBox | 1.4 GB/s | 9 | Bret Mulvey |
| Lookup3 | 1.2 GB/s | 9 | Bob Jenkins |
| CityHash64 | 1.05 GB/s| 10 | Pike & Alakuijala|
| FNV | 0.55 GB/s| 5 | Fowler, Noll, Vo |
| CRC32 | 0.43 GB/s| 9 | |
| MD5-32 | 0.33 GB/s| 10 | Ronald L.Rivest |
| SHA1-32 | 0.28 GB/s| 10 | |

[xxHash]: http://www.xxhash.com

Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. Algorithms with a score < 5 are not listed on this table.

A more recent version, XXH64, has been created thanks to [Mathias Westerdahl](https://github.com/JCash), which offers superior speed and dispersion for 64-bits systems.
Note however that 32-bits applications will still run faster using the 32-bits version. SMHasher speed test, compiled using GCC 4.8.2, on Linux Mint 64-bits. The reference system uses a Core i5-3340M @2.7GHz | Version | Speed on 64-bits | Speed on 32-bits | |------------|------------------|------------------| | XXH64 | 13.8 GB/s | 1.9 GB/s | | XXH32 | 6.8 GB/s | 6.0 GB/s | ### License The library file `xxhash.hpp` is BSD licensed. ### Build modifiers The following macros influence xxhash behavior. They are all disabled by default. - `XXH_FORCE_NATIVE_FORMAT` : on big-endian systems : use native number representation, resulting in system-specific results. Breaks consistency with little-endian results. - `XXH_CPU_LITTLE_ENDIAN` : if defined to 0, sets the native endianness to big endian, if defined to 1, sets the native endianness to little endian, if left undefined, the endianness is resolved at runtime, before `main` is called, at the cost of endianness not being `constexpr`. - `XXH_FORCE_MEMORY_ACCESS` : if defined to 2, enables unaligned reads as an optimization, this is not standard compliant, if defined to 1, enables the use of `packed` attribute for optimization, only defined for gcc and icc otherwise, uses the default fallback method (`memcpy`) ### Other languages Beyond the C reference version, xxHash is also available on many programming languages, thanks to great contributors. They are [listed here](http://www.xxhash.com/#other-languages). ================================================ FILE: include/xxhash.hpp ================================================ #pragma once #include #include #include #include #include #include /* xxHash - Extremely Fast Hash algorithm Header File Copyright (C) 2012-2024, Yann Collet. Copyright (C) 2017-2024, Red Gavin. All rights reserved. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - xxHash source repository : https://github.com/Cyan4973/xxHash - xxHash C++ port repository : https://github.com/RedSpah/xxhash_cpp */ /* Intrinsics * Sadly has to be included in the global namespace or literally everything breaks */ #if (defined(__ARM_NEON) && defined(__APPLE__)) #include "sse2neon.h" #else #include #endif namespace xxh { /* ************************************* * Versioning ***************************************/ namespace version { constexpr int cpp_version_major = 0; constexpr int cpp_version_minor = 8; constexpr int cpp_version_release = 1; } constexpr uint32_t version_number() { return version::cpp_version_major * 10000 + version::cpp_version_minor * 100 + version::cpp_version_release; } /* ************************************* * Basic Types - Predefining uint128_t for intrin ***************************************/ namespace typedefs { struct alignas(16) uint128_t { uint64_t low64 = 0; uint64_t high64 = 0; bool operator==(const uint128_t & other) { return (low64 == other.low64 && high64 == other.high64); } bool operator>(const uint128_t & other) { return (high64 > other.high64 || low64 > other.low64); } bool operator>=(const uint128_t & other) { return (*this > other || *this == other); } bool operator<(const uint128_t & other) { return !(*this >= other); } bool operator<=(const uint128_t & other) { return !(*this > other); } bool operator!=(const uint128_t & other) { return !(*this == other); } uint128_t(uint64_t low, uint64_t high) : low64(low), high64(high) {} uint128_t() {} }; } using uint128_t = typedefs::uint128_t; /* ************************************* * Compiler / Platform Specific Features ***************************************/ namespace intrin { /*!XXH_CPU_LITTLE_ENDIAN : * This is a CPU endian detection macro, will be * automatically set to 1 (little endian) if it is left undefined. * If compiling for a big endian system (why), XXH_CPU_LITTLE_ENDIAN has to be explicitly defined as 0. 
*/ #ifndef XXH_CPU_LITTLE_ENDIAN # define XXH_CPU_LITTLE_ENDIAN 1 #endif /* Vectorization Detection * NOTE: XXH_NEON and XXH_VSX aren't supported in this C++ port. * The primary reason is that I don't have access to an ARM and PowerPC * machines to test them, and the secondary reason is that I even doubt anyone writing * code for such machines would bother using a C++ port rather than the original C version. */ #ifndef XXH_VECTOR /* can be predefined on command line */ # if defined(__AVX512F__) # define XXH_VECTOR 3 /* AVX512 for Skylake and Icelake */ # elif defined(__AVX2__) # define XXH_VECTOR 2 /* AVX2 for Haswell and Bulldozer */ # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) # define XXH_VECTOR 1 /* SSE2 for Pentium 4 and all x86_64 */ # else # define XXH_VECTOR 0 /* Portable scalar version */ # endif #endif constexpr int vector_mode = XXH_VECTOR; #if XXH_VECTOR == 3 /* AVX512 for Skylake and Icelake */ constexpr int acc_align = 64; using avx512_underlying = __m512i; using avx2_underlying = __m256i; using sse2_underlying = __m128i; #elif XXH_VECTOR == 2 /* AVX2 for Haswell and Bulldozer */ constexpr int acc_align = 32; using avx512_underlying = void; using avx2_underlying = __m256i; using sse2_underlying = __m128i; #elif XXH_VECTOR == 1 /* SSE2 for Pentium 4 and all x86_64 */ using avx512_underlying = void; using avx2_underlying = void; //std::array<__m128i, 2>; using sse2_underlying = __m128i; constexpr int acc_align = 16; #else /* Portable scalar version */ using avx512_underlying = void; using avx2_underlying = void; //std::array; using sse2_underlying = void; //std::array; constexpr int acc_align = 8; #endif /* Compiler Specifics * Defines inline macros and includes specific compiler's instrinsics. 
* */ #ifdef XXH_FORCE_INLINE /* First undefining the symbols in case they're already defined */ # undef XXH_FORCE_INLINE #endif #ifdef XXH_NO_INLINE # undef XXH_NO_INLINE #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) # define XXH_FORCE_INLINE static __forceinline # define XXH_NO_INLINE static __declspec(noinline) # include #elif defined(__GNUC__) /* Clang / GCC */ # define XXH_FORCE_INLINE static inline __attribute__((always_inline)) # define XXH_NO_INLINE static __attribute__((noinline)) #if (defined(__ARM_NEON) && defined(__APPLE__)) # include "sse2neon.h" # else # include # endif #else # define XXH_FORCE_INLINE static inline # define XXH_NO_INLINE static #endif /* Prefetch * Can be disabled by defining XXH_NO_PREFETCH */ #if defined(XXH_NO_PREFETCH) XXH_FORCE_INLINE void prefetch(const void* ptr) {} #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) XXH_FORCE_INLINE void prefetch(const void* ptr) { _mm_prefetch((const char*)(ptr), _MM_HINT_T0); } #elif defined(__GNUC__) XXH_FORCE_INLINE void prefetch(const void* ptr) { __builtin_prefetch((ptr), 0, 3); } #else XXH_FORCE_INLINE void prefetch(const void* ptr) {} #endif /* Restrict * Defines macro for restrict, which in C++ is sadly just a compiler extension (for now). * Can be disabled by defining XXH_NO_RESTRICT */ #ifdef XXH_RESTRICT # undef XXH_RESTRICT #endif #if (defined(__GNUC__) || defined(_MSC_VER)) && defined(__cplusplus) && !defined(XXH_NO_RESTRICT) # define XXH_RESTRICT __restrict #else # define XXH_RESTRICT #endif /* Likely / Unlikely * Defines macros for Likely / Unlikely, which are official in C++20, but sadly this library aims the previous standard. * Not present on MSVC. 
* Can be disabled by defining XXH_NO_BRANCH_HINTS */ #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)) && !defined(XXH_NO_BRANCH_HINTS) # define XXH_likely(x) __builtin_expect(x, 1) # define XXH_unlikely(x) __builtin_expect(x, 0) #else # define XXH_likely(x) (x) # define XXH_unlikely(x) (x) #endif /* _MM_PERM_ENUM type * Defines the type for the parameters of the shuffle function, in a way that works with -fpedantic while not breaking non-x86 compatibility. */ #if defined(__i386__) || defined(__x86_64__) || defined(_M_X64) using mm_perm_enum_t = _MM_PERM_ENUM; #else using mm_perm_enum_t = int; #endif namespace bit_ops { #if defined(_MSC_VER) static inline uint32_t rotl32(uint32_t x, int32_t r) { return _rotl(x, r); } static inline uint64_t rotl64(uint64_t x, int32_t r) { return _rotl64(x, r); } static inline uint32_t rotr32(uint32_t x, int32_t r) { return _rotr(x, r); } static inline uint64_t rotr64(uint64_t x, int32_t r) { return _rotr64(x, r); } #else static inline uint32_t rotl32(uint32_t x, int32_t r) { return ((x << r) | (x >> (32 - r))); } static inline uint64_t rotl64(uint64_t x, int32_t r) { return ((x << r) | (x >> (64 - r))); } static inline uint32_t rotr32(uint32_t x, int32_t r) { return ((x >> r) | (x << (32 - r))); } static inline uint64_t rotr64(uint64_t x, int32_t r) { return ((x >> r) | (x << (64 - r))); } #endif #if defined(_MSC_VER) /* Visual Studio */ static inline uint32_t swap32(uint32_t x) { return _byteswap_ulong(x); } static inline uint64_t swap64(uint64_t x) { return _byteswap_uint64(x); } #elif defined(__GNUC__) static inline uint32_t swap32(uint32_t x) { return __builtin_bswap32(x); } static inline uint64_t swap64(uint64_t x) { return __builtin_bswap64(x); } #else static inline uint32_t swap32(uint32_t x) { return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff); } static inline uint64_t swap64(uint64_t x) { 
return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); } #endif #if defined(_MSC_VER) && defined(_M_IX86) // Only for 32-bit MSVC. XXH_FORCE_INLINE uint64_t mult32to64(uint32_t x, uint32_t y) { return __emulu(x, y); } #else XXH_FORCE_INLINE uint64_t mult32to64(uint32_t x, uint32_t y) { return (uint64_t)(uint32_t)(x) * (uint64_t)(uint32_t)(y); } #endif #if defined(__GNUC__) && !defined(__clang__) && defined(__i386__) __attribute__((__target__("no-sse"))) #endif static inline uint128_t mult64to128(uint64_t lhs, uint64_t rhs) { #if defined(__GNUC__) && !defined(__wasm__) \ && defined(__SIZEOF_INT128__) \ || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs; uint128_t r128; r128.low64 = (uint64_t)(product); r128.high64 = (uint64_t)(product >> 64); return r128; #elif defined(_M_X64) || defined(_M_IA64) #ifndef _MSC_VER # pragma intrinsic(_umul128) #endif uint64_t product_high; uint64_t const product_low = _umul128(lhs, rhs, &product_high); uint128_t r128; r128.low64 = product_low; r128.high64 = product_high; return r128; #else uint64_t const lo_lo = bit_ops::mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); uint64_t const hi_lo = bit_ops::mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); uint64_t const lo_hi = bit_ops::mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); uint64_t const hi_hi = bit_ops::mult32to64(lhs >> 32, rhs >> 32); /* Now add the products together. These will never overflow. 
*/ uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); uint128_t r128; r128.low64 = lower; r128.high64 = upper; return r128; #endif } } } /* ************************************* * Basic Types - Everything else ***************************************/ namespace typedefs { /* ************************************* * Basic Types - Detail ***************************************/ template struct hash_type { using type = void; }; template <> struct hash_type<32> { using type = uint32_t; }; template <> struct hash_type<64> { using type = uint64_t; }; template <> struct hash_type<128> { using type = uint128_t; }; template struct vec_type { using type = void; }; template <> struct vec_type<64> { using type = uint64_t; }; template <> struct vec_type<128> { using type = intrin::sse2_underlying; }; template <> struct vec_type<256> { using type = intrin::avx2_underlying; }; template <> struct vec_type<512> { using type = intrin::avx512_underlying; }; /* Rationale * On the surface level uint_type appears to be pointless, * as it is just a copy of hash_type. They do use the same types, * that is true, but the reasoning for the difference is aimed at humans, * not the compiler, as a difference between values that are 'just' numbers, * and those that represent actual hash values. 
*/ template struct uint_type { using type = void; }; template <> struct uint_type<32> { using type = uint32_t; }; template <> struct uint_type<64> { using type = uint64_t; }; template <> struct uint_type<128> { using type = uint128_t; }; } template using hash_t = typename typedefs::hash_type::type; using hash32_t = hash_t<32>; using hash64_t = hash_t<64>; using hash128_t = hash_t<128>; template using vec_t = typename typedefs::vec_type::type; using vec64_t = vec_t<64>; using vec128_t = vec_t<128>; using vec256_t = vec_t<256>; using vec512_t = vec_t<512>; template using uint_t = typename typedefs::uint_type::type; /* ************************************* * Bit Operations ***************************************/ namespace bit_ops { /* **************************************** * Bit Operations ******************************************/ template static inline uint_t rotl(uint_t n, int32_t r) { if constexpr (N == 32) { return intrin::bit_ops::rotl32(n, r); } if constexpr (N == 64) { return intrin::bit_ops::rotl64(n, r); } } template static inline uint_t rotr(uint_t n, int32_t r) { if constexpr (N == 32) { return intrin::bit_ops::rotr32(n, r); } if constexpr (N == 64) { return intrin::bit_ops::rotr64(n, r); } } template static inline uint_t swap(uint_t n) { if constexpr (N == 32) { return intrin::bit_ops::swap32(n); } if constexpr (N == 64) { return intrin::bit_ops::swap64(n); } } template static inline vec_t mul32to64(vec_t x, vec_t y) { if constexpr (N == 64) { return intrin::bit_ops::mult32to64(static_cast(x), static_cast(y)); } else { return 0; } } static inline uint128_t mul64to128(uint64_t x, uint64_t y) { return intrin::bit_ops::mult64to128(x, y); } static inline uint64_t mul128fold64(uint64_t x, uint64_t y) { uint128_t product = mul64to128(x, y); return (product.low64 ^ product.high64); } } /* ************************************* * Memory Functions ***************************************/ namespace mem_ops { /* ************************************* * Endianness 
***************************************/ constexpr bool is_little_endian() { return (XXH_CPU_LITTLE_ENDIAN == 1); } /* ************************************* * Memory Access ***************************************/ template static inline uint_t read(const void* memPtr) { uint_t val; memcpy(&val, memPtr, sizeof(val)); return val; } template static inline uint_t readLE(const void* ptr) { if constexpr (is_little_endian()) { return read(ptr); } else { return bit_ops::swap(read(ptr)); } } template static inline uint_t readBE(const void* ptr) { if constexpr (is_little_endian()) { return bit_ops::swap(read(ptr)); } else { return read(ptr); } } template static void writeLE(void* dst, uint_t v) { if constexpr (!is_little_endian()) { v = bit_ops::swap(v); } memcpy(dst, &v, sizeof(v)); } } /* ************************************* * Vector Functions ***************************************/ namespace vec_ops { template XXH_FORCE_INLINE vec_t loadu(const vec_t* input) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid template argument passed to xxh::vec_ops::loadu"); if constexpr (N == 128) { return _mm_loadu_si128(input); } if constexpr (N == 256) { return _mm256_loadu_si256(input); } if constexpr (N == 512) { return _mm512_loadu_si512(input); } if constexpr (N == 64) { return mem_ops::readLE<64>(input); } } // 'xorv' instead of 'xor' because 'xor' is a weird wacky alternate operator expression thing. 
template XXH_FORCE_INLINE vec_t xorv(vec_t a, vec_t b) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::xorv"); if constexpr (N == 128) { return _mm_xor_si128(a, b); } if constexpr (N == 256) { return _mm256_xor_si256(a, b); } if constexpr (N == 512) { return _mm512_xor_si512(a, b); } if constexpr (N == 64) { return a ^ b; } } template XXH_FORCE_INLINE vec_t mul(vec_t a, vec_t b) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::mul"); if constexpr (N == 128) { return _mm_mul_epu32(a, b); } if constexpr (N == 256) { return _mm256_mul_epu32(a, b); } if constexpr (N == 512) { return _mm512_mul_epu32(a, b); } if constexpr (N == 64) { return a * b; } } template XXH_FORCE_INLINE vec_t add(vec_t a, vec_t b) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::add"); if constexpr (N == 128) { return _mm_add_epi64(a, b); } if constexpr (N == 256) { return _mm256_add_epi64(a, b); } if constexpr (N == 512) { return _mm512_add_epi64(a, b); } if constexpr (N == 64) { return a + b; } } template XXH_FORCE_INLINE vec_t shuffle(vec_t a) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::shuffle"); if constexpr (N == 128) { return _mm_shuffle_epi32(a, static_cast(_MM_SHUFFLE(S1, S2, S3, S4))); } if constexpr (N == 256) { return _mm256_shuffle_epi32(a, static_cast(_MM_SHUFFLE(S1, S2, S3, S4))); } if constexpr (N == 512) { return _mm512_shuffle_epi32(a, static_cast(_MM_SHUFFLE(S1, S2, S3, S4))); } if constexpr (N == 64) { return a; } } template XXH_FORCE_INLINE vec_t set1(int64_t a) { #if (defined(__ARM_NEON) && defined(__APPLE__)) static_assert(!(N != 128 && N != 64), "Invalid argument passed to xxh::vec_ops::set1"); #else static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::set1"); if constexpr (N == 256) { return 
_mm256_set1_epi32(static_cast(a)); } if constexpr (N == 512) { return _mm512_set1_epi32(static_cast(a)); } #endif if constexpr (N == 128) { return _mm_set1_epi32(static_cast(a)); } if constexpr (N == 64) { return a; } } template XXH_FORCE_INLINE vec_t srli(vec_t n, int a) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::srli"); if constexpr (N == 128) { return _mm_srli_epi64(n, a); } if constexpr (N == 256) { return _mm256_srli_epi64(n, a); } if constexpr (N == 512) { return _mm512_srli_epi64(n, a); } if constexpr (N == 64) { return n >> a; } } template XXH_FORCE_INLINE vec_t slli(vec_t n, int a) { static_assert(!(N != 128 && N != 256 && N != 64 && N != 512), "Invalid argument passed to xxh::vec_ops::slli"); if constexpr (N == 128) { return _mm_slli_epi64(n, a); } if constexpr (N == 256) { return _mm256_slli_epi64(n, a); } if constexpr (N == 512) { return _mm512_slli_epi64(n, a); } if constexpr (N == 64) { return n << a; } } } /* ************************************* * Canonical represenation ***************************************/ template struct canonical_t { std::array digest{ 0 }; canonical_t(hash_t hash) { if constexpr (bit_mode < 128) { if (mem_ops::is_little_endian()) { hash = bit_ops::swap(hash); } memcpy(digest.data(), &hash, sizeof(canonical_t)); } else { if (mem_ops::is_little_endian()) { hash.low64 = bit_ops::swap<64>(hash.low64); hash.high64 = bit_ops::swap<64>(hash.high64); } memcpy(digest.data(), &hash.high64, sizeof(hash.high64)); memcpy(digest.data() + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); } } hash_t get_hash() const { if constexpr (bit_mode < 128) { return mem_ops::readBE(&digest); } else { return { mem_ops::readBE<64>(&digest[8]), mem_ops::readBE<64>(&digest) }; } } }; using canonical32_t = canonical_t<32>; using canonical64_t = canonical_t<64>; using canonical128_t = canonical_t<128>; template inline hash_t to_canonical(hash_t hash) { static_assert(!(bit_mode != 128 && 
bit_mode != 64 && bit_mode != 32), "Canonical form can only be obtained from 32, 64 and 128 bit hashes."); canonical_t canon(hash); hash_t res; memcpy(&res, &canon, bit_mode / 8); return res; } /* ************************************* * Algorithm Implementation - xxhash ***************************************/ namespace detail { using namespace mem_ops; using namespace bit_ops; /* ************************************* * Constants ***************************************/ constexpr static std::array primes32 = { 2654435761U, 2246822519U, 3266489917U, 668265263U, 374761393U }; constexpr static std::array primes64 = { 11400714785074694791ULL, 14029467366897019727ULL, 1609587929392839161ULL, 9650029242287828579ULL, 2870177450012600261ULL }; template constexpr uint_t PRIME(uint64_t n) { if constexpr (N == 32) { return primes32[n - 1]; } else { return primes64[n - 1]; } } /* ************************************* * Functions ***************************************/ template XXH_FORCE_INLINE uint_t avalanche(uint_t hash) { if constexpr (N == 32) { hash ^= hash >> 15; hash *= PRIME<32>(2); hash ^= hash >> 13; hash *= PRIME<32>(3); hash ^= hash >> 16; return hash; } else if constexpr (N == 64) { hash ^= hash >> 33; hash *= PRIME<64>(2); hash ^= hash >> 29; hash *= PRIME<64>(3); hash ^= hash >> 32; return hash; } else return 0; } template XXH_FORCE_INLINE uint_t round(uint_t seed, uint_t input) { seed += input * PRIME(2); if constexpr (N == 32) { seed = rotl(seed, 13); } else { seed = rotl(seed, 31); } seed *= PRIME(1); return seed; } XXH_FORCE_INLINE uint64_t mergeRound64(hash64_t acc, uint64_t val) { val = round<64>(0, val); acc ^= val; acc = acc * PRIME<64>(1) + PRIME<64>(4); return acc; } XXH_FORCE_INLINE void endian_align_sub_mergeround(hash64_t& hash_ret, uint64_t v1, uint64_t v2, uint64_t v3, uint64_t v4) { hash_ret = mergeRound64(hash_ret, v1); hash_ret = mergeRound64(hash_ret, v2); hash_ret = mergeRound64(hash_ret, v3); hash_ret = mergeRound64(hash_ret, v4); } 
template static inline hash_t endian_align_sub_ending(hash_t hash_ret, const uint8_t* p, const uint8_t* bEnd) { if constexpr (N == 32) { while ((p + 4) <= bEnd) { hash_ret += readLE<32>(p) * PRIME<32>(3); hash_ret = rotl<32>(hash_ret, 17) * PRIME<32>(4); p += 4; } while (p < bEnd) { hash_ret += (*p) * PRIME<32>(5); hash_ret = rotl<32>(hash_ret, 11) * PRIME<32>(1); p++; } return avalanche<32>(hash_ret); } else { while (p + 8 <= bEnd) { const uint64_t k1 = round<64>(0, readLE<64>(p)); hash_ret ^= k1; hash_ret = rotl<64>(hash_ret, 27) * PRIME<64>(1) + PRIME<64>(4); p += 8; } if (p + 4 <= bEnd) { hash_ret ^= static_cast(readLE<32>(p))* PRIME<64>(1); hash_ret = rotl<64>(hash_ret, 23) * PRIME<64>(2) + PRIME<64>(3); p += 4; } while (p < bEnd) { hash_ret ^= (*p) * PRIME<64>(5); hash_ret = rotl<64>(hash_ret, 11) * PRIME<64>(1); p++; } return avalanche<64>(hash_ret); } } template static inline hash_t endian_align(const void* input, size_t len, uint_t seed) { static_assert(!(N != 32 && N != 64), "You can only call endian_align in 32 or 64 bit mode."); const uint8_t* p = static_cast(input); const uint8_t* bEnd = p + len; hash_t hash_ret; if (len >= (N / 2)) { const uint8_t* const limit = bEnd - (N / 2); uint_t v1 = seed + PRIME(1) + PRIME(2); uint_t v2 = seed + PRIME(2); uint_t v3 = seed + 0; uint_t v4 = seed - PRIME(1); do { v1 = round(v1, readLE(p)); p += (N / 8); v2 = round(v2, readLE(p)); p += (N / 8); v3 = round(v3, readLE(p)); p += (N / 8); v4 = round(v4, readLE(p)); p += (N / 8); } while (p <= limit); hash_ret = rotl(v1, 1) + rotl(v2, 7) + rotl(v3, 12) + rotl(v4, 18); if constexpr (N == 64) { endian_align_sub_mergeround(hash_ret, v1, v2, v3, v4); } } else { hash_ret = seed + PRIME(5); } hash_ret += static_cast>(len); return endian_align_sub_ending(hash_ret, p, bEnd); } } /* ************************************* * Algorithm Implementation - xxhash3 ***************************************/ namespace detail3 { using namespace vec_ops; using namespace detail; using 
namespace mem_ops; using namespace bit_ops; /* ************************************* * Enums ***************************************/ enum class vec_mode : uint8_t { scalar = 0, sse2 = 1, avx2 = 2, avx512 = 3 }; /* ************************************* * Constants ***************************************/ constexpr uint64_t secret_default_size = 192; constexpr uint64_t secret_size_min = 136; constexpr uint64_t secret_consume_rate = 8; constexpr uint64_t stripe_len = 64; constexpr uint64_t acc_nb = 8; constexpr uint64_t prefetch_distance = 384; constexpr uint64_t secret_lastacc_start = 7; constexpr uint64_t secret_mergeaccs_start = 11; constexpr uint64_t midsize_max = 240; constexpr uint64_t midsize_startoffset = 3; constexpr uint64_t midsize_lastoffset = 17; constexpr vec_mode vector_mode = static_cast(intrin::vector_mode); constexpr uint64_t acc_align = intrin::acc_align; constexpr std::array vector_bit_width { 64, 128, 256, 512 }; /* ************************************* * Defaults ***************************************/ alignas(64) constexpr uint8_t default_secret[secret_default_size] = { 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, 0x17, 0x0d, 0xdd, 0x51, 
0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, }; constexpr std::array init_acc = { PRIME<32>(3), PRIME<64>(1), PRIME<64>(2), PRIME<64>(3), PRIME<64>(4), PRIME<32>(2), PRIME<64>(5), PRIME<32>(1) }; /* ************************************* * Functions ***************************************/ XXH_FORCE_INLINE hash_t<64> avalanche(hash_t<64> h64) { constexpr uint64_t avalanche_mul_prime = 0x165667919E3779F9ULL; h64 ^= h64 >> 37; h64 *= avalanche_mul_prime; h64 ^= h64 >> 32; return h64; } XXH_FORCE_INLINE hash_t<64> rrmxmx(hash_t<64> h64, uint64_t len) { h64 ^= rotl<64>(h64, 49) ^ rotl<64>(h64, 24); h64 *= 0x9FB21C651E98DF25ULL; h64 ^= (h64 >> 35) + len; h64 *= 0x9FB21C651E98DF25ULL; h64 ^= (h64 >> 28); return h64; } XXH_FORCE_INLINE void combine_16(void* dest, hash128_t h128) { writeLE<64>(dest, readLE<64>(dest) ^ h128.low64); writeLE<64>((uint8_t*)dest + 8, readLE<64>((uint8_t*)dest + 8) ^ h128.high64); } XXH_FORCE_INLINE void accumulate_512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT input, const void* XXH_RESTRICT secret) { constexpr uint64_t bits = vector_bit_width[static_cast(vector_mode)]; using vec_t = vec_t; alignas(sizeof(vec_t)) vec_t* const xacc = static_cast(acc); const vec_t* const xinput = static_cast(input); const vec_t* const xsecret = static_cast(secret); for (size_t i = 0; i < stripe_len / sizeof(vec_t); i++) { vec_t const data_vec = loadu(xinput + i); vec_t const key_vec = loadu(xsecret + i); vec_t const data_key = xorv(data_vec, key_vec); vec_t product = set1(0); if constexpr (vector_mode == vec_mode::scalar) { product = mul32to64(srli(slli(data_key, 32),32), srli(data_key, 32)); xacc[i ^ 1] = add(xacc[i ^ 1], data_vec); xacc[i] = add(xacc[i], product); } else { vec_t const data_key_lo = shuffle(data_key); product = mul(data_key, 
data_key_lo); vec_t const data_swap = shuffle(data_vec); vec_t const sum = add(xacc[i], data_swap); xacc[i] = add(sum, product); } } } XXH_FORCE_INLINE void scramble_acc(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) { constexpr uint64_t bits = vector_bit_width[static_cast(vector_mode)];; using vec_t = vec_t; alignas(sizeof(vec_t)) vec_t* const xacc = (vec_t*)acc; const vec_t* const xsecret = (const vec_t*)secret; for (size_t i = 0; i < stripe_len / sizeof(vec_t); i++) { vec_t const acc_vec = xacc[i]; vec_t const shifted = srli(acc_vec, 47); vec_t const data_vec = xorv(acc_vec, shifted); vec_t const key_vec = loadu(xsecret + i); vec_t const data_key = xorv(data_vec, key_vec); if constexpr (vector_mode == vec_mode::scalar) { xacc[i] = mul(data_key, set1(PRIME<32>(1))); } else { vec_t const prime32 = set1(PRIME<32>(1)); vec_t const data_key_hi = shuffle(data_key); vec_t const prod_lo = mul(data_key, prime32); vec_t const prod_hi = mul(data_key_hi, prime32); xacc[i] = add(prod_lo, vec_ops::slli(prod_hi, 32)); } } } XXH_FORCE_INLINE void accumulate(uint64_t* XXH_RESTRICT acc, const uint8_t* XXH_RESTRICT input, const uint8_t* XXH_RESTRICT secret, size_t nbStripes) { for (size_t n = 0; n < nbStripes; n++) { const uint8_t* const in = input + n * stripe_len; intrin::prefetch(in + prefetch_distance); accumulate_512(acc, in, secret + n * secret_consume_rate); } } XXH_FORCE_INLINE void hash_long_internal_loop(uint64_t* XXH_RESTRICT acc, const uint8_t* XXH_RESTRICT input, size_t len, const uint8_t* XXH_RESTRICT secret, size_t secretSize) { size_t const nb_rounds = (secretSize - stripe_len) / secret_consume_rate; size_t const block_len = stripe_len * nb_rounds; size_t const nb_blocks = (len-1) / block_len; for (size_t n = 0; n < nb_blocks; n++) { accumulate(acc, input + n * block_len, secret, nb_rounds); scramble_acc(acc, secret + secretSize - stripe_len); } /* last partial block */ size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / stripe_len; 
accumulate(acc, input + nb_blocks * block_len, secret, nbStripes); /* last stripe */ const uint8_t* const p = input + len - stripe_len; accumulate_512(acc, p, secret + secretSize - stripe_len - secret_lastacc_start); } XXH_FORCE_INLINE uint64_t mix_2_accs(const uint64_t* XXH_RESTRICT acc, const uint8_t* XXH_RESTRICT secret) { return mul128fold64(acc[0] ^ readLE<64>(secret), acc[1] ^ readLE<64>(secret + 8)); } XXH_FORCE_INLINE uint64_t merge_accs(const uint64_t* XXH_RESTRICT acc, const uint8_t* XXH_RESTRICT secret, uint64_t start) { uint64_t result64 = start; result64 += mix_2_accs(acc + 0, secret + 0); result64 += mix_2_accs(acc + 2, secret + 16); result64 += mix_2_accs(acc + 4, secret + 32); result64 += mix_2_accs(acc + 6, secret + 48); return avalanche(result64); } XXH_FORCE_INLINE void init_custom_secret(uint8_t* customSecret, uint64_t seed) { for (uint64_t i = 0; i < secret_default_size / 16; i++) { writeLE<64>(customSecret + i * 16, readLE<64>(default_secret + i * 16) + seed); writeLE<64>(customSecret + i * 16 + 8, readLE<64>(default_secret + i * 16 + 8) - seed); } } template XXH_FORCE_INLINE hash_t len_1to3(const uint8_t* input, size_t len, const uint8_t* secret, uint64_t seed) { if constexpr (N == 64) { uint8_t const c1 = input[0]; uint8_t const c2 = input[len >> 1]; uint8_t const c3 = input[len - 1]; uint32_t const combined = ((uint32_t)c1 << 16) | (((uint32_t)c2) << 24) | (((uint32_t)c3) << 0) | (((uint32_t)len) << 8); uint64_t const bitflip = (readLE<32>(secret) ^ readLE<32>(secret + 4)) + seed; uint64_t const keyed = (uint64_t)combined ^ bitflip; return detail::avalanche<64>(keyed); } else { uint8_t const c1 = input[0]; uint8_t const c2 = input[len >> 1]; uint8_t const c3 = input[len - 1]; uint32_t const combinedl = ((uint32_t)c1 << 16) + (((uint32_t)c2) << 24) + (((uint32_t)c3) << 0) + (((uint32_t)len) << 8); uint32_t const combinedh = rotl<32>(swap<32>(combinedl), 13); uint64_t const bitflipl = (readLE<32>(secret) ^ readLE<32>(secret + 4)) + seed; 
uint64_t const bitfliph = (readLE<32>(secret + 8) ^ readLE<32>(secret + 12)) - seed; uint64_t const keyed_lo = (uint64_t)combinedl ^ bitflipl; uint64_t const keyed_hi = (uint64_t)combinedh ^ bitfliph; hash128_t const h128 = { detail::avalanche<64>(keyed_lo), detail::avalanche<64>(keyed_hi)}; return h128; } } template XXH_FORCE_INLINE hash_t len_4to8(const uint8_t* input, size_t len, const uint8_t* secret, uint64_t seed) { constexpr uint64_t mix_constant = 0x9FB21C651E98DF25ULL; seed ^= (uint64_t)swap<32>((uint32_t)seed) << 32; if constexpr (N == 64) { uint32_t const input1 = readLE<32>(input); uint32_t const input2 = readLE<32>(input + len - 4); uint64_t const bitflip = (readLE<64>(secret + 8) ^ readLE<64>(secret + 16)) - seed; uint64_t const input64 = input2 + ((uint64_t)input1 << 32); uint64_t keyed = input64 ^ bitflip; return rrmxmx(keyed, len); } else { uint32_t const input_lo = readLE<32>(input); uint32_t const input_hi = readLE<32>(input + len - 4); uint64_t const input_64 = input_lo + ((uint64_t)input_hi << 32); uint64_t const bitflip = (readLE<64>(secret + 16) ^ readLE<64>(secret + 24)) + seed; uint64_t const keyed = input_64 ^ bitflip; uint128_t m128 = mul64to128(keyed, PRIME<64>(1) + (len << 2)); m128.high64 += (m128.low64 << 1); m128.low64 ^= (m128.high64 >> 3); m128.low64 ^= (m128.low64 >> 35); m128.low64 *= mix_constant; m128.low64 ^= (m128.low64 >> 28); m128.high64 = avalanche(m128.high64); return m128; } } template XXH_FORCE_INLINE hash_t len_9to16(const uint8_t* input, size_t len, const uint8_t* secret, uint64_t seed) { if constexpr (N == 64) { uint64_t const bitflip1 = (readLE<64>(secret + 24) ^ readLE<64>(secret + 32)) + seed; uint64_t const bitflip2 = (readLE<64>(secret + 40) ^ readLE<64>(secret + 48)) - seed; uint64_t const input_lo = readLE<64>(input) ^ bitflip1; uint64_t const input_hi = readLE<64>(input + len - 8) ^ bitflip2; uint64_t const acc = len + swap<64>(input_lo) + input_hi + mul128fold64(input_lo, input_hi); return avalanche(acc); } 
else { uint64_t const bitflipl = (readLE<64>(secret + 32) ^ readLE<64>(secret + 40)) - seed; uint64_t const bitfliph = (readLE<64>(secret + 48) ^ readLE<64>(secret + 56)) + seed; uint64_t const input_lo = readLE<64>(input); uint64_t input_hi = readLE<64>(input + len - 8); uint128_t m128 = mul64to128(input_lo ^ input_hi ^ bitflipl, PRIME<64>(1)); m128.low64 += (uint64_t)(len - 1) << 54; input_hi ^= bitfliph; if constexpr (sizeof(void*) < sizeof(uint64_t)) // 32-bit version { m128.high64 += (input_hi & 0xFFFFFFFF00000000) + mul32to64((uint32_t)input_hi, PRIME<32>(2)); } else { m128.high64 += input_hi + mul32to64((uint32_t)input_hi, PRIME<32>(2) - 1); } m128.low64 ^= swap<64>(m128.high64); hash128_t h128 = mul64to128(m128.low64, PRIME<64>(2)); h128.high64 += m128.high64 * PRIME<64>(2); h128.low64 = avalanche(h128.low64); h128.high64 = avalanche(h128.high64); return h128; } } template XXH_FORCE_INLINE hash_t len_0to16(const uint8_t* input, size_t len, const uint8_t* secret, uint64_t seed) { if (XXH_likely(len > 8)) { return len_9to16(input, len, secret, seed); } else if (XXH_likely(len >= 4)) { return len_4to8(input, len, secret, seed); } else if (len) { return len_1to3(input, len, secret, seed); } else { if constexpr (N == 64) { return detail::avalanche<64>((seed) ^ (readLE<64>(secret + 56) ^ readLE<64>(secret + 64))); } else { uint64_t const bitflipl = readLE<64>(secret + 64) ^ readLE<64>(secret + 72); uint64_t const bitfliph = readLE<64>(secret + 80) ^ readLE<64>(secret + 88); return hash128_t(detail::avalanche<64>(( seed) ^ bitflipl), detail::avalanche<64>(( seed) ^ bitfliph)); } } } template XXH_FORCE_INLINE hash_t hash_long_internal(const uint8_t* XXH_RESTRICT input, size_t len, const uint8_t* XXH_RESTRICT secret = default_secret, size_t secretSize = sizeof(default_secret)) { alignas(acc_align) std::array acc = init_acc; if constexpr (N == 64) { hash_long_internal_loop(acc.data(), input, len, secret, secretSize); /* converge into final hash */ return 
merge_accs(acc.data(), secret + secret_mergeaccs_start, (uint64_t)len * PRIME<64>(1)); } else { hash_long_internal_loop(acc.data(), input, len, secret, secretSize); /* converge into final hash */ uint64_t const low64 = merge_accs(acc.data(), secret + secret_mergeaccs_start, (uint64_t)len * PRIME<64>(1)); uint64_t const high64 = merge_accs(acc.data(), secret + secretSize - sizeof(acc) - secret_mergeaccs_start, ~((uint64_t)len * PRIME<64>(2))); return hash128_t(low64, high64); } } XXH_FORCE_INLINE uint64_t mix_16b(const uint8_t* XXH_RESTRICT input, const uint8_t* XXH_RESTRICT secret, uint64_t seed) { uint64_t const input_lo = readLE<64>(input); uint64_t const input_hi = readLE<64>(input + 8); return mul128fold64(input_lo ^ (readLE<64>(secret) + seed), input_hi ^ (readLE<64>(secret + 8) - seed)); } XXH_FORCE_INLINE uint128_t mix_32b(uint128_t acc, const uint8_t* input1, const uint8_t* input2, const uint8_t* secret, uint64_t seed) { acc.low64 += mix_16b(input1, secret + 0, seed); acc.low64 ^= readLE<64>(input2) + readLE<64>(input2 + 8); acc.high64 += mix_16b(input2, secret + 16, seed); acc.high64 ^= readLE<64>(input1) + readLE<64>(input1 + 8); return acc; } template XXH_FORCE_INLINE hash_t len_17to128(const uint8_t* XXH_RESTRICT input, size_t len, const uint8_t* XXH_RESTRICT secret, uint64_t seed) { if constexpr (N == 64) { hash64_t acc = len * PRIME<64>(1); if (len > 32) { if (len > 64) { if (len > 96) { acc += mix_16b(input + 48, secret + 96, seed); acc += mix_16b(input + len - 64, secret + 112, seed); } acc += mix_16b(input + 32, secret + 64, seed); acc += mix_16b(input + len - 48, secret + 80, seed); } acc += mix_16b(input + 16, secret + 32, seed); acc += mix_16b(input + len - 32, secret + 48, seed); } acc += mix_16b(input + 0, secret + 0, seed); acc += mix_16b(input + len - 16, secret + 16, seed); return avalanche(acc); } else { hash128_t acc = { len * PRIME<64>(1), 0 }; if (len > 32) { if (len > 64) { if (len > 96) { acc = mix_32b(acc, input + 48, input + len - 
64, secret + 96, seed); } acc = mix_32b(acc, input + 32, input + len - 48, secret + 64, seed); } acc = mix_32b(acc, input + 16, input + len - 32, secret + 32, seed); } acc = mix_32b(acc, input, input + len - 16, secret, seed); uint64_t const low64 = acc.low64 + acc.high64; uint64_t const high64 = (acc.low64 * PRIME<64>(1)) + (acc.high64 * PRIME<64>(4)) + ((len - seed) * PRIME<64>(2)); return { avalanche(low64), (uint64_t)0 - avalanche(high64) }; } } template XXH_NO_INLINE hash_t len_129to240(const uint8_t* XXH_RESTRICT input, size_t len, const uint8_t* XXH_RESTRICT secret, uint64_t seed) { if constexpr (N == 64) { uint64_t acc = len * PRIME<64>(1); size_t const nbRounds = len / 16; for (size_t i = 0; i < 8; i++) { acc += mix_16b(input + (i * 16), secret + (i * 16), seed); } acc = avalanche(acc); for (size_t i = 8; i < nbRounds; i++) { acc += mix_16b(input + (i * 16), secret + ((i - 8) * 16) + midsize_startoffset, seed); } /* last bytes */ acc += mix_16b(input + len - 16, secret + secret_size_min - midsize_lastoffset, seed); return avalanche(acc); } else { hash128_t acc; uint64_t const nbRounds = len / 32; acc.low64 = len * PRIME<64>(1); acc.high64 = 0; for (size_t i = 0; i < 4; i++) { acc = mix_32b(acc, input + (i * 32), input + (i * 32) + 16, secret + (i * 32), seed); } acc.low64 = avalanche(acc.low64); acc.high64 = avalanche(acc.high64); for (size_t i = 4; i < nbRounds; i++) { acc = mix_32b(acc, input + (i * 32), input + (i * 32) + 16, secret + midsize_startoffset + ((i - 4) * 32), seed); } /* last bytes */ acc = mix_32b(acc, input + len - 16, input + len - 32, secret + secret_size_min - midsize_lastoffset - 16, 0ULL - seed); uint64_t const low64 = acc.low64 + acc.high64; uint64_t const high64 = (acc.low64 * PRIME<64>(1)) + (acc.high64 * PRIME<64>(4)) + ((len - seed) * PRIME<64>(2)); return { avalanche(low64), (uint64_t)0 - avalanche(high64) }; } } template XXH_NO_INLINE hash_t xxhash3_impl(const void* XXH_RESTRICT input, size_t len, hash64_t seed, const void* 
XXH_RESTRICT secret = default_secret, size_t secretSize = secret_default_size, bool forceSeedUse = false) { alignas(64) uint8_t custom_secret[secret_default_size]; const void* short_secret = secret; if (seed != 0 || forceSeedUse) { init_custom_secret(custom_secret, seed); short_secret = default_secret; } if (len <= 16) { return len_0to16(static_cast(input), len, static_cast(short_secret), seed); } else if (len <= 128) { return len_17to128(static_cast(input), len, static_cast(short_secret), seed); } else if (len <= midsize_max) { return len_129to240(static_cast(input), len, static_cast(short_secret), seed); } else { return hash_long_internal(static_cast(input), len, static_cast(((seed == 0) ? secret : ((secret == default_secret) ? custom_secret : secret))), ((seed == 0) ? secretSize : ((secret == default_secret) ? secret_default_size : secretSize))); } } XXH_NO_INLINE void generate_secret(void* secret_buffer, size_t secret_size, const void* custom_seed, size_t seed_size) { if (seed_size == 0) { custom_seed = default_secret; seed_size = secret_default_size; } size_t pos = 0; while (pos < secret_size) { size_t const copy_len = std::min(secret_size - pos, seed_size); memcpy((uint8_t*)secret_buffer + pos, custom_seed, copy_len); pos += copy_len; } size_t const nbseg16 = secret_size / 16; canonical128_t scrambled(xxhash3_impl<128>(custom_seed, seed_size, 0)); for (size_t n = 0; n < nbseg16; n++) { hash128_t const h128 = xxhash3_impl<128>(&scrambled, sizeof(scrambled), n); combine_16((uint8_t*)secret_buffer + n * 16, h128); } combine_16((uint8_t*)secret_buffer + secret_size - 16, scrambled.get_hash()); } } /* ************************************* * Public Access Point - xxhash ***************************************/ template inline hash_t xxhash(const void* input, size_t len, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); return detail::endian_align(input, len, seed); } template inline hash_t 
xxhash(const std::basic_string& input, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); return detail::endian_align(static_cast(input.data()), input.length() * sizeof(T), seed); } template inline hash_t xxhash(ContiguousIterator begin, ContiguousIterator end, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); using T = typename std::decay_t; return detail::endian_align(static_cast(&*begin), (end - begin) * sizeof(T), seed); } template inline hash_t xxhash(const std::vector& input, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); return detail::endian_align(static_cast(input.data()), input.size() * sizeof(T), seed); } template inline hash_t xxhash(const std::array& input, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); return detail::endian_align(static_cast(input.data()), AN * sizeof(T), seed); } template inline hash_t xxhash(const std::initializer_list& input, uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash can only be used in 32 and 64 bit modes."); return detail::endian_align(static_cast(input.begin()), input.size() * sizeof(T), seed); } /* ************************************* * Public Access Point - xxhash3 ***************************************/ template inline hash_t xxhash3(const void* input, size_t len, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(input, len, seed); } template inline hash_t xxhash3(const void* input, size_t len, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(input, len, 0, secret, 
secretSize); } template inline hash_t xxhash3(const void* input, size_t len, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(input, len, seed, secret, secretSize, true); } template inline hash_t xxhash3(const std::basic_string& input, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.length() * sizeof(T), seed); } template inline hash_t xxhash3(const std::basic_string& input, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.length() * sizeof(T), 0, secret, secretSize); } template inline hash_t xxhash3(const std::basic_string& input, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.length() * sizeof(T), seed, secret, secretSize, true); } template inline hash_t xxhash3(ContiguousIterator begin, ContiguousIterator end, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); using T = typename std::decay_t; return detail3::xxhash3_impl(static_cast(&*begin), (end - begin) * sizeof(T), seed); } template inline hash_t xxhash3(ContiguousIterator begin, ContiguousIterator end, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); using T = typename std::decay_t; return detail3::xxhash3_impl(static_cast(&*begin), (end - begin) * sizeof(T), 0, secret, secretSize); } template inline hash_t 
xxhash3(ContiguousIterator begin, ContiguousIterator end, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); using T = typename std::decay_t; return detail3::xxhash3_impl(static_cast(&*begin), (end - begin) * sizeof(T), seed, secret, secretSize, true); } template inline hash_t xxhash3(const std::vector& input, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.size() * sizeof(T), seed); } template inline hash_t xxhash3(const std::vector& input, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.size() * sizeof(T), 0, secret, secretSize); } template inline hash_t xxhash3(const std::vector& input, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), input.size() * sizeof(T), seed, secret, secretSize, true); } template inline hash_t xxhash3(const std::array& input, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), AN * sizeof(T), seed); } template inline hash_t xxhash3(const std::array& input, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), AN * sizeof(T), 0, secret, secretSize); } template inline hash_t xxhash3(const std::array& input, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 
&& bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.data()), AN * sizeof(T), seed, secret, secretSize, true); } template inline hash_t xxhash3(const std::initializer_list& input, uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.begin()), input.size() * sizeof(T), seed); } template inline hash_t xxhash3(const std::initializer_list& input, const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.begin()), input.size() * sizeof(T), 0, secret, secretSize); } template inline hash_t xxhash3(const std::initializer_list& input, const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 can only be used in 64 and 128 bit modes."); return detail3::xxhash3_impl(static_cast(input.begin()), input.size() * sizeof(T), seed, secret, secretSize, true); } /* ************************************* * Secret Generation Functions ***************************************/ inline void generate_secret(void* secret_buffer, size_t secret_size, const void* custom_seed = detail3::default_secret, size_t seed_length = 0) { detail3::generate_secret(secret_buffer, secret_size, custom_seed, seed_length); } template inline void generate_secret(void* secret_buffer, size_t secret_size, const std::array& custom_seed) { detail3::generate_secret(secret_buffer, secret_size, static_cast(custom_seed.data()), AN * sizeof(T)); } template inline void generate_secret(void* secret_buffer, size_t secret_size, const std::initializer_list& custom_seed) { detail3::generate_secret(secret_buffer, secret_size, static_cast(custom_seed.begin()), custom_seed.size() * sizeof(T)); } template inline void generate_secret(void* secret_buffer, 
size_t secret_size, const std::vector& custom_seed) { detail3::generate_secret(secret_buffer, secret_size, static_cast(custom_seed.data()), custom_seed.size() * sizeof(T)); } template inline void generate_secret(void* secret_buffer, size_t secret_size, const std::basic_string& custom_seed) { detail3::generate_secret(secret_buffer, secret_size, static_cast(custom_seed.data()), custom_seed.length() * sizeof(T)); } template inline void generate_secret(void* secret_buffer, size_t secret_size, ContiguousIterator begin, ContiguousIterator end) { using T = typename std::decay_t; detail3::generate_secret(secret_buffer, secret_size, static_cast(&*begin), (end - begin) * sizeof(T)); } inline void generate_secret_from_seed(void* secret_buffer, uint64_t seed = 0) { alignas(64) uint8_t custom_secret[detail3::secret_default_size]; detail3::init_custom_secret(custom_secret, seed); memcpy(secret_buffer, custom_secret, detail3::secret_default_size); } /* ************************************* * Hash streaming - xxhash ***************************************/ template class hash_state_t { uint64_t total_len = 0; uint_t v1 = 0, v2 = 0, v3 = 0, v4 = 0; std::array, 4> mem = {0, 0, 0, 0}; uint32_t memsize = 0; inline void update_impl(const void* input, size_t length) { const uint8_t* p = reinterpret_cast(input); const uint8_t* const bEnd = p + length; total_len += length; if (memsize + length < (bit_mode / 2)) { /* fill in tmp buffer */ memcpy(reinterpret_cast(mem.data()) + memsize, input, length); memsize += static_cast(length); return; } if (memsize > 0) { /* some data left from previous update */ memcpy(reinterpret_cast(mem.data()) + memsize, input, (bit_mode / 2) - memsize); const uint_t* ptr = mem.data(); v1 = detail::round(v1, mem_ops::readLE(ptr)); ptr++; v2 = detail::round(v2, mem_ops::readLE(ptr)); ptr++; v3 = detail::round(v3, mem_ops::readLE(ptr)); ptr++; v4 = detail::round(v4, mem_ops::readLE(ptr)); p += (bit_mode / 2) - memsize; memsize = 0; } while (p + (bit_mode / 2) <= 
bEnd) { v1 = detail::round(v1, mem_ops::readLE(p)); p += (bit_mode / 8); v2 = detail::round(v2, mem_ops::readLE(p)); p += (bit_mode / 8); v3 = detail::round(v3, mem_ops::readLE(p)); p += (bit_mode / 8); v4 = detail::round(v4, mem_ops::readLE(p)); p += (bit_mode / 8); } if (p < bEnd) { memcpy(mem.data(), p, static_cast(bEnd - p)); memsize = static_cast(bEnd - p); } } inline hash_t digest_impl() const { const uint8_t* p = reinterpret_cast(mem.data()); const uint8_t* const bEnd = reinterpret_cast(mem.data()) + memsize; hash_t hash_ret; if (total_len >= (bit_mode / 2)) { hash_ret = bit_ops::rotl(v1, 1) + bit_ops::rotl(v2, 7) + bit_ops::rotl(v3, 12) + bit_ops::rotl(v4, 18); if constexpr (bit_mode == 64) { detail::endian_align_sub_mergeround(hash_ret, v1, v2, v3, v4); } } else { hash_ret = v3 + detail::PRIME(5); } hash_ret += static_cast>(total_len); return detail::endian_align_sub_ending(hash_ret, p, bEnd); } public: hash_state_t(uint_t seed = 0) { static_assert(!(bit_mode != 32 && bit_mode != 64), "xxhash streaming can only be used in 32 and 64 bit modes."); v1 = seed + detail::PRIME(1) + detail::PRIME(2); v2 = seed + detail::PRIME(2); v3 = seed + 0; v4 = seed - detail::PRIME(1); }; void reset(uint_t seed = 0) { memset(this, 0, sizeof(hash_state_t)); v1 = seed + detail::PRIME(1) + detail::PRIME(2); v2 = seed + detail::PRIME(2); v3 = seed + 0; v4 = seed - detail::PRIME(1); } void update(const void* input, size_t length) { return update_impl(input, length); } template void update(const std::basic_string& input) { return update_impl(static_cast(input.data()), input.length() * sizeof(T)); } template void update(ContiguousIterator begin, ContiguousIterator end) { using T = typename std::decay_t; return update_impl(static_cast(&*begin), (end - begin) * sizeof(T)); } template void update(const std::vector& input) { return update_impl(static_cast(input.data()), input.size() * sizeof(T)); } template void update(const std::array& input) { return 
update_impl(static_cast(input.data()), AN * sizeof(T)); } template void update(const std::initializer_list& input) { return update_impl(static_cast(input.begin()), input.size() * sizeof(T)); } hash_t digest() const { return digest_impl(); } }; using hash_state32_t = hash_state_t<32>; using hash_state64_t = hash_state_t<64>; /* ************************************* * Hash streaming - xxhash3 ***************************************/ template class alignas(64) hash3_state_t { constexpr static int internal_buffer_size = 256; constexpr static int internal_buffer_stripes = (internal_buffer_size / detail3::stripe_len); alignas(64) uint64_t acc[8]; alignas(64) uint8_t customSecret[detail3::secret_default_size]; /* used to store a custom secret generated from the seed. Makes state larger. Design might change */ alignas(64) uint8_t buffer[internal_buffer_size]; uint32_t bufferedSize = 0; uint32_t nbStripesPerBlock = 0; uint32_t nbStripesSoFar = 0; uint32_t secretLimit = 0; uint32_t reserved32 = 0; uint32_t reserved32_2 = 0; uint64_t totalLen = 0; uint64_t seed = 0; bool useSeed = false; uint64_t reserved64 = 0; const uint8_t* secret = nullptr; /* note : there is some padding after, due to alignment on 64 bytes */ void consume_stripes(uint64_t* acc, uint32_t& nbStripesSoFar, size_t totalStripes, const uint8_t* input) { if (nbStripesPerBlock - nbStripesSoFar <= totalStripes) /* need a scrambling operation */ { size_t const nbStripes = nbStripesPerBlock - nbStripesSoFar; detail3::accumulate(acc, input, secret + (nbStripesSoFar * detail3::secret_consume_rate), nbStripes); detail3::scramble_acc(acc, secret + secretLimit); detail3::accumulate(acc, input + nbStripes * detail3::stripe_len, secret, totalStripes - nbStripes); nbStripesSoFar = (uint32_t)(totalStripes - nbStripes); } else { detail3::accumulate(acc, input, secret + (nbStripesSoFar * detail3::secret_consume_rate), totalStripes); nbStripesSoFar += (uint32_t)totalStripes; } } void update_impl(const void* input_, size_t len) 
{ const uint8_t* input = static_cast(input_); const uint8_t* const bEnd = input + len; totalLen += len; if (bufferedSize + len <= internal_buffer_size) { /* fill in tmp buffer */ memcpy(buffer + bufferedSize, input, len); bufferedSize += (uint32_t)len; return; } /* input now > XXH3_INTERNALBUFFER_SIZE */ if (bufferedSize > 0) { /* some input within internal buffer: fill then consume it */ size_t const loadSize = internal_buffer_size - bufferedSize; memcpy(buffer + bufferedSize, input, loadSize); input += loadSize; consume_stripes(acc, nbStripesSoFar, internal_buffer_stripes, buffer); bufferedSize = 0; } /* consume input by full buffer quantities */ if (input + internal_buffer_size <= bEnd) { const uint8_t* const limit = bEnd - internal_buffer_size; do { consume_stripes(acc, nbStripesSoFar, internal_buffer_stripes, input); input += internal_buffer_size; } while (input < limit); memcpy(buffer + sizeof(buffer) - detail3::stripe_len, input - detail3::stripe_len, detail3::stripe_len); } if (input < bEnd) { /* some remaining input input : buffer it */ memcpy(buffer, input, (size_t)(bEnd - input)); bufferedSize = (uint32_t)(bEnd - input); } } void digest_long(uint64_t* acc_) { memcpy(acc_, acc, sizeof(acc)); /* digest locally, state remains unaltered, and can continue ingesting more input afterwards */ if (bufferedSize >= detail3::stripe_len) { size_t const totalNbStripes = (bufferedSize - 1) / detail3::stripe_len; uint32_t nbStripesSoFar = this->nbStripesSoFar; consume_stripes(acc_, nbStripesSoFar, totalNbStripes, buffer); /* one last partial stripe */ detail3::accumulate_512(acc_, buffer + bufferedSize - detail3::stripe_len, secret + secretLimit - detail3::secret_lastacc_start); } else { /* bufferedSize < STRIPE_LEN */ /* one last stripe */ uint8_t lastStripe[detail3::stripe_len]; size_t const catchupSize = detail3::stripe_len - bufferedSize; memcpy(lastStripe, buffer + sizeof(buffer) - catchupSize, catchupSize); memcpy(lastStripe + catchupSize, buffer, bufferedSize); 
detail3::accumulate_512(acc_, lastStripe, secret + secretLimit - detail3::secret_lastacc_start); } } void reset_internal(uint64_t seed_reset, const void* secret_reset, size_t secret_size) { memset(this, 0, sizeof(*this)); memcpy(acc, detail3::init_acc.data(), sizeof(detail3::init_acc)); seed = seed_reset; useSeed = (seed != 0); secret = (const uint8_t*)secret_reset; secretLimit = (uint32_t)(secret_size - detail3::stripe_len); nbStripesPerBlock = secretLimit / detail3::secret_consume_rate; } public: hash3_state_t operator=(hash3_state_t& other) { memcpy(this, &other, sizeof(hash3_state_t)); } hash3_state_t(uint64_t seed = 0) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 streaming can only be used in 64 and 128 bit modes."); reset(seed); } hash3_state_t(const void* secret, size_t secretSize) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 streaming can only be used in 64 and 128 bit modes."); reset(secret, secretSize); } hash3_state_t(const void* secret, size_t secretSize, uint64_t seed) { static_assert(!(bit_mode != 128 && bit_mode != 64), "xxhash3 streaming can only be used in 64 and 128 bit modes."); reset(secret, secretSize, seed); } void reset(uint64_t seed = 0) { reset_internal(seed, detail3::default_secret, detail3::secret_default_size); detail3::init_custom_secret(customSecret, seed); secret = customSecret; } void reset(const void* secret, size_t secretSize) { reset_internal(0, secret, secretSize); } void reset(const void* secret, size_t secretSize, uint64_t seed) { reset_internal(seed, secret, secretSize); useSeed = true; } void update(const void* input, size_t len) { return update_impl(static_cast(input), len); } template void update(const std::basic_string& input) { return update_impl(static_cast(input.data()), input.length() * sizeof(T)); } template void update(ContiguousIterator begin, ContiguousIterator end) { using T = typename std::decay_t; return update_impl(static_cast(&*begin), (end - begin) * sizeof(T)); } template 
void update(const std::vector& input) { return update_impl(static_cast(input.data()), input.size() * sizeof(T)); } template void update(const std::array& input) { return update_impl(static_cast(input.data()), AN * sizeof(T)); } template void update(const std::initializer_list& input) { return update_impl(static_cast(input.begin()), input.size() * sizeof(T)); } hash_t digest() { if (totalLen > detail3::midsize_max) { alignas(128) hash64_t acc[detail3::acc_nb]; digest_long(acc); if constexpr (bit_mode == 64) { return detail3::merge_accs(acc, secret + detail3::secret_mergeaccs_start, (uint64_t)totalLen * detail::PRIME<64>(1)); } else { uint64_t const low64 = detail3::merge_accs(acc, secret + detail3::secret_mergeaccs_start, (uint64_t)totalLen * detail::PRIME<64>(1)); uint64_t const high64 = detail3::merge_accs(acc, secret + secretLimit + detail3::stripe_len - sizeof(acc) - detail3::secret_mergeaccs_start, ~((uint64_t)totalLen * detail::PRIME<64>(2))); return { low64, high64 }; } } else { if (useSeed) { return detail3::xxhash3_impl(buffer, totalLen, seed); } return detail3::xxhash3_impl(buffer, totalLen, seed, secret, secretLimit + detail3::stripe_len); } } }; using hash3_state64_t = hash3_state_t<64>; using hash3_state128_t = hash3_state_t<128>; } ================================================ FILE: test/CMakeLists.txt ================================================ option(XXH_CPP_USE_AVX2 "Use AVX2 instructions for tests" OFF) function(xxh_cpp_add_test name xxh_vector) add_executable(${name} test_main.cpp) target_link_libraries(${name} PRIVATE xxhash_cpp) target_compile_definitions(${name} PRIVATE -DXXH_VECTOR=${xxh_vector}) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") target_compile_options(${name} PRIVATE -Wall -Wextra -pedantic -Werror) elseif(MSVC) target_compile_options(${name} PRIVATE /W3 /WX) endif() if(XXH_CPP_USE_AVX2) if(MSVC) target_compile_options(${name} PRIVATE /arch:AVX2) else() target_compile_options(${name} 
PRIVATE -mavx2) endif() endif() add_test(NAME ${name} COMMAND ${name}) endfunction() xxh_cpp_add_test(test_scalar 0) xxh_cpp_add_test(test_sse2 1) xxh_cpp_add_test(test_avx2 2) ================================================ FILE: test/catch.hpp ================================================ /* * Catch v2.13.10 * Generated: 2022-10-16 11:01:23.452308 * ---------------------------------------------------------- * This file has been merged from multiple headers. Please don't edit it directly * Copyright (c) 2022 Two Blue Cubes Ltd. All rights reserved. * * Distributed under the Boost Software License, Version 1.0. (See accompanying * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED // start catch.hpp #define CATCH_VERSION_MAJOR 2 #define CATCH_VERSION_MINOR 13 #define CATCH_VERSION_PATCH 10 #ifdef __clang__ # pragma clang system_header #elif defined __GNUC__ # pragma GCC system_header #endif // start catch_suppress_warnings.h #ifdef __clang__ # ifdef __ICC // icpc defines the __clang__ macro # pragma warning(push) # pragma warning(disable: 161 1682) # else // __ICC # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" # pragma clang diagnostic ignored "-Wswitch-enum" # pragma clang diagnostic ignored "-Wcovered-switch-default" # endif #elif defined __GNUC__ // Because REQUIREs trigger GCC's -Wparentheses, and because still // supported version of g++ have only buggy support for _Pragmas, // Wparentheses have to be suppressed globally. 
# pragma GCC diagnostic ignored "-Wparentheses" // See #674 for details # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wunused-variable" # pragma GCC diagnostic ignored "-Wpadded" #endif // end catch_suppress_warnings.h #if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) # define CATCH_IMPL # define CATCH_CONFIG_ALL_PARTS #endif // In the impl file, we want to have access to all parts of the headers // Can also be used to sanely support PCHs #if defined(CATCH_CONFIG_ALL_PARTS) # define CATCH_CONFIG_EXTERNAL_INTERFACES # if defined(CATCH_CONFIG_DISABLE_MATCHERS) # undef CATCH_CONFIG_DISABLE_MATCHERS # endif # if !defined(CATCH_CONFIG_ENABLE_CHRONO_STRINGMAKER) # define CATCH_CONFIG_ENABLE_CHRONO_STRINGMAKER # endif #endif #if !defined(CATCH_CONFIG_IMPL_ONLY) // start catch_platform.h // See e.g.: // https://opensource.apple.com/source/CarbonHeaders/CarbonHeaders-18.1/TargetConditionals.h.auto.html #ifdef __APPLE__ # include # if (defined(TARGET_OS_OSX) && TARGET_OS_OSX == 1) || \ (defined(TARGET_OS_MAC) && TARGET_OS_MAC == 1) # define CATCH_PLATFORM_MAC # elif (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1) # define CATCH_PLATFORM_IPHONE # endif #elif defined(linux) || defined(__linux) || defined(__linux__) # define CATCH_PLATFORM_LINUX #elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) # define CATCH_PLATFORM_WINDOWS #endif // end catch_platform.h #ifdef CATCH_IMPL # ifndef CLARA_CONFIG_MAIN # define CLARA_CONFIG_MAIN_NOT_DEFINED # define CLARA_CONFIG_MAIN # endif #endif // start catch_user_interfaces.h namespace Catch { unsigned int rngSeed(); } // end catch_user_interfaces.h // start catch_tag_alias_autoregistrar.h // start catch_common.h // start catch_compiler_capabilities.h // Detect a number of compiler features - by compiler // The following features are defined: // // CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? 
// CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? // CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? // CATCH_CONFIG_DISABLE_EXCEPTIONS : Are exceptions enabled? // **************** // Note to maintainers: if new toggles are added please document them // in configuration.md, too // **************** // In general each macro has a _NO_ form // (e.g. CATCH_CONFIG_NO_POSIX_SIGNALS) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. #ifdef __cplusplus # if (__cplusplus >= 201402L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201402L) # define CATCH_CPP14_OR_GREATER # endif # if (__cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) # define CATCH_CPP17_OR_GREATER # endif #endif // Only GCC compiler should be used in this block, so other compilers trying to // mask themselves as GCC should be ignored. #if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC) && !defined(__CUDACC__) && !defined(__LCC__) # define CATCH_INTERNAL_START_WARNINGS_SUPPRESSION _Pragma( "GCC diagnostic push" ) # define CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION _Pragma( "GCC diagnostic pop" ) # define CATCH_INTERNAL_IGNORE_BUT_WARN(...) (void)__builtin_constant_p(__VA_ARGS__) #endif #if defined(__clang__) # define CATCH_INTERNAL_START_WARNINGS_SUPPRESSION _Pragma( "clang diagnostic push" ) # define CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION _Pragma( "clang diagnostic pop" ) // As of this writing, IBM XL's implementation of __builtin_constant_p has a bug // which results in calls to destructors being emitted for each temporary, // without a matching initialization. In practice, this can result in something // like `std::string::~string` being called on an uninitialized value. 
// // For example, this code will likely segfault under IBM XL: // ``` // REQUIRE(std::string("12") + "34" == "1234") // ``` // // Therefore, `CATCH_INTERNAL_IGNORE_BUT_WARN` is not implemented. # if !defined(__ibmxl__) && !defined(__CUDACC__) # define CATCH_INTERNAL_IGNORE_BUT_WARN(...) (void)__builtin_constant_p(__VA_ARGS__) /* NOLINT(cppcoreguidelines-pro-type-vararg, hicpp-vararg) */ # endif # define CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS \ _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) \ _Pragma( "clang diagnostic ignored \"-Wglobal-constructors\"") # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) # define CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS \ _Pragma( "clang diagnostic ignored \"-Wunused-variable\"" ) # define CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS \ _Pragma( "clang diagnostic ignored \"-Wgnu-zero-variadic-macro-arguments\"" ) # define CATCH_INTERNAL_SUPPRESS_UNUSED_TEMPLATE_WARNINGS \ _Pragma( "clang diagnostic ignored \"-Wunused-template\"" ) #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // Assume that non-Windows platforms support posix signals by default #if !defined(CATCH_PLATFORM_WINDOWS) #define CATCH_INTERNAL_CONFIG_POSIX_SIGNALS #endif //////////////////////////////////////////////////////////////////////////////// // We know some environments not to support full POSIX signals #if defined(__CYGWIN__) || defined(__QNX__) || defined(__EMSCRIPTEN__) || defined(__DJGPP__) #define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS #endif #ifdef __OS400__ # define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS # define CATCH_CONFIG_COLOUR_NONE #endif //////////////////////////////////////////////////////////////////////////////// // Android somehow still does not support std::to_string #if defined(__ANDROID__) # define CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING # define CATCH_INTERNAL_CONFIG_ANDROID_LOGWRITE #endif 
//////////////////////////////////////////////////////////////////////////////// // Not all Windows environments support SEH properly #if defined(__MINGW32__) # define CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH #endif //////////////////////////////////////////////////////////////////////////////// // PS4 #if defined(__ORBIS__) # define CATCH_INTERNAL_CONFIG_NO_NEW_CAPTURE #endif //////////////////////////////////////////////////////////////////////////////// // Cygwin #ifdef __CYGWIN__ // Required for some versions of Cygwin to declare gettimeofday // see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin # define _BSD_SOURCE // some versions of cygwin (most) do not support std::to_string. Use the libstd check. // https://gcc.gnu.org/onlinedocs/gcc-4.8.2/libstdc++/api/a01053_source.html line 2812-2813 # if !((__cplusplus >= 201103L) && defined(_GLIBCXX_USE_C99) \ && !defined(_GLIBCXX_HAVE_BROKEN_VSWPRINTF)) # define CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING # endif #endif // __CYGWIN__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #if defined(_MSC_VER) // Universal Windows platform does not support SEH // Or console colours (or console at all...) 
# if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) # define CATCH_CONFIG_COLOUR_NONE # else # define CATCH_INTERNAL_CONFIG_WINDOWS_SEH # endif # if !defined(__clang__) // Handle Clang masquerading for msvc // MSVC traditional preprocessor needs some workaround for __VA_ARGS__ // _MSVC_TRADITIONAL == 0 means new conformant preprocessor // _MSVC_TRADITIONAL == 1 means old traditional non-conformant preprocessor # if !defined(_MSVC_TRADITIONAL) || (defined(_MSVC_TRADITIONAL) && _MSVC_TRADITIONAL) # define CATCH_INTERNAL_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR # endif // MSVC_TRADITIONAL // Only do this if we're not using clang on Windows, which uses `diagnostic push` & `diagnostic pop` # define CATCH_INTERNAL_START_WARNINGS_SUPPRESSION __pragma( warning(push) ) # define CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION __pragma( warning(pop) ) # endif // __clang__ #endif // _MSC_VER #if defined(_REENTRANT) || defined(_MSC_VER) // Enable async processing, as -pthread is specified or no additional linking is required # define CATCH_INTERNAL_CONFIG_USE_ASYNC #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // Check if we are compiled with -fno-exceptions or equivalent #if defined(__EXCEPTIONS) || defined(__cpp_exceptions) || defined(_CPPUNWIND) # define CATCH_INTERNAL_CONFIG_EXCEPTIONS_ENABLED #endif //////////////////////////////////////////////////////////////////////////////// // DJGPP #ifdef __DJGPP__ # define CATCH_INTERNAL_CONFIG_NO_WCHAR #endif // __DJGPP__ //////////////////////////////////////////////////////////////////////////////// // Embarcadero C++Build #if defined(__BORLANDC__) #define CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN #endif //////////////////////////////////////////////////////////////////////////////// // Use of __COUNTER__ is suppressed during code analysis in // CLion/AppCode 2017.2.x and former, because __COUNTER__ is not properly // handled by it. 
// Otherwise all supported compilers support COUNTER macro, // but user still might want to turn it off #if ( !defined(__JETBRAINS_IDE__) || __JETBRAINS_IDE__ >= 20170300L ) #define CATCH_INTERNAL_CONFIG_COUNTER #endif //////////////////////////////////////////////////////////////////////////////// // RTX is a special version of Windows that is real time. // This means that it is detected as Windows, but does not provide // the same set of capabilities as real Windows does. #if defined(UNDER_RTSS) || defined(RTX64_BUILD) #define CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH #define CATCH_INTERNAL_CONFIG_NO_ASYNC #define CATCH_CONFIG_COLOUR_NONE #endif #if !defined(_GLIBCXX_USE_C99_MATH_TR1) #define CATCH_INTERNAL_CONFIG_GLOBAL_NEXTAFTER #endif // Various stdlib support checks that require __has_include #if defined(__has_include) // Check if string_view is available and usable #if __has_include() && defined(CATCH_CPP17_OR_GREATER) # define CATCH_INTERNAL_CONFIG_CPP17_STRING_VIEW #endif // Check if optional is available and usable # if __has_include() && defined(CATCH_CPP17_OR_GREATER) # define CATCH_INTERNAL_CONFIG_CPP17_OPTIONAL # endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) // Check if byte is available and usable # if __has_include() && defined(CATCH_CPP17_OR_GREATER) # include # if defined(__cpp_lib_byte) && (__cpp_lib_byte > 0) # define CATCH_INTERNAL_CONFIG_CPP17_BYTE # endif # endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) // Check if variant is available and usable # if __has_include() && defined(CATCH_CPP17_OR_GREATER) # if defined(__clang__) && (__clang_major__ < 8) // work around clang bug with libstdc++ https://bugs.llvm.org/show_bug.cgi?id=31852 // fix should be in clang 8, workaround in libstdc++ 8.2 # include # if defined(__GLIBCXX__) && defined(_GLIBCXX_RELEASE) && (_GLIBCXX_RELEASE < 9) # define CATCH_CONFIG_NO_CPP17_VARIANT # else # define CATCH_INTERNAL_CONFIG_CPP17_VARIANT # endif // defined(__GLIBCXX__) && 
defined(_GLIBCXX_RELEASE) && (_GLIBCXX_RELEASE < 9) # else # define CATCH_INTERNAL_CONFIG_CPP17_VARIANT # endif // defined(__clang__) && (__clang_major__ < 8) # endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) #endif // defined(__has_include) #if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) # define CATCH_CONFIG_COUNTER #endif #if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) && !defined(CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH) # define CATCH_CONFIG_WINDOWS_SEH #endif // This is set by default, because we assume that unix compilers are posix-signal-compatible by default. #if defined(CATCH_INTERNAL_CONFIG_POSIX_SIGNALS) && !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_CONFIG_POSIX_SIGNALS #endif // This is set by default, because we assume that compilers with no wchar_t support are just rare exceptions. 
// ---- catch_compiler_capabilities.h (continued) ----
// Each block below promotes an internal feature-detection macro
// (CATCH_INTERNAL_CONFIG_*) to its public counterpart (CATCH_CONFIG_*),
// unless the user forced the feature on (CATCH_CONFIG_X already defined)
// or off (CATCH_CONFIG_NO_X defined).
#if !defined(CATCH_INTERNAL_CONFIG_NO_WCHAR) && !defined(CATCH_CONFIG_NO_WCHAR) && !defined(CATCH_CONFIG_WCHAR)
# define CATCH_CONFIG_WCHAR
#endif
#if !defined(CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING) && !defined(CATCH_CONFIG_NO_CPP11_TO_STRING) && !defined(CATCH_CONFIG_CPP11_TO_STRING)
# define CATCH_CONFIG_CPP11_TO_STRING
#endif
#if defined(CATCH_INTERNAL_CONFIG_CPP17_OPTIONAL) && !defined(CATCH_CONFIG_NO_CPP17_OPTIONAL) && !defined(CATCH_CONFIG_CPP17_OPTIONAL)
# define CATCH_CONFIG_CPP17_OPTIONAL
#endif
#if defined(CATCH_INTERNAL_CONFIG_CPP17_STRING_VIEW) && !defined(CATCH_CONFIG_NO_CPP17_STRING_VIEW) && !defined(CATCH_CONFIG_CPP17_STRING_VIEW)
# define CATCH_CONFIG_CPP17_STRING_VIEW
#endif
#if defined(CATCH_INTERNAL_CONFIG_CPP17_VARIANT) && !defined(CATCH_CONFIG_NO_CPP17_VARIANT) && !defined(CATCH_CONFIG_CPP17_VARIANT)
# define CATCH_CONFIG_CPP17_VARIANT
#endif
#if defined(CATCH_INTERNAL_CONFIG_CPP17_BYTE) && !defined(CATCH_CONFIG_NO_CPP17_BYTE) && !defined(CATCH_CONFIG_CPP17_BYTE)
# define CATCH_CONFIG_CPP17_BYTE
#endif
// The experimental output-redirect implementation requires the new
// capture mechanism.
#if defined(CATCH_CONFIG_EXPERIMENTAL_REDIRECT)
# define CATCH_INTERNAL_CONFIG_NEW_CAPTURE
#endif
#if defined(CATCH_INTERNAL_CONFIG_NEW_CAPTURE) && !defined(CATCH_INTERNAL_CONFIG_NO_NEW_CAPTURE) && !defined(CATCH_CONFIG_NO_NEW_CAPTURE) && !defined(CATCH_CONFIG_NEW_CAPTURE)
# define CATCH_CONFIG_NEW_CAPTURE
#endif
// If the compiler probe did not prove exceptions are enabled, compile the
// exception-free variant of Catch.
#if !defined(CATCH_INTERNAL_CONFIG_EXCEPTIONS_ENABLED) && !defined(CATCH_CONFIG_DISABLE_EXCEPTIONS)
# define CATCH_CONFIG_DISABLE_EXCEPTIONS
#endif
#if defined(CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN) && !defined(CATCH_CONFIG_NO_POLYFILL_ISNAN) && !defined(CATCH_CONFIG_POLYFILL_ISNAN)
# define CATCH_CONFIG_POLYFILL_ISNAN
#endif
#if defined(CATCH_INTERNAL_CONFIG_USE_ASYNC) && !defined(CATCH_INTERNAL_CONFIG_NO_ASYNC) && !defined(CATCH_CONFIG_NO_USE_ASYNC) && !defined(CATCH_CONFIG_USE_ASYNC)
# define CATCH_CONFIG_USE_ASYNC
#endif
#if defined(CATCH_INTERNAL_CONFIG_ANDROID_LOGWRITE) && !defined(CATCH_CONFIG_NO_ANDROID_LOGWRITE) && !defined(CATCH_CONFIG_ANDROID_LOGWRITE)
# define CATCH_CONFIG_ANDROID_LOGWRITE
#endif
#if defined(CATCH_INTERNAL_CONFIG_GLOBAL_NEXTAFTER) && !defined(CATCH_CONFIG_NO_GLOBAL_NEXTAFTER) && !defined(CATCH_CONFIG_GLOBAL_NEXTAFTER)
# define CATCH_CONFIG_GLOBAL_NEXTAFTER
#endif

// Even if we do not think the compiler has that warning, we still have
// to provide a macro that can be used by the code.
// (Fallback empty definitions so uses of these suppression macros always
// compile, even on compilers for which no pragma was selected above.)
#if !defined(CATCH_INTERNAL_START_WARNINGS_SUPPRESSION)
# define CATCH_INTERNAL_START_WARNINGS_SUPPRESSION
#endif
#if !defined(CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION)
# define CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION
#endif
#if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS)
# define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS
#endif
#if !defined(CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS)
# define CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS
#endif
#if !defined(CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS)
# define CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS
#endif
#if !defined(CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS)
# define CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS
#endif

// The goal of this macro is to avoid evaluation of the arguments, but
// still have the compiler warn on problems inside...
#if !defined(CATCH_INTERNAL_IGNORE_BUT_WARN)
# define CATCH_INTERNAL_IGNORE_BUT_WARN(...)
#endif

// Old Apple/vanilla clang versions mishandle the unused-template
// suppression pragma, so drop it there and fall back to the empty
// definition below.
#if defined(__APPLE__) && defined(__apple_build_version__) && (__clang_major__ < 10)
# undef CATCH_INTERNAL_SUPPRESS_UNUSED_TEMPLATE_WARNINGS
#elif defined(__clang__) && (__clang_major__ < 5)
# undef CATCH_INTERNAL_SUPPRESS_UNUSED_TEMPLATE_WARNINGS
#endif
#if !defined(CATCH_INTERNAL_SUPPRESS_UNUSED_TEMPLATE_WARNINGS)
# define CATCH_INTERNAL_SUPPRESS_UNUSED_TEMPLATE_WARNINGS
#endif

// When exceptions are disabled, try/catch constructs are rewritten into
// plain if-statements: the "try" branch always runs, the handlers never do.
#if defined(CATCH_CONFIG_DISABLE_EXCEPTIONS)
#define CATCH_TRY if ((true))
#define CATCH_CATCH_ALL if ((false))
#define CATCH_CATCH_ANON(type) if ((false))
#else
#define CATCH_TRY try
#define CATCH_CATCH_ALL catch (...)
#define CATCH_CATCH_ANON(type) catch (type)
#endif

#if defined(CATCH_INTERNAL_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR) && !defined(CATCH_CONFIG_NO_TRADITIONAL_MSVC_PREPROCESSOR) && !defined(CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR)
#define CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR
#endif
// end catch_compiler_capabilities.h

// Token-pasting helpers to create a translation-unit-unique identifier,
// preferring __COUNTER__ when available and falling back to __LINE__.
#define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line
#define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line )
#ifdef CATCH_CONFIG_COUNTER
# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ )
#else
# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ )
#endif

// NOTE(review): the three #include targets were lost when angle-bracket
// contents were stripped during extraction — restore from upstream
// Catch2 (likely <iosfwd>, <cstddef>, <ostream>); TODO confirm.
#include
#include
#include

// We need a dummy global operator<< so we can bring it into Catch namespace later
struct Catch_global_namespace_dummy {};
std::ostream& operator<<(std::ostream&, Catch_global_namespace_dummy);

namespace Catch {
    // Case-sensitivity selector used by string-matching utilities.
    struct CaseSensitive { enum Choice { Yes, No }; };

    // Private-deleted copy/move operations make derived classes
    // non-copyable and non-movable.
    class NonCopyable {
        NonCopyable( NonCopyable const& )              = delete;
        NonCopyable( NonCopyable && )                  = delete;
        NonCopyable& operator = ( NonCopyable const& ) = delete;
        NonCopyable& operator = ( NonCopyable && )     = delete;
    protected:
        NonCopyable();
        virtual ~NonCopyable();
    };

    // Lightweight (file, line) pair identifying where an assertion or
    // test case appears in the source; built via CATCH_INTERNAL_LINEINFO.
    struct SourceLineInfo {
        SourceLineInfo() = delete;
        SourceLineInfo( char const* _file, std::size_t _line ) noexcept
        :   file( _file ),
            line( _line )
        {}
        SourceLineInfo( SourceLineInfo const& other )            = default;
        SourceLineInfo& operator = ( SourceLineInfo const& )     = default;
        SourceLineInfo( SourceLineInfo&& )              noexcept = default;
        SourceLineInfo& operator = ( SourceLineInfo&& ) noexcept = default;
        // True when the file name is the empty string.
        bool empty() const noexcept { return file[0] == '\0'; }
        bool operator == ( SourceLineInfo const& other ) const noexcept;
        bool operator < ( SourceLineInfo const& other ) const noexcept;
        char const* file;
        std::size_t line;
    };
    std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info );

    // Bring in operator<<
from global namespace into Catch namespace
    // This is necessary because the overload of operator<< above makes
    // lookup stop at namespace Catch
    using ::operator<<;

    // Use this in variadic streaming macros to allow
    //    >> +StreamEndStop
    // as well as
    //    >> stuff +StreamEndStop
    struct StreamEndStop {
        std::string operator+() const;
    };
    // NOTE(review): the template parameter list below was stripped during
    // extraction (originally likely `template<typename T>`); TODO confirm
    // against upstream Catch2.
    template
    T const& operator + ( T const& value, StreamEndStop ) {
        return value;
    }
}

// Expands to the SourceLineInfo for the current file/line.
// NOTE(review): static_cast lost its type argument in extraction
// (originally likely static_cast<std::size_t>); TODO confirm.
#define CATCH_INTERNAL_LINEINFO \
    ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) )
// end catch_common.h

namespace Catch {
    // Registers a tag alias at static-initialization time; constructed by
    // the CATCH_REGISTER_TAG_ALIAS macro below.
    struct RegistrarForTagAliases {
        RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo );
    };
} // end namespace Catch

// Creates an anonymous-namespace registrar object whose constructor
// records the alias->tag mapping before main() runs.
#define CATCH_REGISTER_TAG_ALIAS( alias, spec ) \
    CATCH_INTERNAL_START_WARNINGS_SUPPRESSION \
    CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS \
    namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); } \
    CATCH_INTERNAL_STOP_WARNINGS_SUPPRESSION

// end catch_tag_alias_autoregistrar.h
// start catch_test_registry.h
// start catch_interfaces_testcase.h
// NOTE(review): #include target stripped in extraction (likely <vector>);
// TODO confirm.
#include
namespace Catch {
    class TestSpec;

    // Abstract invoker: one instance per registered test case; invoke()
    // runs the test body.
    struct ITestInvoker {
        virtual void invoke () const = 0;
        virtual ~ITestInvoker();
    };

    class TestCase;
    struct IConfig;

    // Registry of all compiled-in test cases.
    // NOTE(review): the std::vector element types were stripped in
    // extraction (likely std::vector<TestCase>); TODO confirm.
    struct ITestCaseRegistry {
        virtual ~ITestCaseRegistry();
        virtual std::vector const& getAllTests() const = 0;
        virtual std::vector const& getAllTestsSorted( IConfig const& config ) const = 0;
    };

    bool isThrowSafe( TestCase const& testCase, IConfig const& config );
    bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config );
    std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config );
    std::vector const& getAllTestCasesSorted( IConfig const& config );
}
// end catch_interfaces_testcase.h

// start catch_stringref.h
// NOTE(review): the four #include targets were stripped in extraction
// (likely <cstddef>, <string>, <iosfwd>, <cassert>); TODO confirm.
#include
#include
#include
#include
namespace Catch {
    /// A non-owning string class (similar to the forthcoming
std::string_view)
    /// Note that, because a StringRef may be a substring of another string,
    /// it may not be null terminated.
    class StringRef {
    public:
        using size_type = std::size_t;
        using const_iterator = const char*;

    private:
        static constexpr char const* const s_empty = "";

        char const* m_start = s_empty;
        size_type m_size = 0;

    public: // construction
        constexpr StringRef() noexcept = default;

        StringRef( char const* rawChars ) noexcept;

        constexpr StringRef( char const* rawChars, size_type size ) noexcept
        :   m_start( rawChars ),
            m_size( size )
        {}

        StringRef( std::string const& stdString ) noexcept
        :   m_start( stdString.c_str() ),
            m_size( stdString.size() )
        {}

        // Makes an owning copy of the viewed characters.
        explicit operator std::string() const {
            return std::string(m_start, m_size);
        }

    public: // operators
        auto operator == ( StringRef const& other ) const noexcept -> bool;
        auto operator != (StringRef const& other) const noexcept -> bool {
            return !(*this == other);
        }

        // Unchecked in release builds; asserts index < size().
        auto operator[] ( size_type index ) const noexcept -> char {
            assert(index < m_size);
            return m_start[index];
        }

    public: // named queries
        constexpr auto empty() const noexcept -> bool {
            return m_size == 0;
        }
        constexpr auto size() const noexcept -> size_type {
            return m_size;
        }

        // Returns the current start pointer. If the StringRef is not
        // null-terminated, throws std::domain_exception
        auto c_str() const -> char const*;

    public: // substrings and searches
        // Returns a substring of [start, start + length).
        // If start + length > size(), then the substring is [start, size()).
        // If start > size(), then the substring is empty.
        auto substr( size_type start, size_type length ) const noexcept -> StringRef;

        // Returns the current start pointer. May not be null-terminated.
        auto data() const noexcept -> char const*;

        // Reads one-past-the-end of the view; valid only when the viewed
        // buffer extends at least one byte past m_size.
        constexpr auto isNullTerminated() const noexcept -> bool {
            return m_start[m_size] == '\0';
        }

    public: // iterators
        constexpr const_iterator begin() const { return m_start; }
        constexpr const_iterator end() const { return m_start + m_size; }
    };

    auto operator += ( std::string& lhs, StringRef const& sr ) -> std::string&;
    auto operator << ( std::ostream& os, StringRef const& sr ) -> std::ostream&;

    // Literal suffix: "foo"_sr builds a StringRef without strlen.
    constexpr auto operator "" _sr( char const* rawChars, std::size_t size ) noexcept -> StringRef {
        return StringRef( rawChars, size );
    }
} // namespace Catch

// Global-namespace variant of the literal suffix.
constexpr auto operator "" _catch_sr( char const* rawChars, std::size_t size ) noexcept -> Catch::StringRef {
    return Catch::StringRef( rawChars, size );
}
// end catch_stringref.h

// start catch_preprocessor.hpp
// Nested self-application multiplies the number of rescans the
// preprocessor performs, which is what drives the deferred-expansion
// "recursion" used by CATCH_REC_LIST below.
#define CATCH_RECURSION_LEVEL0(...) __VA_ARGS__
#define CATCH_RECURSION_LEVEL1(...) CATCH_RECURSION_LEVEL0(CATCH_RECURSION_LEVEL0(CATCH_RECURSION_LEVEL0(__VA_ARGS__)))
#define CATCH_RECURSION_LEVEL2(...) CATCH_RECURSION_LEVEL1(CATCH_RECURSION_LEVEL1(CATCH_RECURSION_LEVEL1(__VA_ARGS__)))
#define CATCH_RECURSION_LEVEL3(...) CATCH_RECURSION_LEVEL2(CATCH_RECURSION_LEVEL2(CATCH_RECURSION_LEVEL2(__VA_ARGS__)))
#define CATCH_RECURSION_LEVEL4(...) CATCH_RECURSION_LEVEL3(CATCH_RECURSION_LEVEL3(CATCH_RECURSION_LEVEL3(__VA_ARGS__)))
#define CATCH_RECURSION_LEVEL5(...) CATCH_RECURSION_LEVEL4(CATCH_RECURSION_LEVEL4(CATCH_RECURSION_LEVEL4(__VA_ARGS__)))

#ifdef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR
#define INTERNAL_CATCH_EXPAND_VARGS(...) __VA_ARGS__
// MSVC needs more evaluations
#define CATCH_RECURSION_LEVEL6(...) CATCH_RECURSION_LEVEL5(CATCH_RECURSION_LEVEL5(CATCH_RECURSION_LEVEL5(__VA_ARGS__)))
#define CATCH_RECURSE(...) CATCH_RECURSION_LEVEL6(CATCH_RECURSION_LEVEL6(__VA_ARGS__))
#else
#define CATCH_RECURSE(...) CATCH_RECURSION_LEVEL5(__VA_ARGS__)
#endif

// Expansion terminator for the REC machinery.
#define CATCH_REC_END(...)
#define CATCH_REC_OUT

// Deferred-expansion plumbing: CATCH_DEFER delays expansion of `id` by one
// rescan; CATCH_REC_GET_END* detect the ()()() end-marker so the
// "recursion" knows when to stop; CATCH_REC_NEXT* pick either the next
// list macro or the terminator.
#define CATCH_EMPTY()
#define CATCH_DEFER(id) id CATCH_EMPTY()

#define CATCH_REC_GET_END2() 0, CATCH_REC_END
#define CATCH_REC_GET_END1(...) CATCH_REC_GET_END2
#define CATCH_REC_GET_END(...) CATCH_REC_GET_END1
#define CATCH_REC_NEXT0(test, next, ...) next CATCH_REC_OUT
#define CATCH_REC_NEXT1(test, next) CATCH_DEFER ( CATCH_REC_NEXT0 ) ( test, next, 0)
#define CATCH_REC_NEXT(test, next) CATCH_REC_NEXT1(CATCH_REC_GET_END test, next)

// LIST0/LIST1 alternate so each step can defer to the other; LIST2 is the
// entry step (no leading comma).
#define CATCH_REC_LIST0(f, x, peek, ...) , f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1) ) ( f, peek, __VA_ARGS__ )
#define CATCH_REC_LIST1(f, x, peek, ...) , f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST0) ) ( f, peek, __VA_ARGS__ )
#define CATCH_REC_LIST2(f, x, peek, ...) f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1) ) ( f, peek, __VA_ARGS__ )

#define CATCH_REC_LIST0_UD(f, userdata, x, peek, ...) , f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1_UD) ) ( f, userdata, peek, __VA_ARGS__ )
#define CATCH_REC_LIST1_UD(f, userdata, x, peek, ...) , f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST0_UD) ) ( f, userdata, peek, __VA_ARGS__ )
#define CATCH_REC_LIST2_UD(f, userdata, x, peek, ...) f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1_UD) ) ( f, userdata, peek, __VA_ARGS__ )

// Applies the function macro `f` to each of the remaining parameters, inserts commas between the results,
// and passes userdata as the first parameter to each invocation,
// e.g. CATCH_REC_LIST_UD(f, x, a, b, c) evaluates to f(x, a), f(x, b), f(x, c)
#define CATCH_REC_LIST_UD(f, userdata, ...) CATCH_RECURSE(CATCH_REC_LIST2_UD(f, userdata, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
#define CATCH_REC_LIST(f, ...) CATCH_RECURSE(CATCH_REC_LIST2(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))

// Parenthesis-removal trick: INTERNAL_CATCH_DEF(x) eats a parenthesised
// argument, and pasting INTERNAL_CATCH_NO onto the result selects the
// empty INTERNAL_CATCH_NOINTERNAL_CATCH_DEF definition.
#define INTERNAL_CATCH_EXPAND1(param) INTERNAL_CATCH_EXPAND2(param)
#define INTERNAL_CATCH_EXPAND2(...) INTERNAL_CATCH_NO## __VA_ARGS__
#define INTERNAL_CATCH_DEF(...) INTERNAL_CATCH_DEF __VA_ARGS__
#define INTERNAL_CATCH_NOINTERNAL_CATCH_DEF

#define INTERNAL_CATCH_STRINGIZE(...) INTERNAL_CATCH_STRINGIZE2(__VA_ARGS__)
#ifndef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR
#define INTERNAL_CATCH_STRINGIZE2(...) #__VA_ARGS__
#define INTERNAL_CATCH_STRINGIZE_WITHOUT_PARENS(param) INTERNAL_CATCH_STRINGIZE(INTERNAL_CATCH_REMOVE_PARENS(param))
#else
// MSVC is adding extra space and needs another indirection to expand INTERNAL_CATCH_NOINTERNAL_CATCH_DEF
#define INTERNAL_CATCH_STRINGIZE2(...) INTERNAL_CATCH_STRINGIZE3(__VA_ARGS__)
#define INTERNAL_CATCH_STRINGIZE3(...) #__VA_ARGS__
// The +1 skips the stray leading space MSVC leaves in the stringised text.
#define INTERNAL_CATCH_STRINGIZE_WITHOUT_PARENS(param) (INTERNAL_CATCH_STRINGIZE(INTERNAL_CATCH_REMOVE_PARENS(param)) + 1)
#endif

#define INTERNAL_CATCH_MAKE_NAMESPACE2(...) ns_##__VA_ARGS__
#define INTERNAL_CATCH_MAKE_NAMESPACE(name) INTERNAL_CATCH_MAKE_NAMESPACE2(name)

#define INTERNAL_CATCH_REMOVE_PARENS(...) INTERNAL_CATCH_EXPAND1(INTERNAL_CATCH_DEF __VA_ARGS__)

// NOTE(review): get_wrapper's template argument list was stripped in
// extraction (likely get_wrapper<__VA_ARGS__>()); TODO confirm against
// upstream Catch2.
#ifndef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR
#define INTERNAL_CATCH_MAKE_TYPE_LIST2(...) decltype(get_wrapper())
#define INTERNAL_CATCH_MAKE_TYPE_LIST(...) INTERNAL_CATCH_MAKE_TYPE_LIST2(INTERNAL_CATCH_REMOVE_PARENS(__VA_ARGS__))
#else
#define INTERNAL_CATCH_MAKE_TYPE_LIST2(...) INTERNAL_CATCH_EXPAND_VARGS(decltype(get_wrapper()))
#define INTERNAL_CATCH_MAKE_TYPE_LIST(...) INTERNAL_CATCH_EXPAND_VARGS(INTERNAL_CATCH_MAKE_TYPE_LIST2(INTERNAL_CATCH_REMOVE_PARENS(__VA_ARGS__)))
#endif

#define INTERNAL_CATCH_MAKE_TYPE_LISTS_FROM_TYPES(...)\
    CATCH_REC_LIST(INTERNAL_CATCH_MAKE_TYPE_LIST,__VA_ARGS__)

// Strip parentheses from up to 11 arguments, one macro per arity.
#define INTERNAL_CATCH_REMOVE_PARENS_1_ARG(_0) INTERNAL_CATCH_REMOVE_PARENS(_0)
#define INTERNAL_CATCH_REMOVE_PARENS_2_ARG(_0, _1) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_1_ARG(_1)
#define INTERNAL_CATCH_REMOVE_PARENS_3_ARG(_0, _1, _2) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_2_ARG(_1, _2)
#define INTERNAL_CATCH_REMOVE_PARENS_4_ARG(_0, _1, _2, _3) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_3_ARG(_1, _2, _3)
#define INTERNAL_CATCH_REMOVE_PARENS_5_ARG(_0, _1, _2, _3, _4) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_4_ARG(_1, _2, _3, _4)
#define INTERNAL_CATCH_REMOVE_PARENS_6_ARG(_0, _1, _2, _3, _4, _5) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_5_ARG(_1, _2, _3, _4, _5)
#define INTERNAL_CATCH_REMOVE_PARENS_7_ARG(_0, _1, _2, _3, _4, _5, _6) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_6_ARG(_1, _2, _3, _4, _5, _6)
#define INTERNAL_CATCH_REMOVE_PARENS_8_ARG(_0, _1, _2, _3, _4, _5, _6, _7) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_7_ARG(_1, _2, _3, _4, _5, _6, _7)
#define INTERNAL_CATCH_REMOVE_PARENS_9_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_8_ARG(_1, _2, _3, _4, _5, _6, _7, _8)
#define INTERNAL_CATCH_REMOVE_PARENS_10_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_9_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9)
#define INTERNAL_CATCH_REMOVE_PARENS_11_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_10_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10)

// Classic argument-counting helper: the padded argument list pushes the
// correct count into the N slot.
#define INTERNAL_CATCH_VA_NARGS_IMPL(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...)
N #define INTERNAL_CATCH_TYPE_GEN\ template struct TypeList {};\ template\ constexpr auto get_wrapper() noexcept -> TypeList { return {}; }\ template class...> struct TemplateTypeList{};\ template class...Cs>\ constexpr auto get_wrapper() noexcept -> TemplateTypeList { return {}; }\ template\ struct append;\ template\ struct rewrap;\ template class, typename...>\ struct create;\ template class, typename>\ struct convert;\ \ template \ struct append { using type = T; };\ template< template class L1, typename...E1, template class L2, typename...E2, typename...Rest>\ struct append, L2, Rest...> { using type = typename append, Rest...>::type; };\ template< template class L1, typename...E1, typename...Rest>\ struct append, TypeList, Rest...> { using type = L1; };\ \ template< template class Container, template class List, typename...elems>\ struct rewrap, List> { using type = TypeList>; };\ template< template class Container, template class List, class...Elems, typename...Elements>\ struct rewrap, List, Elements...> { using type = typename append>, typename rewrap, Elements...>::type>::type; };\ \ template